{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import datetime\n",
    "from tqdm import tqdm\n",
    "from geopy.distance import geodesic\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 一、特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1、合并训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def merge_data():\n",
    "    \"\"\"Load the phase-1 files and build one combined DataFrame.\n",
    "\n",
    "    Joins plans (and, for training, clicks) onto the queries, fills\n",
    "    unclicked training rows with click_mode 0 and marks test rows with\n",
    "    click_mode -1 so the two sets can be separated again later.\n",
    "    \"\"\"\n",
    "    # Labels: the transport mode each user actually clicked\n",
    "    train_clicks = pd.read_csv(\"data_set_phase1//train_clicks.csv\")\n",
    "\n",
    "    # Feature data sets\n",
    "    train_plans = pd.read_csv(\"data_set_phase1//train_plans.csv\")\n",
    "    train_queries = pd.read_csv(\"data_set_phase1//train_queries.csv\")\n",
    "    test_plans = pd.read_csv(\"data_set_phase1//test_plans.csv\")\n",
    "    test_queries = pd.read_csv(\"data_set_phase1//test_queries.csv\")\n",
    "\n",
    "    # Merge the training set; queries with no click row get click_mode 0\n",
    "    tra_data = train_queries.merge(train_plans, on='sid', how='left')\n",
    "    tra_data = tra_data.merge(train_clicks, on='sid', how='left')\n",
    "    tra_data = tra_data.drop(['click_time'], axis=1)\n",
    "    tra_data['click_mode'] = tra_data['click_mode'].fillna(0)\n",
    "\n",
    "    # Merge the test set; -1 marks rows whose label is unknown\n",
    "    tes_data = test_queries.merge(test_plans, on='sid', how='left')\n",
    "    tes_data['click_mode'] = -1\n",
    "\n",
    "    # Stack train and test so features are built consistently on both\n",
    "    all_data = pd.concat([tra_data, tes_data], axis=0)\n",
    "    all_data = all_data.drop(['plan_time'], axis=1)\n",
    "    all_data = all_data.reset_index(drop=True)\n",
    "    \n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2、抽取o、d的特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 将o、d分离，添加POI数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_od_feature(all_data):\n",
    "    \"\"\"Derive origin/destination features from the 'o' and 'd' strings.\n",
    "\n",
    "    Adds numeric coordinates (o1, o2, d1, d2), the raw pair string 'od'\n",
    "    (kept for later feature combinations), its label-encoded form 'o_d',\n",
    "    and 'oisd', a 0/1 flag marking origin == destination.\n",
    "    \"\"\"\n",
    "    # 'o' / 'd' are \"lng,lat\" strings\n",
    "    all_data['o1'] = all_data['o'].apply(lambda x : float(x.split(',')[0]))\n",
    "    all_data['o2'] = all_data['o'].apply(lambda x : float(x.split(',')[1]))\n",
    "    all_data['d1'] = all_data['d'].apply(lambda x : float(x.split(',')[0]))\n",
    "    all_data['d2'] = all_data['d'].apply(lambda x : float(x.split(',')[1]))\n",
    "    \n",
    "    # Combine the o/d pair and label-encode it; the separator prevents\n",
    "    # two different pairs from concatenating to the same key string\n",
    "    le = LabelEncoder()\n",
    "    all_data['od'] = all_data['o'] + '_' + all_data['d']\n",
    "    all_data['o_d'] = le.fit_transform(all_data['od'])\n",
    "    \n",
    "    # Flag origin == destination directly as 0/1 (no encoder needed;\n",
    "    # LabelEncoder on a boolean yields the same 0/1 codes)\n",
    "    all_data['oisd'] = (all_data['o'] == all_data['d']).astype(int)\n",
    "    \n",
    "    # POI data is merged in a later cell; 'o' and 'd' are kept here and\n",
    "    # dropped in combine_feature once all combinations are built\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 127,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>lng_lat</th>\n",
       "      <th>未知_x</th>\n",
       "      <th>出入口_x</th>\n",
       "      <th>房地产_x</th>\n",
       "      <th>公司企业_x</th>\n",
       "      <th>购物_x</th>\n",
       "      <th>行政地标_x</th>\n",
       "      <th>交通设施_x</th>\n",
       "      <th>教育培训_x</th>\n",
       "      <th>金融_x</th>\n",
       "      <th>...</th>\n",
       "      <th>文化传媒_x</th>\n",
       "      <th>休闲娱乐_x</th>\n",
       "      <th>医疗_x</th>\n",
       "      <th>运动健身_x</th>\n",
       "      <th>政府机构_x</th>\n",
       "      <th>自然地物_x</th>\n",
       "      <th>内部楼号</th>\n",
       "      <th>地产小区</th>\n",
       "      <th>飞机场</th>\n",
       "      <th>餐饮</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>116.17,39.82</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>3</td>\n",
       "      <td>2</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>116.52,39.77</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>116.27,40.22</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>116.27,39.96</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>3</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>3</td>\n",
       "      <td>0</td>\n",
       "      <td>...</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>116.16,39.72</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 25 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        lng_lat  未知_x  出入口_x  房地产_x  公司企业_x  购物_x  行政地标_x  交通设施_x  教育培训_x  \\\n",
       "0  116.17,39.82     0      0      4       0     0       0       0       3   \n",
       "1  116.52,39.77     0      0      4       2     1       0       0       1   \n",
       "2  116.27,40.22     0      1      5       0     0       0       0       1   \n",
       "3  116.27,39.96     0      0      0       3     0       0       0       3   \n",
       "4  116.16,39.72     0      0      4       1     1       0       1       0   \n",
       "\n",
       "   金融_x ...  文化传媒_x  休闲娱乐_x  医疗_x  运动健身_x  政府机构_x  自然地物_x  内部楼号  地产小区  飞机场  餐饮  \n",
       "0     2 ...       0       0     0       0       0       0     0     0    0   0  \n",
       "1     1 ...       0       0     0       0       0       0     0     0    0   0  \n",
       "2     0 ...       0       0     1       0       0       0     0     0    0   0  \n",
       "3     0 ...       1       0     1       0       1       0     0     0    0   0  \n",
       "4     0 ...       0       0     1       0       0       0     0     0    0   0  \n",
       "\n",
       "[5 rows x 25 columns]"
      ]
     },
     "execution_count": 127,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# POI category counts per coordinate, prepared offline in tidy//.\n",
    "# NOTE(review): the 'ANSI' codec is Windows-only -- on other platforms\n",
    "# this raises LookupError; confirm the file's real encoding (e.g. 'gbk')\n",
    "POIs_feature = pd.read_csv(\"tidy//POIs_feature.csv\", encoding='ANSI')\n",
    "POIs_feature.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 124,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach POI counts for the origin, then for the destination; pandas\n",
    "# disambiguates the duplicated POI columns with _x/_y suffixes.\n",
    "# NOTE(review): 'lng_lat' is overwritten in place and never dropped, and\n",
    "# str(float) must reproduce the key format in POIs_feature -- confirm\n",
    "all_data['lng_lat'] = all_data['o1'].astype(str)+','+all_data['o2'].astype(str)\n",
    "all_data = all_data.merge(POIs_feature, on='lng_lat', how='left')\n",
    "all_data['lng_lat'] = all_data['d1'].astype(str)+','+all_data['d2'].astype(str)\n",
    "all_data = all_data.merge(POIs_feature, on='lng_lat', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 126,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "sid                0\n",
       "pid                0\n",
       "req_time           0\n",
       "click_mode         0\n",
       "o1                 0\n",
       "o2                 0\n",
       "d1                 0\n",
       "d2                 0\n",
       "o_d                0\n",
       "svd_mode_0         0\n",
       "svd_mode_1         0\n",
       "svd_mode_2         0\n",
       "svd_mode_3         0\n",
       "svd_mode_4         0\n",
       "svd_mode_5         0\n",
       "svd_mode_6         0\n",
       "svd_mode_7         0\n",
       "svd_mode_8         0\n",
       "svd_mode_9         0\n",
       "mode_feas_0        0\n",
       "mode_feas_1        0\n",
       "mode_feas_2        0\n",
       "mode_feas_3        0\n",
       "mode_feas_4        0\n",
       "mode_feas_5        0\n",
       "mode_feas_6        0\n",
       "mode_feas_7        0\n",
       "mode_feas_8        0\n",
       "mode_feas_9        0\n",
       "mode_feas_10       0\n",
       "                ... \n",
       "文化传媒_x_y        6782\n",
       "休闲娱乐_x_y        6782\n",
       "医疗_x_y          6782\n",
       "运动健身_x_y        6782\n",
       "政府机构_x_y        6782\n",
       "自然地物_x_y        6782\n",
       "未知_y_y          6782\n",
       "出入口_y_y         6782\n",
       "房地产_y_y         6782\n",
       "公司企业_y_y        6782\n",
       "购物_y_y          6782\n",
       "行政地标_y_y        6782\n",
       "交通设施_y_y        6782\n",
       "教育培训_y_y        6782\n",
       "金融_y_y          6782\n",
       "酒店_y_y          6782\n",
       "旅游景点_y_y        6782\n",
       "美食_y_y          6782\n",
       "汽车服务_y_y        6782\n",
       "生活服务_y_y        6782\n",
       "文化传媒_y_y        6782\n",
       "休闲娱乐_y_y        6782\n",
       "医疗_y_y          6782\n",
       "运动健身_y_y        6782\n",
       "政府机构_y_y        6782\n",
       "自然地物_y_y        6782\n",
       "内部楼号_y          6782\n",
       "地产小区_y          6782\n",
       "飞机场_y           6782\n",
       "餐饮_y            6782\n",
       "Length: 222, dtype: int64"
      ]
     },
     "execution_count": 126,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Missing values per column: rows whose coordinates never appear in the\n",
    "# POI table are left as NaN by the left merges above (6782 here)\n",
    "all_data.isnull().sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3、抽取plans的特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 提取plans特征\n",
    "### 1、max_distance、min_distance、mean_distance、std_distance\n",
    "### 2、max_price、min_price、mean_price、std_price\n",
    "### 3、max_eta、min_eta、mean_eta、std_eta\n",
    "### 4、max_dis_mode、min_dis_mode、max_price_mode、min_price_mode、max_eta_mode、min_eta_mode\n",
    "### 5、first_mode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_plan_feature(all_data):\n",
    "    \"\"\"Extract statistics from each row's recommended 'plans' JSON.\n",
    "\n",
    "    Builds per-user max/min/mean/std of distance, price and eta, the\n",
    "    modes attaining those extremes, the first recommended mode, a 0/1\n",
    "    indicator per mode, and a 10-dim SVD of a tfidf over the recommended\n",
    "    mode sequence.  Drops the raw 'plans' column before returning.\n",
    "    \"\"\"\n",
    "    n = all_data.shape[0]\n",
    "    \n",
    "    # One-hot style indicators: mode m recommended for user i -> [i, m] = 1\n",
    "    mode_list_feas = np.zeros((n, 12))\n",
    "\n",
    "    # Max / min / mean / std of plan distances\n",
    "    max_distance, min_distance, mean_distance, std_distance = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # Max / min / mean / std of plan prices\n",
    "    max_price, min_price, mean_price, std_price = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # Max / min / mean / std of plan etas\n",
    "    max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # Modes attaining each extreme, plus the first recommended mode\n",
    "    max_dis_mode, min_dis_mode, max_price_mode, min_price_mode, max_eta_mode, min_eta_mode, first_mode = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # Recommended-mode sequence per user, as a text document for tfidf\n",
    "    mode_texts=[]\n",
    "    \n",
    "    # Walk over every user's plan list\n",
    "    for i, plan in tqdm(enumerate(all_data['plans'].values)):\n",
    "        try:\n",
    "            user_plan_list = json.loads(plan)\n",
    "        # Missing plans are NaN (TypeError); malformed JSON raises\n",
    "        # JSONDecodeError (a ValueError).  A bare except here would also\n",
    "        # swallow KeyboardInterrupt and real bugs.\n",
    "        except (TypeError, ValueError):\n",
    "            user_plan_list = []\n",
    "        if len(user_plan_list)==0:\n",
    "            # No recommendations: mode 0 and sentinel -1 for every stat\n",
    "            mode_list_feas[i, 0] = 1\n",
    "\n",
    "            first_mode[i] = 0\n",
    "\n",
    "            max_distance[i] = -1\n",
    "            min_distance[i] = -1\n",
    "            mean_distance[i] = -1\n",
    "            std_distance[i] = -1\n",
    "\n",
    "            max_price[i] = -1\n",
    "            min_price[i] = -1\n",
    "            mean_price[i] = -1\n",
    "            std_price[i] = -1\n",
    "\n",
    "            max_eta[i] = -1\n",
    "            min_eta[i] = -1\n",
    "            mean_eta[i] = -1\n",
    "            std_eta[i] = -1\n",
    "\n",
    "            max_dis_mode[i] = -1\n",
    "            min_dis_mode[i] = -1\n",
    "            max_price_mode[i] = -1\n",
    "            min_price_mode[i] = -1\n",
    "            max_eta_mode[i] = -1\n",
    "            min_eta_mode[i] = -1\n",
    "\n",
    "            mode_texts.append('word_null')\n",
    "        else:\n",
    "            distance_list = []\n",
    "            price_list = []\n",
    "            eta_list = []\n",
    "            mode_list = []\n",
    "\n",
    "            # Pull distance / price / eta / mode out of each plan;\n",
    "            # empty price strings (free modes) become 0\n",
    "            for tmp_dict in user_plan_list:\n",
    "                distance_list.append(int(tmp_dict['distance']))\n",
    "                if tmp_dict['price']=='':\n",
    "                    price_list.append(0)\n",
    "                else:\n",
    "                    price_list.append(int(tmp_dict['price']))\n",
    "                eta_list.append(int(tmp_dict['eta']))\n",
    "                mode_list.append(int(tmp_dict['transport_mode']))\n",
    "\n",
    "            # Record the recommended modes, in order, as one document\n",
    "            mode_texts.append(' '.join(['word_{}'.format(mode) for mode in mode_list]))\n",
    "\n",
    "            # Convert the lists to ndarrays\n",
    "            distance_list = np.array(distance_list)\n",
    "            price_list = np.array(price_list)\n",
    "            eta_list = np.array(eta_list)\n",
    "            mode_list = np.array(mode_list, dtype='int')\n",
    "            \n",
    "            # Mark every recommended mode for this user\n",
    "            mode_list_feas[i, mode_list] = 1\n",
    "\n",
    "            # Sort orders for distance / price / eta\n",
    "            distance_sort_idx = np.argsort(distance_list)\n",
    "            price_sort_idx = np.argsort(price_list)\n",
    "            eta_sort_idx = np.argsort(eta_list)\n",
    "\n",
    "            # Build the extreme-value features\n",
    "            max_distance[i] = distance_list[distance_sort_idx[-1]]\n",
    "            min_distance[i] = distance_list[distance_sort_idx[0]]\n",
    "            mean_distance[i] = np.mean(distance_list)\n",
    "            std_distance[i] = np.std(distance_list)\n",
    "\n",
    "            max_price[i] = price_list[price_sort_idx[-1]]\n",
    "            min_price[i] = price_list[price_sort_idx[0]]\n",
    "            mean_price[i] = np.mean(price_list)\n",
    "            std_price[i] = np.std(price_list)\n",
    "\n",
    "            max_eta[i] = eta_list[eta_sort_idx[-1]]\n",
    "            min_eta[i] = eta_list[eta_sort_idx[0]]\n",
    "            mean_eta[i] = np.mean(eta_list)\n",
    "            std_eta[i] = np.std(eta_list)\n",
    "\n",
    "            first_mode[i] = mode_list[0]\n",
    "\n",
    "            max_dis_mode[i] = mode_list[distance_sort_idx[-1]]\n",
    "            min_dis_mode[i] = mode_list[distance_sort_idx[0]]\n",
    "\n",
    "            max_price_mode[i] = mode_list[price_sort_idx[-1]]\n",
    "            min_price_mode[i] = mode_list[price_sort_idx[0]]\n",
    "\n",
    "            max_eta_mode[i] = mode_list[eta_sort_idx[-1]]\n",
    "            min_eta_mode[i] = mode_list[eta_sort_idx[0]]\n",
    "\n",
    "    # Store the features in a DataFrame\n",
    "    plan_feature_data = pd.DataFrame(mode_list_feas)\n",
    "    plan_feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]\n",
    "\n",
    "    plan_feature_data['max_distance'] = max_distance\n",
    "    plan_feature_data['min_distance'] = min_distance\n",
    "    plan_feature_data['mean_distance'] = mean_distance\n",
    "    plan_feature_data['std_distance'] = std_distance\n",
    "\n",
    "    plan_feature_data['max_price'] = max_price\n",
    "    plan_feature_data['min_price'] = min_price\n",
    "    plan_feature_data['mean_price'] = mean_price\n",
    "    plan_feature_data['std_price'] = std_price\n",
    "\n",
    "    plan_feature_data['max_eta'] = max_eta\n",
    "    plan_feature_data['min_eta'] = min_eta\n",
    "    plan_feature_data['mean_eta'] = mean_eta\n",
    "    plan_feature_data['std_eta'] = std_eta\n",
    "\n",
    "    plan_feature_data['max_dis_mode'] = max_dis_mode\n",
    "    plan_feature_data['min_dis_mode'] = min_dis_mode\n",
    "    plan_feature_data['max_price_mode'] = max_price_mode\n",
    "    plan_feature_data['min_price_mode'] = min_price_mode\n",
    "    plan_feature_data['max_eta_mode'] = max_eta_mode\n",
    "    plan_feature_data['min_eta_mode'] = min_eta_mode\n",
    "\n",
    "    plan_feature_data['first_mode'] = first_mode\n",
    "\n",
    "    # tfidf over mode sequences (uni+bigrams), reduced to 10 dims via SVD\n",
    "    tfidf = TfidfVectorizer(ngram_range=(1, 2))\n",
    "    tfidf_vec = tfidf.fit_transform(mode_texts)\n",
    "    svd = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)\n",
    "    mode_svd = svd.fit_transform(tfidf_vec)\n",
    "    \n",
    "    # Convert to a DataFrame\n",
    "    mode_svd = pd.DataFrame(mode_svd)\n",
    "    mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]\n",
    "\n",
    "    all_data = pd.concat([all_data, mode_svd, plan_feature_data], axis=1)\n",
    "    all_data = all_data.drop(['plans'], axis=1)\n",
    "    \n",
    "    return  all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4、抽取profiles数据集特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_profiles_feature(all_data):\n",
    "    \"\"\"Join user-profile attributes onto all_data via 'pid'.\n",
    "\n",
    "    Queries without a pid are mapped to a sentinel profile (pid == -1,\n",
    "    all attribute columns 0) so the left merge never leaves the profile\n",
    "    columns empty.\n",
    "    \"\"\"\n",
    "    profiles = pd.read_csv(\"data_set_phase1//profiles.csv\")\n",
    "\n",
    "    # Sentinel profile for users without a pid: first column (pid) = -1,\n",
    "    # every attribute 0; width taken from the file instead of hard-coding 67\n",
    "    profiles_na = np.zeros(profiles.shape[1])\n",
    "    profiles_na[0] = -1\n",
    "    profiles_na = pd.DataFrame(profiles_na.reshape(1, -1))\n",
    "    profiles_na.columns = profiles.columns\n",
    "    # DataFrame.append was removed in pandas 2.0; concat is the supported way\n",
    "    profiles = pd.concat([profiles, profiles_na], ignore_index=True)\n",
    "\n",
    "    # Merge the profiles onto the main table\n",
    "    all_data['pid'] = all_data['pid'].fillna(-1)\n",
    "    all_data = all_data.merge(profiles, on='pid', how='left')\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5、抽取时间特征（req_time）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 计划提取：距离国庆节的天数、月份、一年中第几天、周几、小时、小时cat、是否是假期、是否是周末（当前仅实现：周几、小时）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_time_feature(all_data):\n",
    "    \"\"\"Add request-time features: day of week and hour of day.\"\"\"\n",
    "    req_time = pd.to_datetime(all_data['req_time'])\n",
    "    all_data['req_time'] = req_time\n",
    "    all_data['dayofweek'] = req_time.dt.dayofweek\n",
    "    all_data['hourofday'] = req_time.dt.hour\n",
    "    \n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6、提取pid特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 根据lgbm的特征重要度发现，pid是个强特，所以对pid进行特征提取\n",
    "### 1、统计每个pid出现的次数，将次数作为特征\n",
    "### 2、统计每个pid在每个类别中出现的次数（这个有问题）\n",
    "### 3、pid与时间特征的组合出现的次数\n",
    "### 4、pid与时间特征的组合在每个类别中出现的次数（这个有问题）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_pid_feature(all_data):\n",
    "    \"\"\"Add count features for pid combined with o, d and o_d.\n",
    "\n",
    "    Each (pid, key) pair is counted over the whole data set and the\n",
    "    count is merged back as a feature column; unseen pairs become 0.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Occurrences of each pid\n",
    "    # NOTE(review): pid_counts is built but never merged onto all_data;\n",
    "    # the section header lists it as an intended feature -- confirm\n",
    "    pid_counts = pd.DataFrame()\n",
    "    counts = all_data['pid'].value_counts()\n",
    "    pid_counts['pid'] = counts.index\n",
    "    pid_counts['pid_counts'] = list(counts)\n",
    "    \n",
    "    # Count of each (pid, o) pair; rename/reset_index turns the grouped\n",
    "    # value_counts Series straight into a mergeable frame (the previous\n",
    "    # to_csv/read_csv round-trip was slow and version-dependent)\n",
    "    grouped = all_data['o'].groupby(all_data['pid'])\n",
    "    pidCombineO = grouped.value_counts().rename('pid_o_counts').reset_index()\n",
    "    all_data = all_data.merge(pidCombineO, on=['pid', 'o'], how='left')\n",
    "    \n",
    "    # Count of each (pid, d) pair\n",
    "    grouped = all_data['d'].groupby(all_data['pid'])\n",
    "    pidCombineD = grouped.value_counts().rename('pid_d_counts').reset_index()\n",
    "    all_data = all_data.merge(pidCombineD, on=['pid', 'd'], how='left')\n",
    "    \n",
    "    # Count of each (pid, o_d) pair\n",
    "    grouped = all_data['o_d'].groupby(all_data['pid'])\n",
    "    pidCombineOD = grouped.value_counts().rename('pid_od_counts').reset_index()\n",
    "    all_data = all_data.merge(pidCombineOD, on=['pid', 'o_d'], how='left')\n",
    "    \n",
    "    # Pairs that never occur get count 0\n",
    "    all_data['pid_o_counts'] = all_data['pid_o_counts'].fillna(0)\n",
    "    all_data['pid_d_counts'] = all_data['pid_d_counts'].fillna(0)\n",
    "    all_data['pid_od_counts'] = all_data['pid_od_counts'].fillna(0)\n",
    "    \n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7、特征组合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def combine_feature(all_data):\n",
    "    \"\"\"Label-encode pairwise combinations of pid / od / oisd with the\n",
    "    plan-derived mode features, then drop the raw o/d columns.\n",
    "\n",
    "    Each combination is the string concatenation of its two columns,\n",
    "    label-encoded into an integer id (fit_transform refits per column).\n",
    "    \"\"\"\n",
    "    le = LabelEncoder()\n",
    "    \n",
    "    # (new column, left column, right column) -- one loop replaces ten\n",
    "    # copy-pasted stanzas; the order matches the original column order\n",
    "    combos = [\n",
    "        ('combine_pid_od',    'pid',  'od'),\n",
    "        ('combine_pid_fm',    'pid',  'first_mode'),\n",
    "        ('combine_od_fm',     'od',   'first_mode'),\n",
    "        ('oisd_fm',           'oisd', 'first_mode'),\n",
    "        ('combine_pid_maxdm', 'pid',  'max_dis_mode'),\n",
    "        ('combine_pid_mindm', 'pid',  'min_dis_mode'),\n",
    "        ('combine_pid_maxpm', 'pid',  'max_price_mode'),\n",
    "        ('combine_pid_minpm', 'pid',  'min_price_mode'),\n",
    "        ('combine_pid_maxem', 'pid',  'max_eta_mode'),\n",
    "        ('combine_pid_minem', 'pid',  'min_eta_mode'),\n",
    "    ]\n",
    "    for new_col, left, right in combos:\n",
    "        combined = all_data[left].astype(str) + all_data[right].astype(str)\n",
    "        all_data[new_col] = le.fit_transform(combined)\n",
    "    \n",
    "    # Raw origin/destination columns are no longer needed\n",
    "    all_data = all_data.drop(['o', 'd', 'oisd', 'od'], axis=1)\n",
    "    \n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>sid</th>\n",
       "      <th>pid</th>\n",
       "      <th>req_time</th>\n",
       "      <th>click_mode</th>\n",
       "      <th>o1</th>\n",
       "      <th>o2</th>\n",
       "      <th>d1</th>\n",
       "      <th>d2</th>\n",
       "      <th>o_d</th>\n",
       "      <th>svd_mode_0</th>\n",
       "      <th>...</th>\n",
       "      <th>combine_pid_od</th>\n",
       "      <th>combine_pid_fm</th>\n",
       "      <th>combine_od_fm</th>\n",
       "      <th>oisd_fm</th>\n",
       "      <th>combine_pid_maxdm</th>\n",
       "      <th>combine_pid_mindm</th>\n",
       "      <th>combine_pid_maxpm</th>\n",
       "      <th>combine_pid_minpm</th>\n",
       "      <th>combine_pid_maxem</th>\n",
       "      <th>combine_pid_minem</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>3000821</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>2018-11-02 17:54:30</td>\n",
       "      <td>9.0</td>\n",
       "      <td>116.29</td>\n",
       "      <td>39.97</td>\n",
       "      <td>116.32</td>\n",
       "      <td>39.96</td>\n",
       "      <td>50736</td>\n",
       "      <td>0.371088</td>\n",
       "      <td>...</td>\n",
       "      <td>17718</td>\n",
       "      <td>11</td>\n",
       "      <td>60125</td>\n",
       "      <td>11</td>\n",
       "      <td>4</td>\n",
       "      <td>7</td>\n",
       "      <td>6</td>\n",
       "      <td>5</td>\n",
       "      <td>7</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>3085857</td>\n",
       "      <td>210736.0</td>\n",
       "      <td>2018-11-16 10:53:10</td>\n",
       "      <td>1.0</td>\n",
       "      <td>116.39</td>\n",
       "      <td>39.84</td>\n",
       "      <td>116.33</td>\n",
       "      <td>39.79</td>\n",
       "      <td>132718</td>\n",
       "      <td>0.509334</td>\n",
       "      <td>...</td>\n",
       "      <td>451840</td>\n",
       "      <td>118061</td>\n",
       "      <td>173093</td>\n",
       "      <td>9</td>\n",
       "      <td>123757</td>\n",
       "      <td>118189</td>\n",
       "      <td>67614</td>\n",
       "      <td>72885</td>\n",
       "      <td>109045</td>\n",
       "      <td>79448</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2944522</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>2018-10-06 10:33:58</td>\n",
       "      <td>9.0</td>\n",
       "      <td>116.31</td>\n",
       "      <td>39.93</td>\n",
       "      <td>116.27</td>\n",
       "      <td>40.00</td>\n",
       "      <td>67656</td>\n",
       "      <td>0.376407</td>\n",
       "      <td>...</td>\n",
       "      <td>25028</td>\n",
       "      <td>11</td>\n",
       "      <td>82715</td>\n",
       "      <td>11</td>\n",
       "      <td>9</td>\n",
       "      <td>8</td>\n",
       "      <td>6</td>\n",
       "      <td>5</td>\n",
       "      <td>9</td>\n",
       "      <td>5</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>559931</td>\n",
       "      <td>202427.0</td>\n",
       "      <td>2018-11-23 14:54:11</td>\n",
       "      <td>1.0</td>\n",
       "      <td>116.27</td>\n",
       "      <td>39.88</td>\n",
       "      <td>116.39</td>\n",
       "      <td>39.90</td>\n",
       "      <td>35206</td>\n",
       "      <td>0.427644</td>\n",
       "      <td>...</td>\n",
       "      <td>425368</td>\n",
       "      <td>109628</td>\n",
       "      <td>40252</td>\n",
       "      <td>2</td>\n",
       "      <td>114878</td>\n",
       "      <td>109716</td>\n",
       "      <td>62741</td>\n",
       "      <td>67633</td>\n",
       "      <td>101150</td>\n",
       "      <td>73694</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>2819352</td>\n",
       "      <td>172251.0</td>\n",
       "      <td>2018-10-30 11:48:41</td>\n",
       "      <td>7.0</td>\n",
       "      <td>116.34</td>\n",
       "      <td>39.96</td>\n",
       "      <td>116.37</td>\n",
       "      <td>39.86</td>\n",
       "      <td>97471</td>\n",
       "      <td>0.513487</td>\n",
       "      <td>...</td>\n",
       "      <td>330980</td>\n",
       "      <td>76989</td>\n",
       "      <td>124090</td>\n",
       "      <td>9</td>\n",
       "      <td>80702</td>\n",
       "      <td>77115</td>\n",
       "      <td>44051</td>\n",
       "      <td>47400</td>\n",
       "      <td>71005</td>\n",
       "      <td>51705</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 131 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       sid       pid            req_time  click_mode      o1     o2      d1  \\\n",
       "0  3000821      -1.0 2018-11-02 17:54:30         9.0  116.29  39.97  116.32   \n",
       "1  3085857  210736.0 2018-11-16 10:53:10         1.0  116.39  39.84  116.33   \n",
       "2  2944522      -1.0 2018-10-06 10:33:58         9.0  116.31  39.93  116.27   \n",
       "3   559931  202427.0 2018-11-23 14:54:11         1.0  116.27  39.88  116.39   \n",
       "4  2819352  172251.0 2018-10-30 11:48:41         7.0  116.34  39.96  116.37   \n",
       "\n",
       "      d2     o_d  svd_mode_0        ...          combine_pid_od  \\\n",
       "0  39.96   50736    0.371088        ...                   17718   \n",
       "1  39.79  132718    0.509334        ...                  451840   \n",
       "2  40.00   67656    0.376407        ...                   25028   \n",
       "3  39.90   35206    0.427644        ...                  425368   \n",
       "4  39.86   97471    0.513487        ...                  330980   \n",
       "\n",
       "   combine_pid_fm  combine_od_fm  oisd_fm  combine_pid_maxdm  \\\n",
       "0              11          60125       11                  4   \n",
       "1          118061         173093        9             123757   \n",
       "2              11          82715       11                  9   \n",
       "3          109628          40252        2             114878   \n",
       "4           76989         124090        9              80702   \n",
       "\n",
       "   combine_pid_mindm  combine_pid_maxpm  combine_pid_minpm  combine_pid_maxem  \\\n",
       "0                  7                  6                  5                  7   \n",
       "1             118189              67614              72885             109045   \n",
       "2                  8                  6                  5                  9   \n",
       "3             109716              62741              67633             101150   \n",
       "4              77115              44051              47400              71005   \n",
       "\n",
       "   combine_pid_minem  \n",
       "0                  5  \n",
       "1              79448  \n",
       "2                  5  \n",
       "3              73694  \n",
       "4              51705  \n",
       "\n",
       "[5 rows x 131 columns]"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "all_data.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 7、切分数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_test_split(all_data):\n",
    "    \"\"\"Split the merged feature frame into train and test parts.\n",
    "\n",
    "    Rows with click_mode == -1 are the test set (label never observed);\n",
    "    all other rows form the training set.  req_time is deliberately kept\n",
    "    in BOTH train_x and train_y so that later cells can slice train/valid\n",
    "    by date while keeping features and labels aligned.\n",
    "\n",
    "    Returns (train_x, train_y, test_x, submit); submit holds the test-set\n",
    "    sid column for building the submission file.\n",
    "    \"\"\"\n",
    "    train_data = all_data[all_data['click_mode']!=-1]\n",
    "    test_data = all_data[all_data['click_mode']==-1]\n",
    "    test_data = test_data.drop(['click_mode'], axis=1)\n",
    "    # grab sid for the submission file before it is dropped from test_x\n",
    "    submit = test_data[['sid']].copy()\n",
    "    \n",
    "    # note: pid is dropped here, so it must NOT appear in any\n",
    "    # categorical_feature list passed to LightGBM downstream\n",
    "    train_data = train_data.drop(['sid', 'pid'], axis=1)\n",
    "    train_y = train_data[['req_time','click_mode']]\n",
    "    train_x = train_data.drop(['click_mode'], axis=1)\n",
    "    test_x = test_data.drop(['sid','req_time','pid'], axis=1)\n",
    "    \n",
    "    return train_x, train_y, test_x, submit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "594358it [01:33, 6385.55it/s]\n"
     ]
    }
   ],
   "source": [
    "# Full feature pipeline: merge raw tables, then add origin/destination,\n",
    "# plan, user-profile, time, pid and interaction features before splitting.\n",
    "all_data = merge_data()\n",
    "all_data = gen_od_feature(all_data)\n",
    "all_data = gen_plan_feature(all_data)\n",
    "all_data = gen_profiles_feature(all_data)\n",
    "all_data = gen_time_feature(all_data)\n",
    "all_data = gen_pid_feature(all_data)\n",
    "all_data = combine_feature(all_data)\n",
    "train_x, train_y, test_x, submit = train_test_split(all_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 8、国庆嫁接"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.metrics import f1_score\n",
    "from time import gmtime, strftime\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "# Evaluation metric: weighted F1 over the 12 transport-mode classes.\n",
    "# LightGBM hands feval the raw scores flattened class-major, hence the\n",
    "# reshape(12, -1).T before taking the argmax.\n",
    "def f1_weighted(y_true, y_pred):\n",
    "    y_pred = y_pred.reshape(12, -1).T\n",
    "    y_pred = np.argmax(y_pred, axis=1)\n",
    "    score = f1_score(y_true, y_pred, average='weighted')\n",
    "    return 'weighted-f1-score', score, True\n",
    "\n",
    "# Train only on National Day holiday data (requests before 2018-10-08)\n",
    "tr_x = train_x[train_x.req_time < '2018-10-08']\n",
    "tr_y = train_y[train_y.req_time < '2018-10-08']\n",
    "tr_x = tr_x.drop(['req_time'], axis=1)\n",
    "tr_y = tr_y.drop(['req_time'], axis=1)\n",
    "\n",
    "# Score all remaining training rows plus the real test set\n",
    "te_x = train_x[train_x.req_time >= '2018-10-08']\n",
    "te_x = te_x.drop(['req_time'], axis=1)\n",
    "te_x = pd.concat([te_x, test_x], axis=0)\n",
    "\n",
    "# FIX: 'pid' removed from this list — train_test_split drops the pid\n",
    "# column from train_x/test_x, and passing a column name that is not in\n",
    "# the data to LightGBM's categorical_feature fails at fit time.\n",
    "categorical_feature = ['max_dis_mode', 'min_dis_mode', 'max_price_mode', 'min_price_mode',\n",
    "                       'max_eta_mode', 'min_eta_mode', 'first_mode']\n",
    "\n",
    "lgb = LGBMClassifier(boosting_type='gbdt', num_leaves=61, objective='multiclass', reg_alpha=0, reg_lambda=0.01, max_depth=1, \n",
    "                    n_estimators=2000, subsample=0.8, colsample_bytree=0.8, subsample_freq=1, min_child_samples=50,\n",
    "                    learning_rate=0.05, random_state=2019, metric='multiclass', n_jobs=-1)\n",
    "lgb.fit(tr_x, tr_y, categorical_feature=categorical_feature)\n",
    "\n",
    "y_hat = lgb.predict(te_x)\n",
    "y_pred = lgb.predict_proba(te_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predicted transport modes for the grafted (non-holiday + test) rows\n",
    "y_hat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Class-probability matrix (n_samples x 12) for the same rows\n",
    "y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 9、模型训练&验证&提交"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training until validation scores don't improve for 100 rounds.\n",
      "[10]\tvalid_0's multi_logloss: 1.55541\tvalid_0's weighted-f1-score: 0.60418\n",
      "[20]\tvalid_0's multi_logloss: 1.33314\tvalid_0's weighted-f1-score: 0.635889\n",
      "[30]\tvalid_0's multi_logloss: 1.20069\tvalid_0's weighted-f1-score: 0.664579\n",
      "[40]\tvalid_0's multi_logloss: 1.11522\tvalid_0's weighted-f1-score: 0.667863\n",
      "[50]\tvalid_0's multi_logloss: 1.05645\tvalid_0's weighted-f1-score: 0.671195\n",
      "[60]\tvalid_0's multi_logloss: 1.01521\tvalid_0's weighted-f1-score: 0.672498\n",
      "[70]\tvalid_0's multi_logloss: 0.984733\tvalid_0's weighted-f1-score: 0.672988\n",
      "[80]\tvalid_0's multi_logloss: 0.962333\tvalid_0's weighted-f1-score: 0.673742\n",
      "[90]\tvalid_0's multi_logloss: 0.945165\tvalid_0's weighted-f1-score: 0.674552\n",
      "[100]\tvalid_0's multi_logloss: 0.93212\tvalid_0's weighted-f1-score: 0.676264\n",
      "[110]\tvalid_0's multi_logloss: 0.921795\tvalid_0's weighted-f1-score: 0.676446\n",
      "[120]\tvalid_0's multi_logloss: 0.913815\tvalid_0's weighted-f1-score: 0.676815\n",
      "[130]\tvalid_0's multi_logloss: 0.907292\tvalid_0's weighted-f1-score: 0.676951\n",
      "[140]\tvalid_0's multi_logloss: 0.902064\tvalid_0's weighted-f1-score: 0.678033\n",
      "[150]\tvalid_0's multi_logloss: 0.897557\tvalid_0's weighted-f1-score: 0.678151\n",
      "[160]\tvalid_0's multi_logloss: 0.893799\tvalid_0's weighted-f1-score: 0.678424\n",
      "[170]\tvalid_0's multi_logloss: 0.890593\tvalid_0's weighted-f1-score: 0.6787\n",
      "[180]\tvalid_0's multi_logloss: 0.888073\tvalid_0's weighted-f1-score: 0.678845\n",
      "[190]\tvalid_0's multi_logloss: 0.885869\tvalid_0's weighted-f1-score: 0.678997\n",
      "[200]\tvalid_0's multi_logloss: 0.883999\tvalid_0's weighted-f1-score: 0.679092\n",
      "[210]\tvalid_0's multi_logloss: 0.882365\tvalid_0's weighted-f1-score: 0.679426\n",
      "[220]\tvalid_0's multi_logloss: 0.880977\tvalid_0's weighted-f1-score: 0.679602\n",
      "[230]\tvalid_0's multi_logloss: 0.879595\tvalid_0's weighted-f1-score: 0.681085\n",
      "[240]\tvalid_0's multi_logloss: 0.878519\tvalid_0's weighted-f1-score: 0.681553\n",
      "[250]\tvalid_0's multi_logloss: 0.877503\tvalid_0's weighted-f1-score: 0.681687\n",
      "[260]\tvalid_0's multi_logloss: 0.8767\tvalid_0's weighted-f1-score: 0.682315\n",
      "[270]\tvalid_0's multi_logloss: 0.87603\tvalid_0's weighted-f1-score: 0.682794\n",
      "[280]\tvalid_0's multi_logloss: 0.875398\tvalid_0's weighted-f1-score: 0.683111\n",
      "[290]\tvalid_0's multi_logloss: 0.87481\tvalid_0's weighted-f1-score: 0.68364\n",
      "[300]\tvalid_0's multi_logloss: 0.874236\tvalid_0's weighted-f1-score: 0.683738\n",
      "[310]\tvalid_0's multi_logloss: 0.873677\tvalid_0's weighted-f1-score: 0.683963\n",
      "[320]\tvalid_0's multi_logloss: 0.87322\tvalid_0's weighted-f1-score: 0.684077\n",
      "[330]\tvalid_0's multi_logloss: 0.872823\tvalid_0's weighted-f1-score: 0.684166\n",
      "[340]\tvalid_0's multi_logloss: 0.872503\tvalid_0's weighted-f1-score: 0.684588\n",
      "[350]\tvalid_0's multi_logloss: 0.872072\tvalid_0's weighted-f1-score: 0.684885\n",
      "[360]\tvalid_0's multi_logloss: 0.871641\tvalid_0's weighted-f1-score: 0.685207\n",
      "[370]\tvalid_0's multi_logloss: 0.871394\tvalid_0's weighted-f1-score: 0.685397\n",
      "[380]\tvalid_0's multi_logloss: 0.871109\tvalid_0's weighted-f1-score: 0.685502\n",
      "[390]\tvalid_0's multi_logloss: 0.870804\tvalid_0's weighted-f1-score: 0.685649\n",
      "[400]\tvalid_0's multi_logloss: 0.870487\tvalid_0's weighted-f1-score: 0.685857\n",
      "[410]\tvalid_0's multi_logloss: 0.870245\tvalid_0's weighted-f1-score: 0.686055\n",
      "[420]\tvalid_0's multi_logloss: 0.870023\tvalid_0's weighted-f1-score: 0.686372\n",
      "[430]\tvalid_0's multi_logloss: 0.86981\tvalid_0's weighted-f1-score: 0.68647\n",
      "[440]\tvalid_0's multi_logloss: 0.869601\tvalid_0's weighted-f1-score: 0.686498\n",
      "[450]\tvalid_0's multi_logloss: 0.869454\tvalid_0's weighted-f1-score: 0.686409\n",
      "[460]\tvalid_0's multi_logloss: 0.869256\tvalid_0's weighted-f1-score: 0.686657\n",
      "[470]\tvalid_0's multi_logloss: 0.869053\tvalid_0's weighted-f1-score: 0.686702\n",
      "[480]\tvalid_0's multi_logloss: 0.868925\tvalid_0's weighted-f1-score: 0.686731\n",
      "[490]\tvalid_0's multi_logloss: 0.868757\tvalid_0's weighted-f1-score: 0.686734\n",
      "[500]\tvalid_0's multi_logloss: 0.868628\tvalid_0's weighted-f1-score: 0.686755\n",
      "[510]\tvalid_0's multi_logloss: 0.868487\tvalid_0's weighted-f1-score: 0.68672\n",
      "[520]\tvalid_0's multi_logloss: 0.868402\tvalid_0's weighted-f1-score: 0.686919\n",
      "[530]\tvalid_0's multi_logloss: 0.86832\tvalid_0's weighted-f1-score: 0.687146\n",
      "[540]\tvalid_0's multi_logloss: 0.86819\tvalid_0's weighted-f1-score: 0.68716\n",
      "[550]\tvalid_0's multi_logloss: 0.868049\tvalid_0's weighted-f1-score: 0.687212\n",
      "[560]\tvalid_0's multi_logloss: 0.867956\tvalid_0's weighted-f1-score: 0.687317\n",
      "[570]\tvalid_0's multi_logloss: 0.867931\tvalid_0's weighted-f1-score: 0.687326\n",
      "[580]\tvalid_0's multi_logloss: 0.867796\tvalid_0's weighted-f1-score: 0.687448\n",
      "[590]\tvalid_0's multi_logloss: 0.867659\tvalid_0's weighted-f1-score: 0.68753\n",
      "[600]\tvalid_0's multi_logloss: 0.867574\tvalid_0's weighted-f1-score: 0.687472\n",
      "[610]\tvalid_0's multi_logloss: 0.867513\tvalid_0's weighted-f1-score: 0.687528\n",
      "[620]\tvalid_0's multi_logloss: 0.867449\tvalid_0's weighted-f1-score: 0.687613\n",
      "[630]\tvalid_0's multi_logloss: 0.867297\tvalid_0's weighted-f1-score: 0.687708\n",
      "[640]\tvalid_0's multi_logloss: 0.867249\tvalid_0's weighted-f1-score: 0.687735\n",
      "[650]\tvalid_0's multi_logloss: 0.867143\tvalid_0's weighted-f1-score: 0.687817\n",
      "[660]\tvalid_0's multi_logloss: 0.867063\tvalid_0's weighted-f1-score: 0.687793\n",
      "[670]\tvalid_0's multi_logloss: 0.866958\tvalid_0's weighted-f1-score: 0.687887\n",
      "[680]\tvalid_0's multi_logloss: 0.866892\tvalid_0's weighted-f1-score: 0.687876\n",
      "[690]\tvalid_0's multi_logloss: 0.866843\tvalid_0's weighted-f1-score: 0.687886\n",
      "[700]\tvalid_0's multi_logloss: 0.866763\tvalid_0's weighted-f1-score: 0.687858\n",
      "[710]\tvalid_0's multi_logloss: 0.866719\tvalid_0's weighted-f1-score: 0.687834\n",
      "[720]\tvalid_0's multi_logloss: 0.86663\tvalid_0's weighted-f1-score: 0.687803\n",
      "[730]\tvalid_0's multi_logloss: 0.866559\tvalid_0's weighted-f1-score: 0.687793\n",
      "[740]\tvalid_0's multi_logloss: 0.866523\tvalid_0's weighted-f1-score: 0.687904\n",
      "[750]\tvalid_0's multi_logloss: 0.866479\tvalid_0's weighted-f1-score: 0.687838\n",
      "[760]\tvalid_0's multi_logloss: 0.866433\tvalid_0's weighted-f1-score: 0.687892\n",
      "[770]\tvalid_0's multi_logloss: 0.866394\tvalid_0's weighted-f1-score: 0.687909\n",
      "[780]\tvalid_0's multi_logloss: 0.866323\tvalid_0's weighted-f1-score: 0.687979\n",
      "[790]\tvalid_0's multi_logloss: 0.866249\tvalid_0's weighted-f1-score: 0.688083\n",
      "[800]\tvalid_0's multi_logloss: 0.866176\tvalid_0's weighted-f1-score: 0.68803\n",
      "[810]\tvalid_0's multi_logloss: 0.866137\tvalid_0's weighted-f1-score: 0.688055\n",
      "[820]\tvalid_0's multi_logloss: 0.866108\tvalid_0's weighted-f1-score: 0.688112\n",
      "[830]\tvalid_0's multi_logloss: 0.866016\tvalid_0's weighted-f1-score: 0.688178\n",
      "[840]\tvalid_0's multi_logloss: 0.865966\tvalid_0's weighted-f1-score: 0.688201\n",
      "[850]\tvalid_0's multi_logloss: 0.865944\tvalid_0's weighted-f1-score: 0.688175\n",
      "[860]\tvalid_0's multi_logloss: 0.865907\tvalid_0's weighted-f1-score: 0.688287\n",
      "[870]\tvalid_0's multi_logloss: 0.865897\tvalid_0's weighted-f1-score: 0.68838\n",
      "[880]\tvalid_0's multi_logloss: 0.865854\tvalid_0's weighted-f1-score: 0.688417\n",
      "[890]\tvalid_0's multi_logloss: 0.865814\tvalid_0's weighted-f1-score: 0.68848\n",
      "[900]\tvalid_0's multi_logloss: 0.865773\tvalid_0's weighted-f1-score: 0.688484\n",
      "[910]\tvalid_0's multi_logloss: 0.865737\tvalid_0's weighted-f1-score: 0.688425\n",
      "[920]\tvalid_0's multi_logloss: 0.865709\tvalid_0's weighted-f1-score: 0.688423\n",
      "[930]\tvalid_0's multi_logloss: 0.865689\tvalid_0's weighted-f1-score: 0.688417\n",
      "[940]\tvalid_0's multi_logloss: 0.865669\tvalid_0's weighted-f1-score: 0.688487\n",
      "[950]\tvalid_0's multi_logloss: 0.865609\tvalid_0's weighted-f1-score: 0.688349\n",
      "[960]\tvalid_0's multi_logloss: 0.865604\tvalid_0's weighted-f1-score: 0.688377\n",
      "[970]\tvalid_0's multi_logloss: 0.865595\tvalid_0's weighted-f1-score: 0.68832\n",
      "[980]\tvalid_0's multi_logloss: 0.865561\tvalid_0's weighted-f1-score: 0.688357\n",
      "[990]\tvalid_0's multi_logloss: 0.865536\tvalid_0's weighted-f1-score: 0.688396\n",
      "Early stopping, best iteration is:\n",
      "[898]\tvalid_0's multi_logloss: 0.865787\tvalid_0's weighted-f1-score: 0.688549\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=0.8,\n",
       "        importance_type='split', learning_rate=0.05, max_depth=1,\n",
       "        metric='multiclass', min_child_samples=50, min_child_weight=0.001,\n",
       "        min_split_gain=0.0, n_estimators=2000, n_jobs=-1, num_leaves=61,\n",
       "        objective='multiclass', random_state=2019, reg_alpha=0,\n",
       "        reg_lambda=0.01, silent=True, subsample=0.8,\n",
       "        subsample_for_bin=200000, subsample_freq=1)"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.metrics import f1_score\n",
    "from time import gmtime, strftime\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "# Evaluation metric: weighted F1 over the 12 mode classes; LightGBM's\n",
    "# feval receives scores flattened class-major, hence reshape(12, -1).T.\n",
    "# NOTE(review): duplicates f1_weighted from the grafting cell above —\n",
    "# whichever cell ran last defines the version living in the kernel.\n",
    "def f1_weighted(y_true, y_pred):\n",
    "    y_pred = y_pred.reshape(12, -1).T\n",
    "    y_pred = np.argmax(y_pred, axis=1)\n",
    "    score = f1_score(y_true, y_pred, average='weighted')\n",
    "    return 'weighted-f1-score', score, True\n",
    "\n",
    "# Time-based split: the last seven days (from 2018-11-24) form the\n",
    "# validation set, everything earlier is used for fitting.\n",
    "tra_x = train_x[train_x.req_time < '2018-11-24']\n",
    "tra_y = train_y[train_y.req_time < '2018-11-24']\n",
    "valid_x = train_x[train_x.req_time >= '2018-11-24']\n",
    "valid_y = train_y[train_y.req_time >= '2018-11-24']\n",
    "\n",
    "# req_time has served its purpose; drop it before fitting\n",
    "tra_x = tra_x.drop(['req_time'], axis=1)\n",
    "tra_y = tra_y.drop(['req_time'], axis=1)\n",
    "valid_x = valid_x.drop(['req_time'], axis=1)\n",
    "valid_y = valid_y.drop(['req_time'], axis=1)\n",
    "\n",
    "categorical_feature = ['max_dis_mode', 'min_dis_mode', 'max_price_mode', 'min_price_mode',\n",
    "                       'max_eta_mode', 'min_eta_mode', 'first_mode', 'o_d', 'pid_od_counts']\n",
    "\n",
    "lgb = LGBMClassifier(boosting_type='gbdt', num_leaves=61, objective='multiclass', reg_alpha=0, reg_lambda=0.01, max_depth=1, \n",
    "                    n_estimators=2000, subsample=0.8, colsample_bytree=0.8, subsample_freq=1, min_child_samples=50,\n",
    "                    learning_rate=0.05, random_state=2019, metric='multiclass', n_jobs=-1)\n",
    "eval_set = [(valid_x, valid_y)]\n",
    "lgb.fit(tra_x, tra_y, eval_set=eval_set, eval_metric=f1_weighted, categorical_feature=categorical_feature, verbose=10, early_stopping_rounds=100)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 特征重要度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>feature</th>\n",
       "      <th>imp</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>o_d</td>\n",
       "      <td>4258</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>45</th>\n",
       "      <td>first_mode</td>\n",
       "      <td>720</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>115</th>\n",
       "      <td>pid_d_counts</td>\n",
       "      <td>248</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>mode_feas_2</td>\n",
       "      <td>243</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>mode_feas_10</td>\n",
       "      <td>237</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>mode_feas_9</td>\n",
       "      <td>236</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>mode_feas_11</td>\n",
       "      <td>230</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>mode_feas_1</td>\n",
       "      <td>225</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>mode_feas_7</td>\n",
       "      <td>221</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>mode_feas_8</td>\n",
       "      <td>196</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>46</th>\n",
       "      <td>p0</td>\n",
       "      <td>188</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>min_distance</td>\n",
       "      <td>184</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>mode_feas_5</td>\n",
       "      <td>173</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>116</th>\n",
       "      <td>pid_od_counts</td>\n",
       "      <td>162</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>mode_feas_6</td>\n",
       "      <td>160</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>svd_mode_0</td>\n",
       "      <td>146</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>svd_mode_5</td>\n",
       "      <td>145</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>38</th>\n",
       "      <td>std_eta</td>\n",
       "      <td>131</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>114</th>\n",
       "      <td>pid_o_counts</td>\n",
       "      <td>129</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>43</th>\n",
       "      <td>max_eta_mode</td>\n",
       "      <td>125</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>mode_feas_4</td>\n",
       "      <td>118</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>mode_feas_3</td>\n",
       "      <td>111</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>113</th>\n",
       "      <td>hourofday</td>\n",
       "      <td>109</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>svd_mode_2</td>\n",
       "      <td>99</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>d2</td>\n",
       "      <td>97</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>svd_mode_3</td>\n",
       "      <td>97</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>svd_mode_4</td>\n",
       "      <td>96</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44</th>\n",
       "      <td>min_eta_mode</td>\n",
       "      <td>93</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>76</th>\n",
       "      <td>p30</td>\n",
       "      <td>82</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>svd_mode_6</td>\n",
       "      <td>75</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>55</th>\n",
       "      <td>p9</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>91</th>\n",
       "      <td>p45</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>96</th>\n",
       "      <td>p50</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>95</th>\n",
       "      <td>p49</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>58</th>\n",
       "      <td>p12</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>59</th>\n",
       "      <td>p13</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>60</th>\n",
       "      <td>p14</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>62</th>\n",
       "      <td>p16</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>64</th>\n",
       "      <td>p18</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>65</th>\n",
       "      <td>p19</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>66</th>\n",
       "      <td>p20</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>67</th>\n",
       "      <td>p21</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>69</th>\n",
       "      <td>p23</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>70</th>\n",
       "      <td>p24</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>71</th>\n",
       "      <td>p25</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>73</th>\n",
       "      <td>p27</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>74</th>\n",
       "      <td>p28</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>49</th>\n",
       "      <td>p3</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>84</th>\n",
       "      <td>p38</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>85</th>\n",
       "      <td>p39</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>86</th>\n",
       "      <td>p40</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>87</th>\n",
       "      <td>p41</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>88</th>\n",
       "      <td>p42</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>89</th>\n",
       "      <td>p43</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>90</th>\n",
       "      <td>p44</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>57</th>\n",
       "      <td>p11</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>92</th>\n",
       "      <td>p46</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>93</th>\n",
       "      <td>p47</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>94</th>\n",
       "      <td>p48</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>63</th>\n",
       "      <td>p17</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>127 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "           feature   imp\n",
       "4              o_d  4258\n",
       "45      first_mode   720\n",
       "115   pid_d_counts   248\n",
       "17     mode_feas_2   243\n",
       "25    mode_feas_10   237\n",
       "24     mode_feas_9   236\n",
       "26    mode_feas_11   230\n",
       "16     mode_feas_1   225\n",
       "22     mode_feas_7   221\n",
       "23     mode_feas_8   196\n",
       "46              p0   188\n",
       "28    min_distance   184\n",
       "20     mode_feas_5   173\n",
       "116  pid_od_counts   162\n",
       "21     mode_feas_6   160\n",
       "5       svd_mode_0   146\n",
       "10      svd_mode_5   145\n",
       "38         std_eta   131\n",
       "114   pid_o_counts   129\n",
       "43    max_eta_mode   125\n",
       "19     mode_feas_4   118\n",
       "18     mode_feas_3   111\n",
       "113      hourofday   109\n",
       "7       svd_mode_2    99\n",
       "3               d2    97\n",
       "8       svd_mode_3    97\n",
       "9       svd_mode_4    96\n",
       "44    min_eta_mode    93\n",
       "76             p30    82\n",
       "11      svd_mode_6    75\n",
       "..             ...   ...\n",
       "55              p9     0\n",
       "91             p45     0\n",
       "96             p50     0\n",
       "95             p49     0\n",
       "58             p12     0\n",
       "59             p13     0\n",
       "60             p14     0\n",
       "62             p16     0\n",
       "64             p18     0\n",
       "65             p19     0\n",
       "66             p20     0\n",
       "67             p21     0\n",
       "69             p23     0\n",
       "70             p24     0\n",
       "71             p25     0\n",
       "73             p27     0\n",
       "74             p28     0\n",
       "49              p3     0\n",
       "84             p38     0\n",
       "85             p39     0\n",
       "86             p40     0\n",
       "87             p41     0\n",
       "88             p42     0\n",
       "89             p43     0\n",
       "90             p44     0\n",
       "57             p11     0\n",
       "92             p46     0\n",
       "93             p47     0\n",
       "94             p48     0\n",
       "63             p17     0\n",
       "\n",
       "[127 rows x 2 columns]"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Split-count feature importances from the fitted booster, highest first.\n",
    "imp = (pd.DataFrame({'feature': tra_x.columns, 'imp': lgb.feature_importances_})\n",
    "         .sort_values('imp', ascending=False))\n",
    "imp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Weighted F1 on the held-out validation week — sanity check against\n",
    "# the score reported during early stopping\n",
    "pre = lgb.predict(valid_x)\n",
    "f1_score(valid_y, pre, average='weighted')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 提交结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Refit on the FULL training set, capping trees at the early-stopped\n",
    "# best iteration found on the validation split\n",
    "x = train_x.drop(['req_time'], axis=1)\n",
    "y = train_y.drop(['req_time'], axis=1)\n",
    "\n",
    "lgb.n_estimators = lgb.best_iteration_\n",
    "lgb.fit(x, y, categorical_feature=categorical_feature)\n",
    "pred_test = lgb.predict(test_x)\n",
    "\n",
    "# Write the submission file, timestamped (GMT) so reruns never overwrite\n",
    "now_time = strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n",
    "submit['recommend_mode'] = pred_test\n",
    "submit.to_csv('submission_{}.csv'.format(now_time), index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 五折"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_test_split(all_data):\n",
    "    \"\"\"Split the combined frame into train features/labels, test features,\n",
    "    and a submission skeleton keyed by sid.\n",
    "\n",
    "    Rows with click_mode == -1 are the test set (see merge_data).\n",
    "    \"\"\"\n",
    "    is_test = all_data['click_mode'] == -1\n",
    "    train_data = all_data[~is_test]\n",
    "    test_data = all_data[is_test].drop(['click_mode'], axis=1)\n",
    "    submit = test_data[['sid']].copy()\n",
    "\n",
    "    # Identifiers and the raw timestamp are not model features.\n",
    "    drop_cols = ['sid', 'pid', 'req_time']\n",
    "    train_data = train_data.drop(drop_cols, axis=1)\n",
    "    train_y = train_data['click_mode']\n",
    "    train_x = train_data.drop(['click_mode'], axis=1)\n",
    "    test_x = test_data.drop(drop_cols, axis=1)\n",
    "\n",
    "    return train_x, train_y, test_x, submit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build train/test matrices, labels, and the submission skeleton.\n",
    "train_x, train_y, test_x, submit = train_test_split(all_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\SoftWare\\Anaconda\\lib\\site-packages\\lightgbm\\basic.py:1205: UserWarning: Using categorical_feature in Dataset.\n",
      "  warnings.warn('Using categorical_feature in Dataset.')\n",
      "E:\\SoftWare\\Anaconda\\lib\\site-packages\\lightgbm\\basic.py:762: UserWarning: categorical_feature in param dict is overridden.\n",
      "  warnings.warn('categorical_feature in param dict is overridden.')\n",
      "E:\\SoftWare\\Anaconda\\lib\\site-packages\\sklearn\\metrics\\classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.0114\tvalid_0's weighted-f1-score: 0.670678\n",
      "[100]\tvalid_0's multi_logloss: 0.918214\tvalid_0's weighted-f1-score: 0.67451\n",
      "[150]\tvalid_0's multi_logloss: 0.898114\tvalid_0's weighted-f1-score: 0.675553\n",
      "[200]\tvalid_0's multi_logloss: 0.893153\tvalid_0's weighted-f1-score: 0.675829\n",
      "[250]\tvalid_0's multi_logloss: 0.892047\tvalid_0's weighted-f1-score: 0.676257\n",
      "[300]\tvalid_0's multi_logloss: 0.892111\tvalid_0's weighted-f1-score: 0.676608\n",
      "Early stopping, best iteration is:\n",
      "[260]\tvalid_0's multi_logloss: 0.892\tvalid_0's weighted-f1-score: 0.676427\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.00734\tvalid_0's weighted-f1-score: 0.672151\n",
      "[100]\tvalid_0's multi_logloss: 0.913239\tvalid_0's weighted-f1-score: 0.675563\n",
      "[150]\tvalid_0's multi_logloss: 0.893202\tvalid_0's weighted-f1-score: 0.676754\n",
      "[200]\tvalid_0's multi_logloss: 0.888276\tvalid_0's weighted-f1-score: 0.676834\n",
      "[250]\tvalid_0's multi_logloss: 0.887232\tvalid_0's weighted-f1-score: 0.677156\n",
      "[300]\tvalid_0's multi_logloss: 0.887455\tvalid_0's weighted-f1-score: 0.676962\n",
      "Early stopping, best iteration is:\n",
      "[255]\tvalid_0's multi_logloss: 0.887208\tvalid_0's weighted-f1-score: 0.677016\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.00941\tvalid_0's weighted-f1-score: 0.670193\n",
      "[100]\tvalid_0's multi_logloss: 0.916061\tvalid_0's weighted-f1-score: 0.673786\n",
      "[150]\tvalid_0's multi_logloss: 0.89616\tvalid_0's weighted-f1-score: 0.675127\n",
      "[200]\tvalid_0's multi_logloss: 0.8913\tvalid_0's weighted-f1-score: 0.675448\n",
      "[250]\tvalid_0's multi_logloss: 0.890061\tvalid_0's weighted-f1-score: 0.675546\n",
      "Early stopping, best iteration is:\n",
      "[241]\tvalid_0's multi_logloss: 0.890201\tvalid_0's weighted-f1-score: 0.6757\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.01287\tvalid_0's weighted-f1-score: 0.669423\n",
      "[100]\tvalid_0's multi_logloss: 0.919526\tvalid_0's weighted-f1-score: 0.673221\n",
      "[150]\tvalid_0's multi_logloss: 0.899384\tvalid_0's weighted-f1-score: 0.674443\n",
      "[200]\tvalid_0's multi_logloss: 0.894069\tvalid_0's weighted-f1-score: 0.67488\n",
      "[250]\tvalid_0's multi_logloss: 0.892684\tvalid_0's weighted-f1-score: 0.675284\n",
      "[300]\tvalid_0's multi_logloss: 0.892686\tvalid_0's weighted-f1-score: 0.675358\n",
      "Early stopping, best iteration is:\n",
      "[266]\tvalid_0's multi_logloss: 0.892608\tvalid_0's weighted-f1-score: 0.675021\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import lightgbm as lgb\n",
    "from sklearn.metrics import f1_score\n",
    "from time import gmtime, strftime\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "NUM_CLASS = 12  # transport modes 0-11; single source for reshape and num_class\n",
    "\n",
    "def f1_weighted(y_pred, train_data):\n",
    "    \"\"\"Custom LightGBM eval metric: weighted F1 over argmax class predictions.\n",
    "\n",
    "    lgb.train hands multiclass scores as one flat array, class-major, so it is\n",
    "    reshaped to (n_samples, n_classes) before taking the argmax.\n",
    "    Returns (name, value, is_higher_better).\n",
    "    \"\"\"\n",
    "    y_true = train_data.label\n",
    "    y_pred = y_pred.reshape(NUM_CLASS, -1).T\n",
    "    y_pred = np.argmax(y_pred, axis=1)\n",
    "    f1 = f1_score(y_true, y_pred, average='weighted')\n",
    "    return 'weighted-f1-score', f1, True\n",
    "\n",
    "kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019)\n",
    "\n",
    "lgb_paras = {\n",
    "    'objective': 'multiclass',\n",
    "    # canonical spelling; 'metrics': 'multiclass' is only an undocumented-looking\n",
    "    # alias pair for the same setting\n",
    "    'metric': 'multi_logloss',\n",
    "    'learning_rate': 0.05,\n",
    "    'num_leaves': 31,\n",
    "    'lambda_l1': 0.01,\n",
    "    'lambda_l2': 10,\n",
    "    'num_class': NUM_CLASS,\n",
    "    'seed': 2019,\n",
    "    'feature_fraction': 0.8,\n",
    "    'bagging_fraction': 0.8,\n",
    "    'bagging_freq': 4\n",
    "}\n",
    "\n",
    "categorical_feature = ['max_dis_mode', 'min_dis_mode', 'max_price_mode', 'min_price_mode',\n",
    "                       'max_eta_mode', 'min_eta_mode', 'first_mode', 'o_d', 'pid_od_counts']\n",
    "scores = []\n",
    "result_proba = []\n",
    "for tra_idx, val_idx in kfold.split(train_x, train_y):\n",
    "    # Use .iloc for the labels too: train_y[tra_idx] is label-based indexing and\n",
    "    # only worked because the index happened to be 0..n-1 after reset_index.\n",
    "    tra_x, tra_y = train_x.iloc[tra_idx], train_y.iloc[tra_idx]\n",
    "    val_x, val_y = train_x.iloc[val_idx], train_y.iloc[val_idx]\n",
    "    train_set = lgb.Dataset(tra_x, tra_y, categorical_feature=categorical_feature)\n",
    "    val_set = lgb.Dataset(val_x, val_y, categorical_feature=categorical_feature)\n",
    "    lgb_model = lgb.train(lgb_paras, train_set, valid_sets=[val_set], early_stopping_rounds=50, num_boost_round=40000, verbose_eval=50, feval=f1_weighted)\n",
    "    val_pred = np.argmax(lgb_model.predict(val_x, num_iteration=lgb_model.best_iteration), axis=1)\n",
    "    val_score = f1_score(val_y, val_pred, average='weighted')\n",
    "    result_proba.append(lgb_model.predict(test_x, num_iteration=lgb_model.best_iteration))\n",
    "    scores.append(val_score)\n",
    "print('cv f1_score:', np.mean(scores))\n",
    "# Average the per-fold class probabilities, then pick the most likely class.\n",
    "pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)\n",
    "\n",
    "# Write the submission, timestamped so earlier runs are not overwritten.\n",
    "now_time = strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n",
    "submit['recommend_mode'] = pred_test\n",
    "submit.to_csv('submission_{}.csv'.format(now_time), index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
