{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import datetime\n",
    "from tqdm import tqdm\n",
    "from geopy.distance import geodesic\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 一、特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1、合并训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def merge_data():\n",
    "    \"\"\"Load the raw CSVs and combine train and test into one frame.\n",
    "\n",
    "    Train rows keep their click_mode label (NaN -> 0 = no click); test\n",
    "    rows are marked with click_mode == -1 so they can be split off later.\n",
    "    \"\"\"\n",
    "    # Click labels (train only).\n",
    "    train_clicks = pd.read_csv(\"data_set_phase1//train_clicks.csv\")\n",
    "\n",
    "    # Feature tables.\n",
    "    train_plans = pd.read_csv(\"data_set_phase1//train_plans.csv\")\n",
    "    train_queries = pd.read_csv(\"data_set_phase1//train_queries.csv\")\n",
    "    test_plans = pd.read_csv(\"data_set_phase1//test_plans.csv\")\n",
    "    test_queries = pd.read_csv(\"data_set_phase1//test_queries.csv\")\n",
    "\n",
    "    # Assemble the training frame: queries + plans + labels.\n",
    "    tra_data = (train_queries\n",
    "                .merge(train_plans, on='sid', how='left')\n",
    "                .merge(train_clicks, on='sid', how='left')\n",
    "                .drop(['click_time'], axis=1))\n",
    "    tra_data['click_mode'] = tra_data['click_mode'].fillna(0)\n",
    "\n",
    "    # Assemble the test frame; -1 marks the unknown label.\n",
    "    tes_data = test_queries.merge(test_plans, on='sid', how='left')\n",
    "    tes_data['click_mode'] = -1\n",
    "\n",
    "    # Stack train on top of test and renumber the rows.\n",
    "    all_data = pd.concat([tra_data, tes_data], axis=0)\n",
    "    all_data = all_data.drop(['plan_time'], axis=1).reset_index(drop=True)\n",
    "\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2、抽取o、d的特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 将o、d分离，添加POI数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_od_feature(all_data):\n",
    "    \"\"\"Derive origin/destination features from the 'o'/'d' \"lng,lat\" strings.\"\"\"\n",
    "    # Split each coordinate string once (vectorized) instead of running\n",
    "    # four separate Python-level .apply passes over the same columns.\n",
    "    o_coords = all_data['o'].str.split(',', expand=True).astype(float)\n",
    "    d_coords = all_data['d'].str.split(',', expand=True).astype(float)\n",
    "    all_data['o1'] = o_coords[0]  # origin longitude\n",
    "    all_data['o2'] = o_coords[1]  # origin latitude\n",
    "    all_data['d1'] = d_coords[0]  # destination longitude\n",
    "    all_data['d2'] = d_coords[1]  # destination latitude\n",
    "\n",
    "    # Encode every (origin, destination) pair as one categorical id.\n",
    "    le = LabelEncoder()\n",
    "    all_data['o_d'] = le.fit_transform(all_data['o'] + all_data['d'])\n",
    "\n",
    "    # Great-circle distance in meters. geodesic expects (lat, lng) tuples,\n",
    "    # hence the (o2, o1) / (d2, d1) ordering.\n",
    "    all_data['o_d_distance'] = all_data.apply(lambda x: geodesic((x.o2, x.o1), (x.d2, x.d1)).m, axis=1)\n",
    "\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>lng_lat</th>\n",
       "      <th>addr</th>\n",
       "      <th>cp</th>\n",
       "      <th>direction</th>\n",
       "      <th>distance</th>\n",
       "      <th>name</th>\n",
       "      <th>parent_poi</th>\n",
       "      <th>poiType</th>\n",
       "      <th>point</th>\n",
       "      <th>tag</th>\n",
       "      <th>tel</th>\n",
       "      <th>uid</th>\n",
       "      <th>zip</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>116.29,39.97</td>\n",
       "      <td>北京市海淀区蓝靛厂居住区世纪城3期春荫园6号楼</td>\n",
       "      <td></td>\n",
       "      <td>北</td>\n",
       "      <td>102</td>\n",
       "      <td>中国建设银行(北京远大中路支行)</td>\n",
       "      <td>{'name': '', 'tag': '', 'addr': '', 'point': {...</td>\n",
       "      <td>金融</td>\n",
       "      <td>{'x': 116.28983435050277, 'y': 39.96930388943203}</td>\n",
       "      <td>金融;银行</td>\n",
       "      <td>NaN</td>\n",
       "      <td>d4034eb38a6c2ff6c0364441</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>116.29,39.97</td>\n",
       "      <td>蓝靛厂中路19号</td>\n",
       "      <td></td>\n",
       "      <td>西南</td>\n",
       "      <td>203</td>\n",
       "      <td>蓝靛厂清真寺</td>\n",
       "      <td>{'name': '', 'tag': '', 'addr': '', 'point': {...</td>\n",
       "      <td>旅游景点</td>\n",
       "      <td>{'x': 116.29121774098813, 'y': 39.971045708291...</td>\n",
       "      <td>旅游景点;教堂</td>\n",
       "      <td>NaN</td>\n",
       "      <td>5191bb9a6696551b1ce99987</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>116.29,39.97</td>\n",
       "      <td>北京市海淀区蓝靛厂春荫园小区5号楼</td>\n",
       "      <td></td>\n",
       "      <td>东北</td>\n",
       "      <td>233</td>\n",
       "      <td>蓝靛厂春荫园</td>\n",
       "      <td>{'name': '', 'tag': '', 'addr': '', 'point': {...</td>\n",
       "      <td>房地产</td>\n",
       "      <td>{'x': 116.2883162141909, 'y': 39.969034318229845}</td>\n",
       "      <td>房地产;住宅区</td>\n",
       "      <td>NaN</td>\n",
       "      <td>34f68ab038f0530a8e55ecac</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>116.29,39.97</td>\n",
       "      <td>北京市海淀区蓝靛厂翠叠园小区9号楼</td>\n",
       "      <td></td>\n",
       "      <td>西北</td>\n",
       "      <td>250</td>\n",
       "      <td>蓝靛厂翠叠园</td>\n",
       "      <td>{'name': '', 'tag': '', 'addr': '', 'point': {...</td>\n",
       "      <td>房地产</td>\n",
       "      <td>{'x': 116.29195435150632, 'y': 39.96913799958826}</td>\n",
       "      <td>房地产;住宅区</td>\n",
       "      <td>NaN</td>\n",
       "      <td>e93761f6bb9d9572223bf270</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>116.29,39.97</td>\n",
       "      <td>蓝晴路与蓝靛厂中路交叉口西北150米</td>\n",
       "      <td></td>\n",
       "      <td>东南</td>\n",
       "      <td>161</td>\n",
       "      <td>曙光街道温馨家园</td>\n",
       "      <td>{'name': '', 'tag': '', 'addr': '', 'point': {...</td>\n",
       "      <td>房地产</td>\n",
       "      <td>{'x': 116.28907977387439, 'y': 39.970859086982...</td>\n",
       "      <td>房地产;住宅区</td>\n",
       "      <td>NaN</td>\n",
       "      <td>7bcfc17f6fa9933e077ce8aa</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        lng_lat                     addr cp direction  distance  \\\n",
       "0  116.29,39.97  北京市海淀区蓝靛厂居住区世纪城3期春荫园6号楼            北       102   \n",
       "1  116.29,39.97                 蓝靛厂中路19号           西南       203   \n",
       "2  116.29,39.97        北京市海淀区蓝靛厂春荫园小区5号楼           东北       233   \n",
       "3  116.29,39.97        北京市海淀区蓝靛厂翠叠园小区9号楼           西北       250   \n",
       "4  116.29,39.97       蓝晴路与蓝靛厂中路交叉口西北150米           东南       161   \n",
       "\n",
       "               name                                         parent_poi  \\\n",
       "0  中国建设银行(北京远大中路支行)  {'name': '', 'tag': '', 'addr': '', 'point': {...   \n",
       "1            蓝靛厂清真寺  {'name': '', 'tag': '', 'addr': '', 'point': {...   \n",
       "2            蓝靛厂春荫园  {'name': '', 'tag': '', 'addr': '', 'point': {...   \n",
       "3            蓝靛厂翠叠园  {'name': '', 'tag': '', 'addr': '', 'point': {...   \n",
       "4          曙光街道温馨家园  {'name': '', 'tag': '', 'addr': '', 'point': {...   \n",
       "\n",
       "  poiType                                              point      tag  tel  \\\n",
       "0      金融  {'x': 116.28983435050277, 'y': 39.96930388943203}    金融;银行  NaN   \n",
       "1    旅游景点  {'x': 116.29121774098813, 'y': 39.971045708291...  旅游景点;教堂  NaN   \n",
       "2     房地产  {'x': 116.2883162141909, 'y': 39.969034318229845}  房地产;住宅区  NaN   \n",
       "3     房地产  {'x': 116.29195435150632, 'y': 39.96913799958826}  房地产;住宅区  NaN   \n",
       "4     房地产  {'x': 116.28907977387439, 'y': 39.970859086982...  房地产;住宅区  NaN   \n",
       "\n",
       "                        uid  zip  \n",
       "0  d4034eb38a6c2ff6c0364441  NaN  \n",
       "1  5191bb9a6696551b1ce99987  NaN  \n",
       "2  34f68ab038f0530a8e55ecac  NaN  \n",
       "3  e93761f6bb9d9572223bf270  NaN  \n",
       "4  7bcfc17f6fa9933e077ce8aa  NaN  "
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 'gbk' names the Chinese-Windows \"ANSI\" codepage explicitly, so this\n",
    "# cell also works on non-Windows platforms where 'ANSI' is not a codec.\n",
    "POI_data = pd.read_csv(\"data_set_phase1//POIs.csv\", encoding='gbk')\n",
    "POI_data.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "房地产     4\n",
       "金融      2\n",
       "购物      1\n",
       "教育培训    1\n",
       "汽车服务    1\n",
       "旅游景点    1\n",
       "Name: poiType, dtype: int64"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# poiType distribution around one sample grid cell; .loc keeps the\n",
    "# row/column selection in a single indexing step.\n",
    "POI_data.loc[POI_data['lng_lat'] == '116.29,39.97', 'poiType'].value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3、抽取plans的特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 提取plans特征\n",
    "### 1、max_distance、min_distance、mean_distance、std_distance\n",
    "### 2、max_price、min_price、mean_price、std_price\n",
    "### 3、max_eta、min_eta、mean_eta、std_eta\n",
    "### 4、max_dis_mode、min_dis_mode、max_price_mode、min_price_mode、max_eta_mode、min_eta_mode\n",
    "### 5、first_mode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_plan_feature(all_data):\n",
    "    \"\"\"Expand the JSON 'plans' column into per-session plan statistics.\n",
    "\n",
    "    Produces a multi-hot of recommended modes (0..11), max/min/mean/std of\n",
    "    distance, price and eta, the modes reaching those extremes, the first\n",
    "    recommended mode, and a 10-dim truncated-SVD embedding of a tf-idf\n",
    "    over the recommendation sequence.\n",
    "    \"\"\"\n",
    "    n = all_data.shape[0]\n",
    "\n",
    "    # Multi-hot of recommended modes; column 0 flags \"no plan returned\".\n",
    "    mode_list_feas = np.zeros((n, 12))\n",
    "\n",
    "    # Per-session distance statistics.\n",
    "    max_distance, min_distance, mean_distance, std_distance = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n",
    "\n",
    "    # Per-session price statistics.\n",
    "    max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n",
    "\n",
    "    # Per-session eta statistics.\n",
    "    max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n",
    "\n",
    "    # Modes at each extreme plus the first recommended mode.\n",
    "    max_dis_mode, min_dis_mode, max_price_mode, min_price_mode, max_eta_mode, min_eta_mode, first_mode = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))\n",
    "\n",
    "    # Recommendation order rendered as a 'word_<mode>' sentence per session.\n",
    "    mode_texts = []\n",
    "\n",
    "    for i, plan in tqdm(enumerate(all_data['plans'].values), total=n):\n",
    "        try:\n",
    "            user_plan_list = json.loads(plan)\n",
    "        except (TypeError, ValueError):\n",
    "            # Missing plans are NaN (TypeError); malformed JSON raises\n",
    "            # ValueError. The previous bare except hid every other bug too.\n",
    "            user_plan_list = []\n",
    "        if len(user_plan_list) == 0:\n",
    "            # No recommendations: flag mode 0 and sentinel all stats with -1.\n",
    "            mode_list_feas[i, 0] = 1\n",
    "            first_mode[i] = 0\n",
    "            for stat in (max_distance, min_distance, mean_distance, std_distance,\n",
    "                         max_price, min_price, mean_price, std_price,\n",
    "                         max_eta, min_eta, mean_eta, std_eta,\n",
    "                         max_dis_mode, min_dis_mode, max_price_mode,\n",
    "                         min_price_mode, max_eta_mode, min_eta_mode):\n",
    "                stat[i] = -1\n",
    "            mode_texts.append('word_null')\n",
    "        else:\n",
    "            distance_list = []\n",
    "            price_list = []\n",
    "            eta_list = []\n",
    "            mode_list = []\n",
    "\n",
    "            # Unpack each recommended plan for this session.\n",
    "            for tmp_dict in user_plan_list:\n",
    "                distance_list.append(int(tmp_dict['distance']))\n",
    "                # An empty price string means free (e.g. walking); store 0.\n",
    "                if tmp_dict['price'] == '':\n",
    "                    price_list.append(0)\n",
    "                else:\n",
    "                    price_list.append(int(tmp_dict['price']))\n",
    "                eta_list.append(int(tmp_dict['eta']))\n",
    "                mode_list.append(int(tmp_dict['transport_mode']))\n",
    "\n",
    "            # Preserve the recommendation order as a text \"sentence\".\n",
    "            mode_texts.append(' '.join(['word_{}'.format(mode) for mode in mode_list]))\n",
    "\n",
    "            distance_list = np.array(distance_list)\n",
    "            price_list = np.array(price_list)\n",
    "            eta_list = np.array(eta_list)\n",
    "            mode_list = np.array(mode_list, dtype='int')\n",
    "\n",
    "            # Mark every recommended mode in the multi-hot row.\n",
    "            mode_list_feas[i, mode_list] = 1\n",
    "\n",
    "            # argsort (not argmax/argmin) keeps the original tie-breaking\n",
    "            # behavior for the *_mode features below.\n",
    "            distance_sort_idx = np.argsort(distance_list)\n",
    "            price_sort_idx = np.argsort(price_list)\n",
    "            eta_sort_idx = np.argsort(eta_list)\n",
    "\n",
    "            max_distance[i] = distance_list[distance_sort_idx[-1]]\n",
    "            min_distance[i] = distance_list[distance_sort_idx[0]]\n",
    "            mean_distance[i] = np.mean(distance_list)\n",
    "            std_distance[i] = np.std(distance_list)\n",
    "\n",
    "            max_price[i] = price_list[price_sort_idx[-1]]\n",
    "            min_price[i] = price_list[price_sort_idx[0]]\n",
    "            mean_price[i] = np.mean(price_list)\n",
    "            std_price[i] = np.std(price_list)\n",
    "\n",
    "            max_eta[i] = eta_list[eta_sort_idx[-1]]\n",
    "            min_eta[i] = eta_list[eta_sort_idx[0]]\n",
    "            mean_eta[i] = np.mean(eta_list)\n",
    "            std_eta[i] = np.std(eta_list)\n",
    "\n",
    "            first_mode[i] = mode_list[0]\n",
    "\n",
    "            max_dis_mode[i] = mode_list[distance_sort_idx[-1]]\n",
    "            min_dis_mode[i] = mode_list[distance_sort_idx[0]]\n",
    "\n",
    "            max_price_mode[i] = mode_list[price_sort_idx[-1]]\n",
    "            min_price_mode[i] = mode_list[price_sort_idx[0]]\n",
    "\n",
    "            max_eta_mode[i] = mode_list[eta_sort_idx[-1]]\n",
    "            min_eta_mode[i] = mode_list[eta_sort_idx[0]]\n",
    "\n",
    "    # Collect the arrays into a frame; dict insertion order fixes the\n",
    "    # column order (identical to the original assignment sequence).\n",
    "    plan_feature_data = pd.DataFrame(mode_list_feas)\n",
    "    plan_feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]\n",
    "    stat_columns = {\n",
    "        'max_distance': max_distance, 'min_distance': min_distance,\n",
    "        'mean_distance': mean_distance, 'std_distance': std_distance,\n",
    "        'max_price': max_price, 'min_price': min_price,\n",
    "        'mean_price': mean_price, 'std_price': std_price,\n",
    "        'max_eta': max_eta, 'min_eta': min_eta,\n",
    "        'mean_eta': mean_eta, 'std_eta': std_eta,\n",
    "        'max_dis_mode': max_dis_mode, 'min_dis_mode': min_dis_mode,\n",
    "        'max_price_mode': max_price_mode, 'min_price_mode': min_price_mode,\n",
    "        'max_eta_mode': max_eta_mode, 'min_eta_mode': min_eta_mode,\n",
    "        'first_mode': first_mode,\n",
    "    }\n",
    "    for name, values in stat_columns.items():\n",
    "        plan_feature_data[name] = values\n",
    "\n",
    "    # tf-idf over the mode sentences, reduced to 10 dims via truncated SVD.\n",
    "    tfidf = TfidfVectorizer(ngram_range=(1, 2))\n",
    "    tfidf_vec = tfidf.fit_transform(mode_texts)\n",
    "    svd = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)\n",
    "    mode_svd = svd.fit_transform(tfidf_vec)\n",
    "\n",
    "    mode_svd = pd.DataFrame(mode_svd)\n",
    "    mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]\n",
    "\n",
    "    all_data = pd.concat([all_data, mode_svd, plan_feature_data], axis=1)\n",
    "    all_data = all_data.drop(['plans'], axis=1)\n",
    "    \n",
    "    return  all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4、抽取profiles数据集特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_profiles_feature(all_data):\n",
    "    \"\"\"Attach the user-profile table to every session.\n",
    "\n",
    "    Sessions without a pid are mapped to a synthetic all-zero profile row\n",
    "    keyed by pid == -1.\n",
    "    \"\"\"\n",
    "    profiles = pd.read_csv(\"data_set_phase1//profiles.csv\")\n",
    "\n",
    "    # Fallback row for queries with no pid: zeros everywhere, pid = -1.\n",
    "    # Width is taken from the file instead of the old hard-coded 67.\n",
    "    filler = np.zeros((1, profiles.shape[1]))\n",
    "    filler[0, 0] = -1  # assumes pid is the first column, as the original did\n",
    "    profiles_na = pd.DataFrame(filler, columns=profiles.columns)\n",
    "    # DataFrame.append was removed in pandas 2.0; concat is the supported\n",
    "    # spelling and behaves identically here.\n",
    "    profiles = pd.concat([profiles, profiles_na], ignore_index=True)\n",
    "\n",
    "    # Missing pids land on the fallback profile row via the -1 key.\n",
    "    all_data['pid'] = all_data['pid'].fillna(-1)\n",
    "    all_data = all_data.merge(profiles, on='pid', how='left')\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5、抽取时间特征（req_time）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 距离国庆节的天数、月份、一年中第几天、周几、小时、小时cat、是否是假期、是否是周末"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_time_feature(all_data):\n",
    "    \"\"\"Derive calendar features from the query timestamp (req_time).\"\"\"\n",
    "    all_data['req_time'] = pd.to_datetime(all_data['req_time'])\n",
    "\n",
    "    # Days relative to National Day (2018-10-01), computed on the date\n",
    "    # part only. Vectorized replacement for the per-row strptime loop.\n",
    "    # NOTE: the column name keeps its historical misspelling\n",
    "    # ('NatioinalDay') so downstream consumers are unaffected.\n",
    "    national_day = datetime.datetime(2018, 10, 1)\n",
    "    all_data['NatioinalDay'] = (all_data['req_time'].dt.normalize() - national_day).dt.days\n",
    "    \n",
    "    # \"MM-DD\" string, used only to flag the Oct 1-7 holiday week.\n",
    "    all_data['req_date'] = all_data['req_time'].dt.strftime(\"%m-%d\")\n",
    "    all_data['isholiday'] = all_data['req_date'].isin(['10-01','10-02','10-03','10-04','10-05','10-06','10-07',]).astype(int)\n",
    "    \n",
    "    # Month of year.\n",
    "    all_data['monthofyear'] = all_data['req_time'].dt.month\n",
    "    \n",
    "    # Day of year.\n",
    "    all_data['dayofyear'] = all_data['req_time'].dt.dayofyear\n",
    "    \n",
    "    # Day of week (Monday = 0).\n",
    "    all_data['dayofweek'] = all_data['req_time'].dt.dayofweek\n",
    "    \n",
    "    # Weekend flag. Direct vectorized assignment replaces the chained\n",
    "    # indexing (all_data.isweekend[...] = 1) that only hit a copy and\n",
    "    # raised SettingWithCopyWarning.\n",
    "    all_data['isweekend'] = (all_data['dayofweek'] > 4).astype(int)\n",
    "    \n",
    "    # Hour of day plus a coarse night/morning/afternoon/evening bucket.\n",
    "    all_data['hour'] = all_data['req_time'].dt.hour\n",
    "    all_data['cat_hour'] = all_data.hour.apply(lambda x: 0 if x<=6 else 1 if x<=12 else 2 if x<=18 else 3)\n",
    "    \n",
    "    all_data = all_data.drop(['req_date'], axis=1)\n",
    "    \n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6、提取pid特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 根据lgbm的特征重要度发现，pid是个强特，所以对pid进行特征提取\n",
    "### 1、统计每个pid出现的次数，将次数作为特征\n",
    "### 2、统计每个pid在每个类别中出现的次数（这个有问题）\n",
    "### 3、pid与时间特征的组合出现的次数\n",
    "### 4、pid与时间特征的组合在每个类别中出现的次数（这个有问题）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_pid_feature(all_data):\n",
    "    \"\"\"Frequency features built around pid (user id).\n",
    "\n",
    "    Adds: overall pid count, pid x {o, d, o_d} co-occurrence counts, pid\n",
    "    counts per first_mode value (0..11), and pid counts split by the\n",
    "    holiday / weekend flags. Missing combinations are filled with 0.\n",
    "    \"\"\"\n",
    "\n",
    "    def value_count_frame(series, col_name):\n",
    "        # pid frequency within the rows selected by `series`.\n",
    "        counts = series.value_counts()\n",
    "        return pd.DataFrame({'pid': counts.index, col_name: counts.values})\n",
    "\n",
    "    def combo_count_frame(frame, col, col_name):\n",
    "        # How often each (pid, col) pair occurs. reset_index flattens the\n",
    "        # grouped Series in memory, replacing the original to_csv/read_csv\n",
    "        # round trip (which also required a tidy// directory to exist).\n",
    "        counts = frame[col].groupby(frame['pid']).value_counts()\n",
    "        return counts.rename(col_name).reset_index()\n",
    "\n",
    "    # Overall pid frequency.\n",
    "    pid_counts = value_count_frame(all_data['pid'], 'pid_counts')\n",
    "\n",
    "    # Track every generated count column so NaNs can be zero-filled later.\n",
    "    fill_cols = []\n",
    "\n",
    "    # pid combined with origin, destination and the od pair.\n",
    "    for col, name in [('o', 'pid_o_counts'), ('d', 'pid_d_counts'), ('o_d', 'pid_od_counts')]:\n",
    "        all_data = all_data.merge(combo_count_frame(all_data, col, name), on=['pid', col], how='left')\n",
    "        fill_cols.append(name)\n",
    "\n",
    "    # pid frequency per first recommended mode; one loop replaces twelve\n",
    "    # copy-pasted blocks.\n",
    "    for mode in range(12):\n",
    "        name = 'pid_F{}_counts'.format(mode)\n",
    "        frame = value_count_frame(all_data.pid[all_data['first_mode'] == mode], name)\n",
    "        all_data = all_data.merge(frame, on='pid', how='left')\n",
    "        fill_cols.append(name)\n",
    "\n",
    "    # Overall pid count merged after the F-mode columns, matching the\n",
    "    # original column order.\n",
    "    all_data = all_data.merge(pid_counts, on='pid', how='left')\n",
    "\n",
    "    # pid frequency split by the holiday and weekend flags.\n",
    "    for flag_col, prefix in [('isholiday', 'H'), ('isweekend', 'W')]:\n",
    "        for flag in (0, 1):\n",
    "            name = 'pid_{}{}_counts'.format(prefix, flag)\n",
    "            frame = value_count_frame(all_data.pid[all_data[flag_col] == flag], name)\n",
    "            all_data = all_data.merge(frame, on='pid', how='left')\n",
    "            fill_cols.append(name)\n",
    "\n",
    "    # A missing combination simply never occurred: count 0.\n",
    "    for name in fill_cols:\n",
    "        all_data[name] = all_data[name].fillna(0)\n",
    "\n",
    "    all_data = all_data.drop(['o', 'd'], axis=1)\n",
    "    \n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7、切分数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_test_split(all_data):\n",
    "    \"\"\"Split the combined frame back into train / test pieces.\n",
    "\n",
    "    NOTE: this name shadows sklearn.model_selection.train_test_split; it\n",
    "    is kept for compatibility with existing callers.\n",
    "\n",
    "    Returns (train_x, train_y, test_x, submit): train_y carries both\n",
    "    req_time and click_mode so callers can slice by date, and submit\n",
    "    holds the test sids for the submission file.\n",
    "    \"\"\"\n",
    "    is_test = all_data['click_mode'] == -1\n",
    "\n",
    "    test_data = all_data.loc[is_test].drop(['click_mode'], axis=1)\n",
    "    submit = test_data[['sid']].copy()\n",
    "    test_x = test_data.drop(['sid', 'req_time', 'pid'], axis=1)\n",
    "\n",
    "    train_data = all_data.loc[~is_test].drop(['sid', 'pid'], axis=1)\n",
    "    train_y = train_data[['req_time', 'click_mode']]\n",
    "    train_x = train_data.drop(['click_mode'], axis=1)\n",
    "\n",
    "    return train_x, train_y, test_x, submit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "594358it [01:39, 5970.64it/s]\n",
      "E:\\SoftWare\\Anaconda\\lib\\site-packages\\ipykernel_launcher.py:31: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n"
     ]
    }
   ],
   "source": [
    "# End-to-end feature pipeline: merge the raw tables, then append each\n",
    "# feature group (OD, plan, profile, time, pid) and split into matrices.\n",
    "all_data = merge_data()\n",
    "all_data = gen_od_feature(all_data)\n",
    "all_data = gen_plan_feature(all_data)\n",
    "all_data = gen_profiles_feature(all_data)\n",
    "all_data = gen_time_feature(all_data)\n",
    "all_data = gen_pid_feature(all_data)\n",
    "train_x, train_y, test_x, submit = train_test_split(all_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: row count of the post-2018-10-08 train slice stacked with test_x.\n",
    "t_x = train_x[train_x.req_time >= '2018-10-08']\n",
    "t_y = train_y[train_y.req_time >= '2018-10-08']  # NOTE(review): unused below -- confirm intent\n",
    "x = pd.concat([t_x, test_x], axis=0)\n",
    "x.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 8、国庆嫁接"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.metrics import f1_score\n",
    "from time import gmtime, strftime\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "# Evaluation metric: weighted F1 over the 12 click modes.\n",
    "# NOTE(review): this cell never passes f1_weighted to fit (no eval_set),\n",
    "# so the definition is unused here -- confirm whether it was meant to be.\n",
    "def f1_weighted(y_true, y_pred):\n",
    "    # Reshape the flattened probability array (12 classes) to\n",
    "    # (n_samples, 12), then take the argmax as the predicted class.\n",
    "    y_pred = y_pred.reshape(12, -1).T\n",
    "    y_pred = np.argmax(y_pred, axis=1)\n",
    "    score = f1_score(y_true, y_pred, average='weighted')\n",
    "    return 'weighted-f1-score', score, True\n",
    "\n",
    "# Train on the National Day holiday window (before 2018-10-08).\n",
    "tr_x = train_x[train_x.req_time < '2018-10-08']\n",
    "tr_y = train_y[train_y.req_time < '2018-10-08']\n",
    "tr_x = tr_x.drop(['req_time'], axis=1)\n",
    "tr_y = tr_y.drop(['req_time'], axis=1)\n",
    "\n",
    "# Predict on everything else (remaining train days plus the real test set).\n",
    "te_x = train_x[train_x.req_time >= '2018-10-08']\n",
    "te_x = te_x.drop(['req_time'], axis=1)\n",
    "te_x = pd.concat([te_x, test_x], axis=0)\n",
    "\n",
    "# NOTE(review): train_test_split drops 'pid' from train_x, so 'pid' should\n",
    "# be absent from tr_x here -- confirm this categorical_feature entry.\n",
    "categorical_feature = ['pid', 'max_dis_mode', 'min_dis_mode', 'max_price_mode', 'min_price_mode',\n",
    "                       'max_eta_mode', 'min_eta_mode', 'first_mode']\n",
    "\n",
    "lgb = LGBMClassifier(boosting_type='gbdt', num_leaves=61, objective='multiclass', reg_alpha=0, reg_lambda=0.01, max_depth=1, \n",
    "                    n_estimators=2000, subsample=0.8, colsample_bytree=0.8, subsample_freq=1, min_child_samples=50,\n",
    "                    learning_rate=0.05, random_state=2019, metric='multiclass', n_jobs=-1)\n",
    "lgb.fit(tr_x, tr_y, categorical_feature=categorical_feature)\n",
    "\n",
    "y_hat = lgb.predict(te_x)\n",
    "y_pred = lgb.predict_proba(te_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect predicted classes for the grafting target slice.\n",
    "y_hat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect predicted class probabilities for the grafting target slice.\n",
    "y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 9、模型训练&验证&提交"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\SoftWare\\Anaconda\\lib\\site-packages\\sklearn\\metrics\\classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training until validation scores don't improve for 100 rounds.\n",
      "[10]\tvalid_0's multi_logloss: 1.55656\tvalid_0's weighted-f1-score: 0.601874\n",
      "[20]\tvalid_0's multi_logloss: 1.33319\tvalid_0's weighted-f1-score: 0.635709\n",
      "[30]\tvalid_0's multi_logloss: 1.20043\tvalid_0's weighted-f1-score: 0.665018\n",
      "[40]\tvalid_0's multi_logloss: 1.11322\tvalid_0's weighted-f1-score: 0.66732\n",
      "[50]\tvalid_0's multi_logloss: 1.05558\tvalid_0's weighted-f1-score: 0.669266\n",
      "[60]\tvalid_0's multi_logloss: 1.01372\tvalid_0's weighted-f1-score: 0.670729\n",
      "[70]\tvalid_0's multi_logloss: 0.983093\tvalid_0's weighted-f1-score: 0.672913\n",
      "[80]\tvalid_0's multi_logloss: 0.9601\tvalid_0's weighted-f1-score: 0.673723\n",
      "[90]\tvalid_0's multi_logloss: 0.942849\tvalid_0's weighted-f1-score: 0.673751\n",
      "[100]\tvalid_0's multi_logloss: 0.929783\tvalid_0's weighted-f1-score: 0.674969\n",
      "[110]\tvalid_0's multi_logloss: 0.919789\tvalid_0's weighted-f1-score: 0.676036\n",
      "[120]\tvalid_0's multi_logloss: 0.911683\tvalid_0's weighted-f1-score: 0.67621\n",
      "[130]\tvalid_0's multi_logloss: 0.904783\tvalid_0's weighted-f1-score: 0.676256\n",
      "[140]\tvalid_0's multi_logloss: 0.899612\tvalid_0's weighted-f1-score: 0.676433\n",
      "[150]\tvalid_0's multi_logloss: 0.89537\tvalid_0's weighted-f1-score: 0.676763\n",
      "[160]\tvalid_0's multi_logloss: 0.891689\tvalid_0's weighted-f1-score: 0.678385\n",
      "[170]\tvalid_0's multi_logloss: 0.888791\tvalid_0's weighted-f1-score: 0.678698\n",
      "[180]\tvalid_0's multi_logloss: 0.886214\tvalid_0's weighted-f1-score: 0.678995\n",
      "[190]\tvalid_0's multi_logloss: 0.884087\tvalid_0's weighted-f1-score: 0.680253\n",
      "[200]\tvalid_0's multi_logloss: 0.882264\tvalid_0's weighted-f1-score: 0.680457\n",
      "[210]\tvalid_0's multi_logloss: 0.880737\tvalid_0's weighted-f1-score: 0.680593\n",
      "[220]\tvalid_0's multi_logloss: 0.879349\tvalid_0's weighted-f1-score: 0.680823\n",
      "[230]\tvalid_0's multi_logloss: 0.878049\tvalid_0's weighted-f1-score: 0.68127\n",
      "[240]\tvalid_0's multi_logloss: 0.877116\tvalid_0's weighted-f1-score: 0.681774\n",
      "[250]\tvalid_0's multi_logloss: 0.876187\tvalid_0's weighted-f1-score: 0.682233\n",
      "[260]\tvalid_0's multi_logloss: 0.875398\tvalid_0's weighted-f1-score: 0.68266\n",
      "[270]\tvalid_0's multi_logloss: 0.874746\tvalid_0's weighted-f1-score: 0.682671\n",
      "[280]\tvalid_0's multi_logloss: 0.874186\tvalid_0's weighted-f1-score: 0.68275\n",
      "[290]\tvalid_0's multi_logloss: 0.87367\tvalid_0's weighted-f1-score: 0.683064\n",
      "[300]\tvalid_0's multi_logloss: 0.873188\tvalid_0's weighted-f1-score: 0.683286\n",
      "[310]\tvalid_0's multi_logloss: 0.87268\tvalid_0's weighted-f1-score: 0.683599\n",
      "[320]\tvalid_0's multi_logloss: 0.872335\tvalid_0's weighted-f1-score: 0.683626\n",
      "[330]\tvalid_0's multi_logloss: 0.87199\tvalid_0's weighted-f1-score: 0.683659\n",
      "[340]\tvalid_0's multi_logloss: 0.87161\tvalid_0's weighted-f1-score: 0.684024\n",
      "[350]\tvalid_0's multi_logloss: 0.871309\tvalid_0's weighted-f1-score: 0.683992\n",
      "[360]\tvalid_0's multi_logloss: 0.870961\tvalid_0's weighted-f1-score: 0.683979\n",
      "[370]\tvalid_0's multi_logloss: 0.870663\tvalid_0's weighted-f1-score: 0.684175\n",
      "[380]\tvalid_0's multi_logloss: 0.870417\tvalid_0's weighted-f1-score: 0.684543\n",
      "[390]\tvalid_0's multi_logloss: 0.870134\tvalid_0's weighted-f1-score: 0.684554\n",
      "[400]\tvalid_0's multi_logloss: 0.869866\tvalid_0's weighted-f1-score: 0.684839\n",
      "[410]\tvalid_0's multi_logloss: 0.869683\tvalid_0's weighted-f1-score: 0.684937\n",
      "[420]\tvalid_0's multi_logloss: 0.869501\tvalid_0's weighted-f1-score: 0.68501\n",
      "[430]\tvalid_0's multi_logloss: 0.869315\tvalid_0's weighted-f1-score: 0.684845\n",
      "[440]\tvalid_0's multi_logloss: 0.869087\tvalid_0's weighted-f1-score: 0.684854\n",
      "[450]\tvalid_0's multi_logloss: 0.868977\tvalid_0's weighted-f1-score: 0.684972\n",
      "[460]\tvalid_0's multi_logloss: 0.868844\tvalid_0's weighted-f1-score: 0.685006\n",
      "[470]\tvalid_0's multi_logloss: 0.868697\tvalid_0's weighted-f1-score: 0.685124\n",
      "[480]\tvalid_0's multi_logloss: 0.868484\tvalid_0's weighted-f1-score: 0.685141\n",
      "[490]\tvalid_0's multi_logloss: 0.868391\tvalid_0's weighted-f1-score: 0.685078\n",
      "[500]\tvalid_0's multi_logloss: 0.868227\tvalid_0's weighted-f1-score: 0.685051\n",
      "[510]\tvalid_0's multi_logloss: 0.868079\tvalid_0's weighted-f1-score: 0.685298\n",
      "[520]\tvalid_0's multi_logloss: 0.867952\tvalid_0's weighted-f1-score: 0.685296\n",
      "[530]\tvalid_0's multi_logloss: 0.867908\tvalid_0's weighted-f1-score: 0.685347\n",
      "[540]\tvalid_0's multi_logloss: 0.867809\tvalid_0's weighted-f1-score: 0.685241\n",
      "[550]\tvalid_0's multi_logloss: 0.867676\tvalid_0's weighted-f1-score: 0.685251\n",
      "[560]\tvalid_0's multi_logloss: 0.867606\tvalid_0's weighted-f1-score: 0.685192\n",
      "[570]\tvalid_0's multi_logloss: 0.867524\tvalid_0's weighted-f1-score: 0.685278\n",
      "[580]\tvalid_0's multi_logloss: 0.867452\tvalid_0's weighted-f1-score: 0.685339\n",
      "[590]\tvalid_0's multi_logloss: 0.867379\tvalid_0's weighted-f1-score: 0.685436\n",
      "[600]\tvalid_0's multi_logloss: 0.867297\tvalid_0's weighted-f1-score: 0.685479\n",
      "[610]\tvalid_0's multi_logloss: 0.867263\tvalid_0's weighted-f1-score: 0.685528\n",
      "[620]\tvalid_0's multi_logloss: 0.867207\tvalid_0's weighted-f1-score: 0.685719\n",
      "[630]\tvalid_0's multi_logloss: 0.867146\tvalid_0's weighted-f1-score: 0.6858\n",
      "[640]\tvalid_0's multi_logloss: 0.867086\tvalid_0's weighted-f1-score: 0.685765\n",
      "[650]\tvalid_0's multi_logloss: 0.867039\tvalid_0's weighted-f1-score: 0.685843\n",
      "[660]\tvalid_0's multi_logloss: 0.867002\tvalid_0's weighted-f1-score: 0.685739\n",
      "[670]\tvalid_0's multi_logloss: 0.866947\tvalid_0's weighted-f1-score: 0.685795\n",
      "[680]\tvalid_0's multi_logloss: 0.866873\tvalid_0's weighted-f1-score: 0.685844\n",
      "[690]\tvalid_0's multi_logloss: 0.866816\tvalid_0's weighted-f1-score: 0.685835\n",
      "[700]\tvalid_0's multi_logloss: 0.866764\tvalid_0's weighted-f1-score: 0.685835\n",
      "[710]\tvalid_0's multi_logloss: 0.866742\tvalid_0's weighted-f1-score: 0.685861\n",
      "[720]\tvalid_0's multi_logloss: 0.866724\tvalid_0's weighted-f1-score: 0.685913\n",
      "[730]\tvalid_0's multi_logloss: 0.866643\tvalid_0's weighted-f1-score: 0.685896\n",
      "[740]\tvalid_0's multi_logloss: 0.866604\tvalid_0's weighted-f1-score: 0.685811\n",
      "[750]\tvalid_0's multi_logloss: 0.866607\tvalid_0's weighted-f1-score: 0.685905\n",
      "[760]\tvalid_0's multi_logloss: 0.866548\tvalid_0's weighted-f1-score: 0.685896\n",
      "[770]\tvalid_0's multi_logloss: 0.866524\tvalid_0's weighted-f1-score: 0.685954\n",
      "[780]\tvalid_0's multi_logloss: 0.866486\tvalid_0's weighted-f1-score: 0.685984\n",
      "[790]\tvalid_0's multi_logloss: 0.866484\tvalid_0's weighted-f1-score: 0.686007\n",
      "[800]\tvalid_0's multi_logloss: 0.866473\tvalid_0's weighted-f1-score: 0.686102\n",
      "[810]\tvalid_0's multi_logloss: 0.866459\tvalid_0's weighted-f1-score: 0.686147\n",
      "[820]\tvalid_0's multi_logloss: 0.866433\tvalid_0's weighted-f1-score: 0.686033\n",
      "[830]\tvalid_0's multi_logloss: 0.866391\tvalid_0's weighted-f1-score: 0.6861\n",
      "[840]\tvalid_0's multi_logloss: 0.866403\tvalid_0's weighted-f1-score: 0.686011\n",
      "[850]\tvalid_0's multi_logloss: 0.866346\tvalid_0's weighted-f1-score: 0.685935\n",
      "[860]\tvalid_0's multi_logloss: 0.866308\tvalid_0's weighted-f1-score: 0.685985\n",
      "[870]\tvalid_0's multi_logloss: 0.866324\tvalid_0's weighted-f1-score: 0.685923\n",
      "[880]\tvalid_0's multi_logloss: 0.866329\tvalid_0's weighted-f1-score: 0.686051\n",
      "[890]\tvalid_0's multi_logloss: 0.866311\tvalid_0's weighted-f1-score: 0.685987\n",
      "[900]\tvalid_0's multi_logloss: 0.86628\tvalid_0's weighted-f1-score: 0.685972\n",
      "Early stopping, best iteration is:\n",
      "[808]\tvalid_0's multi_logloss: 0.866458\tvalid_0's weighted-f1-score: 0.686167\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=0.8,\n",
       "        importance_type='split', learning_rate=0.05, max_depth=1,\n",
       "        metric='multiclass', min_child_samples=50, min_child_weight=0.001,\n",
       "        min_split_gain=0.0, n_estimators=2000, n_jobs=-1, num_leaves=61,\n",
       "        objective='multiclass', random_state=2019, reg_alpha=0,\n",
       "        reg_lambda=0.01, silent=True, subsample=0.8,\n",
       "        subsample_for_bin=200000, subsample_freq=1)"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "from lightgbm import LGBMClassifier\n",
    "from sklearn.metrics import f1_score\n",
    "from time import gmtime, strftime\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "# Evaluation metric: weighted F1 over the 12 click modes. Passed to fit as\n",
    "# eval_metric below; returns (name, value, is_higher_better).\n",
    "def f1_weighted(y_true, y_pred):\n",
    "    # Reshape the flattened probability array (12 classes) to\n",
    "    # (n_samples, 12), then take the argmax as the predicted class.\n",
    "    y_pred = y_pred.reshape(12, -1).T\n",
    "    y_pred = np.argmax(y_pred, axis=1)\n",
    "    score = f1_score(y_true, y_pred, average='weighted')\n",
    "    return 'weighted-f1-score', score, True\n",
    "\n",
    "# Time-based split: everything before 2018-11-24 trains, the last seven days validate.\n",
    "tra_x = train_x[train_x.req_time < '2018-11-24']\n",
    "tra_y = train_y[train_y.req_time < '2018-11-24']\n",
    "valid_x = train_x[train_x.req_time >= '2018-11-24']\n",
    "valid_y = train_y[train_y.req_time >= '2018-11-24']\n",
    "\n",
    "# req_time was only kept for slicing; drop it before modelling.\n",
    "tra_x = tra_x.drop(['req_time'], axis=1)\n",
    "tra_y = tra_y.drop(['req_time'], axis=1)\n",
    "valid_x = valid_x.drop(['req_time'], axis=1)\n",
    "valid_y = valid_y.drop(['req_time'], axis=1)\n",
    "\n",
    "# NOTE(review): the *_counts entries look like numeric count features being\n",
    "# declared categorical -- confirm this is intentional.\n",
    "categorical_feature = ['max_dis_mode', 'min_dis_mode', 'max_price_mode', 'min_price_mode',\n",
    "                       'max_eta_mode', 'min_eta_mode', 'first_mode', 'pid_o_counts', 'pid_d_counts', 'o_d', 'pid_od_counts']\n",
    "\n",
    "lgb = LGBMClassifier(boosting_type='gbdt', num_leaves=61, objective='multiclass', reg_alpha=0, reg_lambda=0.01, max_depth=1, \n",
    "                    n_estimators=2000, subsample=0.8, colsample_bytree=0.8, subsample_freq=1, min_child_samples=50,\n",
    "                    learning_rate=0.05, random_state=2019, metric='multiclass', n_jobs=-1)\n",
    "eval_set = [(valid_x, valid_y)]\n",
    "# Early stopping on the validation week; progress is logged every 10 rounds.\n",
    "lgb.fit(tra_x, tra_y, eval_set=eval_set, eval_metric=f1_weighted, categorical_feature=categorical_feature, verbose=10, early_stopping_rounds=100)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 特征重要度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>feature</th>\n",
       "      <th>imp</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>o_d</td>\n",
       "      <td>2610</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>121</th>\n",
       "      <td>pid_o_counts</td>\n",
       "      <td>1403</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>122</th>\n",
       "      <td>pid_d_counts</td>\n",
       "      <td>1268</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>46</th>\n",
       "      <td>first_mode</td>\n",
       "      <td>579</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>mode_feas_2</td>\n",
       "      <td>225</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>mode_feas_1</td>\n",
       "      <td>224</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>mode_feas_9</td>\n",
       "      <td>221</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>mode_feas_10</td>\n",
       "      <td>219</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>mode_feas_7</td>\n",
       "      <td>213</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27</th>\n",
       "      <td>mode_feas_11</td>\n",
       "      <td>191</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>mode_feas_8</td>\n",
       "      <td>158</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>mode_feas_5</td>\n",
       "      <td>155</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>mode_feas_6</td>\n",
       "      <td>151</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>47</th>\n",
       "      <td>p0</td>\n",
       "      <td>131</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>min_distance</td>\n",
       "      <td>117</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>mode_feas_4</td>\n",
       "      <td>112</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>mode_feas_3</td>\n",
       "      <td>97</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>svd_mode_0</td>\n",
       "      <td>97</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>123</th>\n",
       "      <td>pid_od_counts</td>\n",
       "      <td>97</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>39</th>\n",
       "      <td>std_eta</td>\n",
       "      <td>94</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>o_d_distance</td>\n",
       "      <td>86</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>45</th>\n",
       "      <td>min_eta_mode</td>\n",
       "      <td>83</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>136</th>\n",
       "      <td>pid_counts</td>\n",
       "      <td>80</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>svd_mode_5</td>\n",
       "      <td>80</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>44</th>\n",
       "      <td>max_eta_mode</td>\n",
       "      <td>74</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>svd_mode_2</td>\n",
       "      <td>71</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>113</th>\n",
       "      <td>NatioinalDay</td>\n",
       "      <td>61</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>svd_mode_6</td>\n",
       "      <td>52</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>119</th>\n",
       "      <td>hour</td>\n",
       "      <td>51</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>77</th>\n",
       "      <td>p30</td>\n",
       "      <td>47</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>58</th>\n",
       "      <td>p11</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>56</th>\n",
       "      <td>p9</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>55</th>\n",
       "      <td>p8</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>53</th>\n",
       "      <td>p6</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>51</th>\n",
       "      <td>p4</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50</th>\n",
       "      <td>p3</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>48</th>\n",
       "      <td>p1</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>42</th>\n",
       "      <td>max_price_mode</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>68</th>\n",
       "      <td>p21</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>71</th>\n",
       "      <td>p24</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>98</th>\n",
       "      <td>p51</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>72</th>\n",
       "      <td>p25</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>97</th>\n",
       "      <td>p50</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>96</th>\n",
       "      <td>p49</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>95</th>\n",
       "      <td>p48</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>94</th>\n",
       "      <td>p47</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>93</th>\n",
       "      <td>p46</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>92</th>\n",
       "      <td>p45</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>91</th>\n",
       "      <td>p44</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>90</th>\n",
       "      <td>p43</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>89</th>\n",
       "      <td>p42</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>88</th>\n",
       "      <td>p41</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>87</th>\n",
       "      <td>p40</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>86</th>\n",
       "      <td>p39</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>85</th>\n",
       "      <td>p38</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>83</th>\n",
       "      <td>p36</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>82</th>\n",
       "      <td>p35</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75</th>\n",
       "      <td>p28</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>74</th>\n",
       "      <td>p27</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>140</th>\n",
       "      <td>pid_W1_counts</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>141 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            feature   imp\n",
       "4               o_d  2610\n",
       "121    pid_o_counts  1403\n",
       "122    pid_d_counts  1268\n",
       "46       first_mode   579\n",
       "18      mode_feas_2   225\n",
       "17      mode_feas_1   224\n",
       "25      mode_feas_9   221\n",
       "26     mode_feas_10   219\n",
       "23      mode_feas_7   213\n",
       "27     mode_feas_11   191\n",
       "24      mode_feas_8   158\n",
       "21      mode_feas_5   155\n",
       "22      mode_feas_6   151\n",
       "47               p0   131\n",
       "29     min_distance   117\n",
       "20      mode_feas_4   112\n",
       "19      mode_feas_3    97\n",
       "6        svd_mode_0    97\n",
       "123   pid_od_counts    97\n",
       "39          std_eta    94\n",
       "5      o_d_distance    86\n",
       "45     min_eta_mode    83\n",
       "136      pid_counts    80\n",
       "11       svd_mode_5    80\n",
       "44     max_eta_mode    74\n",
       "8        svd_mode_2    71\n",
       "113    NatioinalDay    61\n",
       "12       svd_mode_6    52\n",
       "119            hour    51\n",
       "77              p30    47\n",
       "..              ...   ...\n",
       "58              p11     0\n",
       "56               p9     0\n",
       "55               p8     0\n",
       "53               p6     0\n",
       "51               p4     0\n",
       "50               p3     0\n",
       "48               p1     0\n",
       "42   max_price_mode     0\n",
       "68              p21     0\n",
       "71              p24     0\n",
       "98              p51     0\n",
       "72              p25     0\n",
       "97              p50     0\n",
       "96              p49     0\n",
       "95              p48     0\n",
       "94              p47     0\n",
       "93              p46     0\n",
       "92              p45     0\n",
       "91              p44     0\n",
       "90              p43     0\n",
       "89              p42     0\n",
       "88              p41     0\n",
       "87              p40     0\n",
       "86              p39     0\n",
       "85              p38     0\n",
       "83              p36     0\n",
       "82              p35     0\n",
       "75              p28     0\n",
       "74              p27     0\n",
       "140   pid_W1_counts     0\n",
       "\n",
       "[141 rows x 2 columns]"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Feature importances of the fitted LightGBM model, most important first.\n",
    "imp = (\n",
    "    pd.DataFrame({'feature': tra_x.columns, 'imp': lgb.feature_importances_})\n",
    "    .sort_values('imp', ascending=False)\n",
    ")\n",
    "imp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6861672939883036"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Weighted F1 of the early-stopped model on the validation week.\n",
    "pre = lgb.predict(valid_x)\n",
    "f1_score(valid_y, pre, average='weighted')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 提交结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Refit on the full training range using the early-stopped tree count,\n",
    "# then predict the test queries and write the submission file.\n",
    "x = train_x.drop(['req_time'], axis=1)\n",
    "y = train_y.drop(['req_time'], axis=1)\n",
    "\n",
    "lgb.n_estimators = lgb.best_iteration_  # reuse the best iteration found during validation\n",
    "lgb.fit(x, y, categorical_feature=categorical_feature)\n",
    "pred_test = lgb.predict(test_x)\n",
    "\n",
    "# Timestamped submission filename (gmtime -> UTC timestamp).\n",
    "now_time = strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n",
    "submit['recommend_mode'] = pred_test\n",
    "submit.to_csv('submission_{}.csv'.format(now_time), index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
