{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 一、特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1、合并训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def merge_data():\n",
    "    \"\"\"Load the phase-1 csv files and combine train + test into one frame.\n",
    "\n",
    "    Train rows keep their observed click_mode (sessions with no click\n",
    "    become class 0); test rows are tagged with click_mode == -1 so they\n",
    "    can be split back out later.  'click_time' and 'plan_time' are dropped.\n",
    "    \"\"\"\n",
    "    # click labels for the training sessions\n",
    "    train_clicks = pd.read_csv(\"data_set_phase1//train_clicks.csv\")\n",
    "\n",
    "    # feature tables\n",
    "    train_plans = pd.read_csv(\"data_set_phase1//train_plans.csv\")\n",
    "    train_queries = pd.read_csv(\"data_set_phase1//train_queries.csv\")\n",
    "    test_plans = pd.read_csv(\"data_set_phase1//test_plans.csv\")\n",
    "    test_queries = pd.read_csv(\"data_set_phase1//test_queries.csv\")\n",
    "\n",
    "    # training set: queries + plans + click labels, keyed by session id\n",
    "    tra_data = (train_queries\n",
    "                .merge(train_plans, on='sid', how='left')\n",
    "                .merge(train_clicks, on='sid', how='left')\n",
    "                .drop(['click_time'], axis=1))\n",
    "    # sessions without a click are treated as class 0\n",
    "    tra_data['click_mode'] = tra_data['click_mode'].fillna(0)\n",
    "\n",
    "    # test set: no labels, mark with the sentinel -1\n",
    "    tes_data = test_queries.merge(test_plans, on='sid', how='left')\n",
    "    tes_data['click_mode'] = -1\n",
    "\n",
    "    # stack train on top of test and renumber the rows\n",
    "    all_data = (pd.concat([tra_data, tes_data], axis=0)\n",
    "                .drop(['plan_time'], axis=1)\n",
    "                .reset_index(drop=True))\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2、抽取o、d的特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_od_feature(all_data):\n",
    "    all_data['o1'] = all_data['o'].apply(lambda x : float(x.split(',')[0]))\n",
    "    all_data['o2'] = all_data['o'].apply(lambda x : float(x.split(',')[1]))\n",
    "    all_data['d1'] = all_data['d'].apply(lambda x : float(x.split(',')[0]))\n",
    "    all_data['d2'] = all_data['d'].apply(lambda x : float(x.split(',')[1]))\n",
    "    all_data = all_data.drop(['o', 'd'], axis=1)\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3、抽取plans的特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 提取plans特征\n",
    "### 1、max_distance、min_distance、mean_distance、std_distance\n",
    "### 2、max_price、min_price、mean_price、std_price\n",
    "### 3、max_eta、min_eta、mean_eta、std_eta\n",
    "### 4、max_dis_mode、min_dis_mode、max_price_mode、min_price_mode、max_eta_mode、min_eta_mode\n",
    "### 5、first_mode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_plan_feature(all_data):\n",
    "    \"\"\"Expand the JSON 'plans' column into per-session numeric features.\n",
    "\n",
    "    Derived columns:\n",
    "      * mode_feas_0..11 - one-hot flags of the transport modes offered\n",
    "      * max/min/mean/std of distance, price and eta over the offered plans\n",
    "      * max/min_dis_mode, max/min_price_mode, max/min_eta_mode - the mode\n",
    "        of the plan with the largest / smallest distance, price and eta\n",
    "      * first_mode - mode of the first (top-ranked) plan\n",
    "      * svd_mode_0..9 - TruncatedSVD of a tf-idf over the mode sequences\n",
    "    Sessions without plans get mode_feas_0 = 1 and -1 sentinels elsewhere.\n",
    "    Returns all_data with the new columns added and 'plans' dropped.\n",
    "    \"\"\"\n",
    "    n = all_data.shape[0]\n",
    "\n",
    "    # one-hot of the offered modes: cell (i, m) is 1 if mode m was recommended\n",
    "    mode_list_feas = np.zeros((n, 12))\n",
    "\n",
    "    # distance statistics\n",
    "    max_distance, min_distance, mean_distance, std_distance = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # price statistics\n",
    "    max_price, min_price, mean_price, std_price = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # eta (estimated travel time) statistics\n",
    "    max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # modes attached to the extreme plans, plus the first recommended mode\n",
    "    max_dis_mode, min_dis_mode, max_price_mode, min_price_mode, max_eta_mode, min_eta_mode, first_mode = np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,)),np.zeros((n,))\n",
    "\n",
    "    # textual mode sequence per session, fed to the tf-idf below\n",
    "    mode_texts=[]\n",
    "\n",
    "    # walk over each session's plan list\n",
    "    for i, plan in tqdm(enumerate(all_data['plans'].values)):\n",
    "        # 'plans' is NaN (a float) when nothing was recommended, and could be\n",
    "        # malformed JSON; both cases fall back to an empty plan list.\n",
    "        # (was a bare `except:`, which would also swallow KeyboardInterrupt)\n",
    "        try:\n",
    "            user_plan_list = json.loads(plan)\n",
    "        except (TypeError, ValueError):\n",
    "            user_plan_list = []\n",
    "        if len(user_plan_list)==0:\n",
    "            # no plans: flag pseudo-mode 0 and use -1 sentinels everywhere\n",
    "            mode_list_feas[i, 0] = 1\n",
    "\n",
    "            first_mode[i] = 0\n",
    "\n",
    "            max_distance[i] = -1\n",
    "            min_distance[i] = -1\n",
    "            mean_distance[i] = -1\n",
    "            std_distance[i] = -1\n",
    "\n",
    "            max_price[i] = -1\n",
    "            min_price[i] = -1\n",
    "            mean_price[i] = -1\n",
    "            std_price[i] = -1\n",
    "\n",
    "            max_eta[i] = -1\n",
    "            min_eta[i] = -1\n",
    "            mean_eta[i] = -1\n",
    "            std_eta[i] = -1\n",
    "\n",
    "            max_dis_mode[i] = -1\n",
    "            min_dis_mode[i] = -1\n",
    "            max_price_mode[i] = -1\n",
    "            min_price_mode[i] = -1\n",
    "            max_eta_mode[i] = -1\n",
    "            min_eta_mode[i] = -1\n",
    "\n",
    "            mode_texts.append('word_null')\n",
    "        else:\n",
    "            distance_list = []\n",
    "            price_list = []\n",
    "            eta_list = []\n",
    "            mode_list = []\n",
    "\n",
    "            # collect the raw fields of every plan offered to this session\n",
    "            for tmp_dict in user_plan_list:\n",
    "                distance_list.append(int(tmp_dict['distance']))\n",
    "                # free plans come through with price == '' -> treat as 0\n",
    "                if tmp_dict['price']=='':\n",
    "                    price_list.append(0)\n",
    "                else:\n",
    "                    price_list.append(int(tmp_dict['price']))\n",
    "                eta_list.append(int(tmp_dict['eta']))\n",
    "                mode_list.append(int(tmp_dict['transport_mode']))\n",
    "\n",
    "            # keep the recommendation order as a word sequence\n",
    "            mode_texts.append(' '.join(['word_{}'.format(mode) for mode in mode_list]))\n",
    "\n",
    "            # list -> ndarray for vectorised stats / fancy indexing\n",
    "            distance_list = np.array(distance_list)\n",
    "            price_list = np.array(price_list)\n",
    "            eta_list = np.array(eta_list)\n",
    "            mode_list = np.array(mode_list, dtype='int')\n",
    "\n",
    "            # flag every mode that was recommended\n",
    "            mode_list_feas[i, mode_list] = 1\n",
    "\n",
    "            # sort orders, reused to pick both the extreme value and its mode\n",
    "            distance_sort_idx = np.argsort(distance_list)\n",
    "            price_sort_idx = np.argsort(price_list)\n",
    "            eta_sort_idx = np.argsort(eta_list)\n",
    "\n",
    "            # summary statistics per field\n",
    "            max_distance[i] = distance_list[distance_sort_idx[-1]]\n",
    "            min_distance[i] = distance_list[distance_sort_idx[0]]\n",
    "            mean_distance[i] = np.mean(distance_list)\n",
    "            std_distance[i] = np.std(distance_list)\n",
    "\n",
    "            max_price[i] = price_list[price_sort_idx[-1]]\n",
    "            min_price[i] = price_list[price_sort_idx[0]]\n",
    "            mean_price[i] = np.mean(price_list)\n",
    "            std_price[i] = np.std(price_list)\n",
    "\n",
    "            max_eta[i] = eta_list[eta_sort_idx[-1]]\n",
    "            min_eta[i] = eta_list[eta_sort_idx[0]]\n",
    "            mean_eta[i] = np.mean(eta_list)\n",
    "            std_eta[i] = np.std(eta_list)\n",
    "\n",
    "            first_mode[i] = mode_list[0]\n",
    "\n",
    "            # mode of the farthest / nearest / dearest / cheapest / slowest / fastest plan\n",
    "            max_dis_mode[i] = mode_list[distance_sort_idx[-1]]\n",
    "            min_dis_mode[i] = mode_list[distance_sort_idx[0]]\n",
    "\n",
    "            max_price_mode[i] = mode_list[price_sort_idx[-1]]\n",
    "            min_price_mode[i] = mode_list[price_sort_idx[0]]\n",
    "\n",
    "            max_eta_mode[i] = mode_list[eta_sort_idx[-1]]\n",
    "            min_eta_mode[i] = mode_list[eta_sort_idx[0]]\n",
    "\n",
    "    # gather the per-session arrays into a DataFrame\n",
    "    plan_feature_data = pd.DataFrame(mode_list_feas)\n",
    "    plan_feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]\n",
    "\n",
    "    plan_feature_data['max_distance'] = max_distance\n",
    "    plan_feature_data['min_distance'] = min_distance\n",
    "    plan_feature_data['mean_distance'] = mean_distance\n",
    "    plan_feature_data['std_distance'] = std_distance\n",
    "\n",
    "    plan_feature_data['max_price'] = max_price\n",
    "    plan_feature_data['min_price'] = min_price\n",
    "    plan_feature_data['mean_price'] = mean_price\n",
    "    plan_feature_data['std_price'] = std_price\n",
    "\n",
    "    plan_feature_data['max_eta'] = max_eta\n",
    "    plan_feature_data['min_eta'] = min_eta\n",
    "    plan_feature_data['mean_eta'] = mean_eta\n",
    "    plan_feature_data['std_eta'] = std_eta\n",
    "\n",
    "    plan_feature_data['max_dis_mode'] = max_dis_mode\n",
    "    plan_feature_data['min_dis_mode'] = min_dis_mode\n",
    "    plan_feature_data['max_price_mode'] = max_price_mode\n",
    "    plan_feature_data['min_price_mode'] = min_price_mode\n",
    "    plan_feature_data['max_eta_mode'] = max_eta_mode\n",
    "    plan_feature_data['min_eta_mode'] = min_eta_mode\n",
    "\n",
    "    plan_feature_data['first_mode'] = first_mode\n",
    "\n",
    "    # tf-idf over the mode sequences, reduced to 10 dims with truncated SVD\n",
    "    tfidf = TfidfVectorizer(ngram_range=(1, 2))\n",
    "    tfidf_vec = tfidf.fit_transform(mode_texts)\n",
    "    svd = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)\n",
    "    mode_svd = svd.fit_transform(tfidf_vec)\n",
    "\n",
    "    # to DataFrame\n",
    "    mode_svd = pd.DataFrame(mode_svd)\n",
    "    mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]\n",
    "\n",
    "    # column-wise concat works because all three frames share the 0..n-1 index\n",
    "    all_data = pd.concat([all_data, mode_svd, plan_feature_data], axis=1)\n",
    "    all_data = all_data.drop(['plans'], axis=1)\n",
    "\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4、抽取profiles数据集特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_profiles_feature(all_data):\n",
    "    \"\"\"Left-join the user profile attributes onto all_data via 'pid'.\n",
    "\n",
    "    Sessions without a pid are mapped to a synthetic profile whose pid is\n",
    "    -1 and whose attribute columns are all zero, so the merge also covers\n",
    "    anonymous sessions.\n",
    "    \"\"\"\n",
    "    profiles = pd.read_csv(\"data_set_phase1//profiles.csv\")\n",
    "\n",
    "    # synthetic all-zero profile for sessions without a pid; width derived\n",
    "    # from the csv instead of the previous hard-coded 67, and pid set by\n",
    "    # name instead of assuming it is the first column\n",
    "    profiles_na = pd.DataFrame(np.zeros((1, len(profiles.columns))),\n",
    "                               columns=profiles.columns)\n",
    "    profiles_na['pid'] = -1\n",
    "    # DataFrame.append was removed in pandas 2.0 -> use concat\n",
    "    profiles = pd.concat([profiles, profiles_na], ignore_index=True)\n",
    "\n",
    "    # anonymous sessions point at the synthetic pid == -1 profile\n",
    "    all_data['pid'] = all_data['pid'].fillna(-1)\n",
    "    all_data = all_data.merge(profiles, on='pid', how='left')\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5、抽取时间特征（req_time）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def gen_time_feature(all_data):\n",
    "    all_data['req_time'] = pd.to_datetime(all_data['req_time'])\n",
    "    all_data['dayofweek'] = all_data['req_time'].dt.dayofweek\n",
    "    all_data['hourofday'] = all_data['req_time'].dt.hour\n",
    "    all_data = all_data.drop(['req_time'], axis=1)\n",
    "    return all_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6、切分训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_test_split(all_data):\n",
    "    train_data = all_data[all_data['click_mode']!=-1]\n",
    "    test_data = all_data[all_data['click_mode']==-1]\n",
    "    test_data = test_data.drop(['click_mode'], axis=1)\n",
    "    submit = test_data[['sid']].copy()\n",
    "    \n",
    "    train_data = train_data.drop(['sid','pid'], axis=1)\n",
    "    train_y = train_data['click_mode']\n",
    "    train_x = train_data.drop(['click_mode'], axis=1)\n",
    "    test_x = test_data.drop(['sid','pid'], axis=1)\n",
    "    \n",
    "    return train_x, train_y, test_x, submit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "594358it [04:02, 2447.44it/s]\n"
     ]
    }
   ],
   "source": [
    "# Run the full feature pipeline, then split back into train / test.\n",
    "all_data = merge_data()\n",
    "for add_features in (gen_od_feature, gen_plan_feature,\n",
    "                     gen_profiles_feature, gen_time_feature):\n",
    "    all_data = add_features(all_data)\n",
    "train_x, train_y, test_x, submit = train_test_split(all_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7、模型训练&验证&提交"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\Program Files\\Anaconda\\lib\\site-packages\\lightgbm\\basic.py:1186: UserWarning: Using categorical_feature in Dataset.\n",
      "  warnings.warn('Using categorical_feature in Dataset.')\n",
      "E:\\Program Files\\Anaconda\\lib\\site-packages\\lightgbm\\basic.py:752: UserWarning: categorical_feature in param dict is overridden.\n",
      "  warnings.warn('categorical_feature in param dict is overridden.')\n",
      "E:\\Program Files\\Anaconda\\lib\\site-packages\\sklearn\\metrics\\classification.py:1143: UndefinedMetricWarning: F-score is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.01795\tvalid_0's weighted-f1-score: 0.669467\n",
      "[100]\tvalid_0's multi_logloss: 0.923867\tvalid_0's weighted-f1-score: 0.673266\n",
      "[150]\tvalid_0's multi_logloss: 0.901965\tvalid_0's weighted-f1-score: 0.67495\n",
      "[200]\tvalid_0's multi_logloss: 0.895609\tvalid_0's weighted-f1-score: 0.675337\n",
      "[250]\tvalid_0's multi_logloss: 0.893235\tvalid_0's weighted-f1-score: 0.67557\n",
      "[300]\tvalid_0's multi_logloss: 0.892134\tvalid_0's weighted-f1-score: 0.675779\n",
      "[350]\tvalid_0's multi_logloss: 0.891532\tvalid_0's weighted-f1-score: 0.675988\n",
      "[400]\tvalid_0's multi_logloss: 0.89113\tvalid_0's weighted-f1-score: 0.676224\n",
      "[450]\tvalid_0's multi_logloss: 0.890888\tvalid_0's weighted-f1-score: 0.676503\n",
      "[500]\tvalid_0's multi_logloss: 0.890752\tvalid_0's weighted-f1-score: 0.676508\n",
      "Early stopping, best iteration is:\n",
      "[485]\tvalid_0's multi_logloss: 0.890778\tvalid_0's weighted-f1-score: 0.676696\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.01426\tvalid_0's weighted-f1-score: 0.670336\n",
      "[100]\tvalid_0's multi_logloss: 0.919243\tvalid_0's weighted-f1-score: 0.674168\n",
      "[150]\tvalid_0's multi_logloss: 0.897052\tvalid_0's weighted-f1-score: 0.675494\n",
      "[200]\tvalid_0's multi_logloss: 0.890394\tvalid_0's weighted-f1-score: 0.676189\n",
      "[250]\tvalid_0's multi_logloss: 0.887881\tvalid_0's weighted-f1-score: 0.676628\n",
      "[300]\tvalid_0's multi_logloss: 0.886849\tvalid_0's weighted-f1-score: 0.676923\n",
      "[350]\tvalid_0's multi_logloss: 0.886266\tvalid_0's weighted-f1-score: 0.67703\n",
      "[400]\tvalid_0's multi_logloss: 0.885924\tvalid_0's weighted-f1-score: 0.677066\n",
      "Early stopping, best iteration is:\n",
      "[365]\tvalid_0's multi_logloss: 0.886163\tvalid_0's weighted-f1-score: 0.67719\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.01566\tvalid_0's weighted-f1-score: 0.669021\n",
      "[100]\tvalid_0's multi_logloss: 0.921223\tvalid_0's weighted-f1-score: 0.672272\n",
      "[150]\tvalid_0's multi_logloss: 0.899082\tvalid_0's weighted-f1-score: 0.673632\n",
      "[200]\tvalid_0's multi_logloss: 0.892373\tvalid_0's weighted-f1-score: 0.674311\n",
      "[250]\tvalid_0's multi_logloss: 0.88988\tvalid_0's weighted-f1-score: 0.674642\n",
      "[300]\tvalid_0's multi_logloss: 0.888735\tvalid_0's weighted-f1-score: 0.674902\n",
      "[350]\tvalid_0's multi_logloss: 0.888144\tvalid_0's weighted-f1-score: 0.675238\n",
      "[400]\tvalid_0's multi_logloss: 0.887685\tvalid_0's weighted-f1-score: 0.675494\n",
      "[450]\tvalid_0's multi_logloss: 0.887381\tvalid_0's weighted-f1-score: 0.675544\n",
      "[500]\tvalid_0's multi_logloss: 0.887123\tvalid_0's weighted-f1-score: 0.675706\n",
      "[550]\tvalid_0's multi_logloss: 0.886904\tvalid_0's weighted-f1-score: 0.67575\n",
      "[600]\tvalid_0's multi_logloss: 0.88664\tvalid_0's weighted-f1-score: 0.675694\n",
      "Early stopping, best iteration is:\n",
      "[577]\tvalid_0's multi_logloss: 0.886796\tvalid_0's weighted-f1-score: 0.675898\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.01937\tvalid_0's weighted-f1-score: 0.667881\n",
      "[100]\tvalid_0's multi_logloss: 0.924977\tvalid_0's weighted-f1-score: 0.671764\n",
      "[150]\tvalid_0's multi_logloss: 0.902776\tvalid_0's weighted-f1-score: 0.673278\n",
      "[200]\tvalid_0's multi_logloss: 0.895851\tvalid_0's weighted-f1-score: 0.674003\n",
      "[250]\tvalid_0's multi_logloss: 0.893184\tvalid_0's weighted-f1-score: 0.67452\n",
      "[300]\tvalid_0's multi_logloss: 0.892033\tvalid_0's weighted-f1-score: 0.674476\n",
      "[350]\tvalid_0's multi_logloss: 0.891491\tvalid_0's weighted-f1-score: 0.674604\n",
      "[400]\tvalid_0's multi_logloss: 0.891089\tvalid_0's weighted-f1-score: 0.674955\n",
      "[450]\tvalid_0's multi_logloss: 0.890729\tvalid_0's weighted-f1-score: 0.675154\n",
      "Early stopping, best iteration is:\n",
      "[411]\tvalid_0's multi_logloss: 0.890979\tvalid_0's weighted-f1-score: 0.675224\n",
      "Training until validation scores don't improve for 50 rounds.\n",
      "[50]\tvalid_0's multi_logloss: 1.01687\tvalid_0's weighted-f1-score: 0.669112\n",
      "[100]\tvalid_0's multi_logloss: 0.922288\tvalid_0's weighted-f1-score: 0.672707\n",
      "[150]\tvalid_0's multi_logloss: 0.900274\tvalid_0's weighted-f1-score: 0.673958\n",
      "[200]\tvalid_0's multi_logloss: 0.893653\tvalid_0's weighted-f1-score: 0.674716\n",
      "[250]\tvalid_0's multi_logloss: 0.891116\tvalid_0's weighted-f1-score: 0.674905\n",
      "[300]\tvalid_0's multi_logloss: 0.889956\tvalid_0's weighted-f1-score: 0.675259\n",
      "Early stopping, best iteration is:\n",
      "[299]\tvalid_0's multi_logloss: 0.889969\tvalid_0's weighted-f1-score: 0.675318\n",
      "cv f1_score: 0.6760652093334515\n"
     ]
    }
   ],
   "source": [
    "# Train: 5-fold stratified LightGBM, early-stopped on weighted F1,\n",
    "# then average the fold probabilities to predict the test set.\n",
    "import numpy as np\n",
    "import lightgbm as lgb\n",
    "from sklearn.metrics import f1_score\n",
    "from time import gmtime, strftime\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "\n",
    "def f1_weighted(y_pred, train_data):\n",
    "    \"\"\"Custom LightGBM eval metric: weighted F1 of the argmax class.\n",
    "\n",
    "    NOTE(review): reshape(12, -1).T assumes the flattened multiclass\n",
    "    predictions arrive in 12 class-blocks - confirm this matches the\n",
    "    installed LightGBM version's feval layout.\n",
    "    Returns (metric_name, value, is_higher_better).\n",
    "    \"\"\"\n",
    "    y_true = train_data.label\n",
    "    y_pred = y_pred.reshape(12, -1).T\n",
    "    y_pred = np.argmax(y_pred, axis=1)\n",
    "    f1 = f1_score(y_true, y_pred, average='weighted')\n",
    "    return 'weighted-f1-score', f1, True\n",
    "\n",
    "# stratified 5-fold CV with a fixed seed for reproducibility\n",
    "kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2019)\n",
    "\n",
    "# 12-class softmax with feature/row subsampling and L1/L2 regularisation\n",
    "lgb_paras = {\n",
    "    'objective': 'multiclass',\n",
    "    'metrics': 'multiclass',\n",
    "    'learning_rate': 0.05,\n",
    "    'num_leaves': 31,\n",
    "    'lambda_l1': 0.01,\n",
    "    'lambda_l2': 10,\n",
    "    'num_class': 12,\n",
    "    'seed': 2019,\n",
    "    'feature_fraction': 0.8,\n",
    "    'bagging_fraction': 0.8,\n",
    "    'bagging_freq': 4\n",
    "}\n",
    "\n",
    "# mode/time columns are identifiers, not quantities -> mark as categorical\n",
    "categorical_feature = ['max_dis_mode', 'min_dis_mode', 'max_price_mode', 'min_price_mode',\n",
    "                       'max_eta_mode', 'min_eta_mode', 'first_mode','dayofweek','hourofday']\n",
    "scores = []  # per-fold validation weighted F1\n",
    "result_proba = []  # per-fold class probabilities on the test set\n",
    "for tra_idx, val_idx in kfold.split(train_x, train_y):\n",
    "    tra_x, tra_y, val_x, val_y = train_x.iloc[tra_idx], train_y[tra_idx], train_x.iloc[val_idx], train_y[val_idx]\n",
    "    train_set = lgb.Dataset(tra_x, tra_y, categorical_feature=categorical_feature)\n",
    "    val_set = lgb.Dataset(val_x, val_y, categorical_feature=categorical_feature)\n",
    "    lgb_model = lgb.train(lgb_paras, train_set, valid_sets=[val_set], early_stopping_rounds=50, num_boost_round=40000, verbose_eval=50, feval=f1_weighted)\n",
    "    # score the fold at its best (early-stopped) iteration\n",
    "    val_pred = np.argmax(lgb_model.predict(val_x, num_iteration=lgb_model.best_iteration), axis=1)\n",
    "    val_score = f1_score(val_y, val_pred, average='weighted')\n",
    "    result_proba.append(lgb_model.predict(test_x, num_iteration=lgb_model.best_iteration))\n",
    "    scores.append(val_score)\n",
    "print('cv f1_score:', np.mean(scores))\n",
    "# average the fold probabilities, then take the most likely class per row\n",
    "pred_test = np.argmax(np.mean(result_proba, axis=0), axis=1)\n",
    "\n",
    "# write the submission, timestamped so repeated runs don't overwrite\n",
    "now_time = strftime(\"%Y-%m-%d-%H-%M-%S\", gmtime())\n",
    "submit['recommend_mode'] = pred_test\n",
    "submit.to_csv('submission_{}.csv'.format(now_time), index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
