{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports and path configuration for the ship-ETA pipeline.\n",
    "import math  # required by a later cell; previously missing (NameError on a fresh run)\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "from sklearn.metrics import mean_squared_error, explained_variance_score\n",
    "from sklearn.model_selection import KFold\n",
    "import lightgbm as lgb\n",
    "\n",
    "test_data_path = '../data/A_testData0531.csv'\n",
    "train_gps_path = '../data/train0523.csv'\n",
    "port_path = '../data/port.csv'\n",
    "result_path = '../result/result_local.csv'\n",
    "\n",
    "# Uncomment when running on ModelArts to sync the data from OBS first.\n",
    "# import moxing as mox\n",
    "# OBS_DATA_PATH = \"s3://ship-eta/data/train0523.csv\"\n",
    "# OBS_TEST_PATH = \"s3://ship-eta/data/A_testData0531.csv\"\n",
    "# OBS_RES_PATH =  \"s3://ship-eta/result/result_local.csv\"\n",
    "# mox.file.copy_parallel(OBS_DATA_PATH, train_gps_path)\n",
    "# mox.file.copy_parallel(OBS_TEST_PATH, test_data_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def format_data_type(data, mode='train'):\n",
    "    \"\"\"Coerce the raw csv columns to their working dtypes.\n",
    "\n",
    "    In test mode additionally parses onboardDate, keeps a copy of the\n",
    "    raw timestamp strings and adds an empty ETA column to be filled by\n",
    "    the prediction step later on.\n",
    "    \"\"\"\n",
    "    if mode == 'test':\n",
    "        data['onboardDate'] = pd.to_datetime(data['onboardDate'], infer_datetime_format=True)\n",
    "        data['temp_timestamp'] = data['timestamp']\n",
    "        data['ETA'] = None\n",
    "    data['creatDate'] = None\n",
    "    data['loadingOrder'] = data['loadingOrder'].astype(str)\n",
    "    data['timestamp'] = pd.to_datetime(data['timestamp'], infer_datetime_format=True)\n",
    "    for col in ('longitude', 'latitude', 'speed'):\n",
    "        data[col] = data[col].astype(float)\n",
    "    data['TRANSPORT_TRACE'] = data['TRANSPORT_TRACE'].astype(str)\n",
    "    return data\n",
    "\n",
    "def get_test_data_info(path):\n",
    "    \"\"\"Load the test csv and index its loading orders by transport trace.\"\"\"\n",
    "    data = pd.read_csv(path)\n",
    "    test_trace_set = data['TRANSPORT_TRACE'].unique()\n",
    "    test_order_belong_to_trace = {\n",
    "        trace: data[data['TRANSPORT_TRACE'] == trace]['loadingOrder'].unique()\n",
    "        for trace in test_trace_set\n",
    "    }\n",
    "    return format_data_type(data, mode='test'), test_trace_set, test_order_belong_to_trace\n",
    "\n",
    "test_data_origin, test_trace_set, test_order_belong_to_trace = get_test_data_info(test_data_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_port_info():\n",
    "    \"\"\"Return {port_name: {'LONGITUDE': ..., 'LATITUDE': ...}} for every\n",
    "    port that appears in any test route.\"\"\"\n",
    "    test_port_set = set()\n",
    "    for route in test_trace_set:\n",
    "        test_port_set.update(route.split('-'))\n",
    "    port_data = {}\n",
    "    for row in pd.read_csv(port_path).itertuples():\n",
    "        name = row.TRANS_NODE_NAME\n",
    "        if name in test_port_set:\n",
    "            port_data[name] = {'LONGITUDE': row.LONGITUDE, 'LATITUDE': row.LATITUDE}\n",
    "    return port_data\n",
    "port_data = get_port_info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_order_by_route(route):\n",
    "    \"\"\"Stream the large train GPS csv in chunks and keep the rows of orders\n",
    "    whose TRANSPORT_TRACE starts at the route's first port and mentions its\n",
    "    last port.\"\"\"\n",
    "    ports = route.split(\"-\")\n",
    "    start_port = ports[0]\n",
    "    dest_port = ports[-1]\n",
    "    train_order_by_route = None\n",
    "    # header=None: the raw train file has no header row, so columns are positional.\n",
    "    train_data_origin_chunk = pd.read_csv(train_gps_path, chunksize=2000000, usecols=[0, 2, 3, 4, 6, 12], header=None)\n",
    "    for chunk in tqdm(train_data_origin_chunk):\n",
    "        valid_order_name = chunk[chunk[12].apply(lambda x: str(x).startswith(start_port) and (dest_port in str(x)))][0].unique()\n",
    "        if valid_order_name.size > 0:\n",
    "            valid_order_info = chunk[chunk[0].isin(valid_order_name)]\n",
    "            train_order_by_route = pd.concat([train_order_by_route, valid_order_info])\n",
    "    # NOTE(review): still None (so the next line raises) when no chunk matched\n",
    "    # the route -- consider raising a clearer error for that case.\n",
    "    train_order_by_route.columns = ['loadingOrder', 'timestamp', 'longitude', 'latitude', 'speed', 'TRANSPORT_TRACE']\n",
    "    data = format_data_type(train_order_by_route, mode='train')\n",
    "    return data\n",
    "\n",
    "def get_train_data(route_order_info, route):\n",
    "    \"\"\"Build one aggregated feature row plus a travel-time label (seconds)\n",
    "    per training order on the given route.\"\"\"\n",
    "    ports = route.split(\"-\")\n",
    "    dest_port = ports[-1]\n",
    "    dest_longitude = port_data[dest_port]['LONGITUDE']\n",
    "    dest_latitude = port_data[dest_port]['LATITUDE']\n",
    "    train_data = None\n",
    "    order_list = route_order_info['loadingOrder'].unique()\n",
    "    for order in order_list:\n",
    "        order_info_set = route_order_info[route_order_info['loadingOrder'] == order].sort_values(by='timestamp')\n",
    "        # Departure time: first record with positive speed. Fall back to the\n",
    "        # earliest record so start_time is always bound (the original left it\n",
    "        # undefined when an order never reports speed > 0).\n",
    "        start_time = order_info_set['timestamp'].min()\n",
    "        for info_item in order_info_set.itertuples():\n",
    "            if getattr(info_item, 'speed') > 0:\n",
    "                start_time = getattr(info_item, 'timestamp')\n",
    "                break\n",
    "        # Arrival time: first record within ~1 degree of the destination port\n",
    "        # (GPS based); otherwise the last observed timestamp.\n",
    "        end_time = order_info_set['timestamp'].max()\n",
    "        for info_item in order_info_set.itertuples():\n",
    "            if abs(getattr(info_item, 'longitude') - dest_longitude) < 1 and abs(getattr(info_item, 'latitude') - dest_latitude) < 1:\n",
    "                end_time = min(end_time, getattr(info_item, 'timestamp'))\n",
    "                break\n",
    "        # Keep only the first 40% of the track so training features mimic the\n",
    "        # partial tracks seen at test time. np.ceil avoids depending on the\n",
    "        # math module, which this cell never imported (math.ceil raised\n",
    "        # NameError on a fresh kernel).\n",
    "        cut_size = int(np.ceil(order_info_set.shape[0] * 0.4))\n",
    "        order_info_set = order_info_set[0:cut_size]\n",
    "\n",
    "        agg_function = ['min', 'max', 'mean', 'median']\n",
    "        agg_col = ['latitude', 'longitude', 'speed']\n",
    "        feature_temp = order_info_set.groupby('loadingOrder')[agg_col].agg(agg_function).reset_index()\n",
    "        feature_temp.columns = ['loadingOrder'] + ['{}_{}'.format(i, j) for i in agg_col for j in agg_function]\n",
    "        # Label: sailing time in seconds.\n",
    "        feature_temp['label'] = (end_time - start_time).total_seconds()\n",
    "        train_data = pd.concat([train_data, feature_temp])\n",
    "    # Duplicate tiny training sets (x32) so the KFold split below has enough rows.\n",
    "    if train_data.shape[0] < 10:\n",
    "        for i in range(5):\n",
    "            train_data = pd.concat([train_data, train_data])\n",
    "    return train_data.reset_index(drop=True)\n",
    "\n",
    "def get_test_data(order):\n",
    "    \"\"\"Aggregate one test order's GPS track into a single feature row, using\n",
    "    the same aggregations as get_train_data.\"\"\"\n",
    "    order_info_set = test_data_origin[test_data_origin['loadingOrder'] == order].sort_values(by='timestamp')\n",
    "    agg_function = ['min', 'max', 'mean', 'median']\n",
    "    agg_col = ['latitude', 'longitude', 'speed']\n",
    "    feature = order_info_set.groupby('loadingOrder')[agg_col].agg(agg_function).reset_index()\n",
    "    feature.columns = ['loadingOrder'] + ['{}_{}'.format(i, j) for i in agg_col for j in agg_function]\n",
    "    return feature.reset_index(drop=True)\n",
    "\n",
    "def mse_score_eval(preds, valid):\n",
    "    \"\"\"Custom lightgbm eval metric: mean squared error (lower is better).\"\"\"\n",
    "    labels = valid.get_label()\n",
    "    scores = mean_squared_error(y_true=labels, y_pred=preds)\n",
    "    # is_higher_better must be False for MSE. The original returned True,\n",
    "    # which made lightgbm treat a HIGHER MSE as better and early-stop with\n",
    "    # best iteration [1] in every fold (visible in the old training logs).\n",
    "    return 'mse_score', scores, False\n",
    "\n",
    "def train_model(x, y, seed=981125, is_shuffle=True):\n",
    "    \"\"\"Train K-fold lightgbm regressors on (x, y); returns the model fitted\n",
    "    on the last fold (out-of-fold predictions are computed but unused).\"\"\"\n",
    "    train_pred = np.zeros((x.shape[0], ))\n",
    "    n_splits = min(5, x.shape[0])\n",
    "    # Kfold\n",
    "    fold = KFold(n_splits=n_splits, shuffle=is_shuffle, random_state=seed)\n",
    "    kf_way = fold.split(x)\n",
    "    # params\n",
    "    params = {\n",
    "        'learning_rate': 0.01,\n",
    "        'boosting_type': 'gbdt',\n",
    "        'objective': 'regression',\n",
    "        'num_leaves': 36,\n",
    "        'feature_fraction': 0.6,\n",
    "        'bagging_fraction': 0.7,\n",
    "        'bagging_freq': 6,\n",
    "        'seed': 8,\n",
    "        'bagging_seed': 1,\n",
    "        'feature_fraction_seed': 7,\n",
    "        'min_data_in_leaf': 25,\n",
    "        'nthread': 8,\n",
    "        'verbose': 1,\n",
    "    }\n",
    "    # train\n",
    "    for n_fold, (train_idx, valid_idx) in enumerate(kf_way, start=1):\n",
    "        train_x, train_y = x.iloc[train_idx], y.iloc[train_idx]\n",
    "        valid_x, valid_y = x.iloc[valid_idx], y.iloc[valid_idx]\n",
    "        n_train = lgb.Dataset(train_x, label=train_y)\n",
    "        n_valid = lgb.Dataset(valid_x, label=valid_y)\n",
    "        clf = lgb.train(\n",
    "            params=params,\n",
    "            train_set=n_train,\n",
    "            num_boost_round=3000,\n",
    "            valid_sets=[n_valid],\n",
    "            early_stopping_rounds=100,\n",
    "            verbose_eval=100,\n",
    "            feval=mse_score_eval\n",
    "        )\n",
    "        train_pred[valid_idx] = clf.predict(valid_x, num_iteration=clf.best_iteration)\n",
    "    return clf\n",
    "\n",
    "\n",
    "for route in tqdm(test_order_belong_to_trace):\n",
    "    route_order_info = get_train_order_by_route(route)\n",
    "    train_data = get_train_data(route_order_info, route)\n",
    "\n",
    "    features = [c for c in train_data.columns if c not in ['loadingOrder', 'label']]\n",
    "    model_by_route = train_model(train_data[features], train_data['label'])\n",
    "\n",
    "    for order in test_order_belong_to_trace[route]:\n",
    "        test_order_data = get_test_data(order)\n",
    "        print(test_order_data)\n",
    "        res = model_by_route.predict(test_order_data[features], num_iteration=model_by_route.best_iteration)\n",
    "        test_data_origin.loc[test_data_origin['loadingOrder'] == order, 'ETA'] = (test_data_origin[test_data_origin['loadingOrder'] == order]['onboardDate'] + pd.Timedelta(seconds=res[0])).apply(lambda x: x.strftime('%Y/%m/%d  %H:%M:%S'))\n",
    "        # NOTE(review): these breaks stop after the first order of the first\n",
    "        # route (debug leftovers?) -- remove both for a full submission run.\n",
    "        break\n",
    "    break\n",
    "\n",
    "# pd.datetime was deprecated (and removed in pandas 2.0); use pd.Timestamp.\n",
    "test_data_origin['creatDate'] = pd.Timestamp.now().strftime('%Y/%m/%d  %H:%M:%S')\n",
    "test_data_origin['timestamp'] = test_data_origin['temp_timestamp']\n",
    "\n",
    "result = test_data_origin[['loadingOrder', 'timestamp', 'longitude', 'latitude', 'carrierName', 'vesselMMSI', 'onboardDate', 'ETA', 'creatDate']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the submission. (The previously recorded output was a stale\n",
    "# NameError traceback for `df.to_csv` from an older revision of this cell;\n",
    "# the current source writes `result`, so that output has been cleared.)\n",
    "# NOTE(review): to_csv also writes the DataFrame index as a column --\n",
    "# confirm whether the submission format expects index=False.\n",
    "result.to_csv(result_path)\n",
    "# Uncomment when running on ModelArts to push the result back to OBS.\n",
    "# mox.file.copy_parallel(result_path, OBS_RES_PATH)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.6 64-bit ('AI': conda)",
   "language": "python",
   "name": "python37664bitaiconda6859e03b37c34f0182c9bde8073269f7"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
