{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: tqdm in d:\\program\\anaconda\\envs\\ai\\lib\\site-packages (4.46.0)\n",
      "Requirement already satisfied: lightgbm in d:\\program\\anaconda\\envs\\ai\\lib\\site-packages (2.3.0)\n",
      "Requirement already satisfied: numpy in d:\\program\\anaconda\\envs\\ai\\lib\\site-packages (from lightgbm) (1.17.4)\n",
      "Requirement already satisfied: scipy in d:\\program\\anaconda\\envs\\ai\\lib\\site-packages (from lightgbm) (1.3.2)\n",
      "Requirement already satisfied: scikit-learn in d:\\program\\anaconda\\envs\\ai\\lib\\site-packages (from lightgbm) (0.22.1)\n",
      "Requirement already satisfied: joblib>=0.11 in d:\\program\\anaconda\\envs\\ai\\lib\\site-packages (from scikit-learn->lightgbm) (0.14.1)\n"
     ]
    }
   ],
   "source": [
    "# Use %pip (not !pip) so the packages are installed into the active kernel's\n",
    "# environment rather than whatever `pip` happens to be on the shell PATH.\n",
    "%pip install tqdm\n",
    "%pip install lightgbm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports: third-party (pandas/numpy/tqdm/sklearn/lightgbm), stdlib (math/os),\n",
    "# and joblib for process-based parallelism over per-order feature extraction.\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "from sklearn.metrics import mean_squared_error,explained_variance_score\n",
    "from sklearn.model_selection import KFold\n",
    "import lightgbm as lgb\n",
    "import math\n",
    "import os\n",
    "from joblib import Parallel, delayed\n",
    "\n",
    "# Input/output paths, relative to the notebook's working directory.\n",
    "test_data_path = '../data/testData0626.csv'\n",
    "route_order_folder_path = '../data/route_order_data_several'\n",
    "port_path = '../data/port.csv'\n",
    "result_path = '../result_server_20200626_B_several.csv'\n",
    "\n",
    "# import moxing as mox\n",
    "# OBS_RES_PATH =  \"s3://ship-eta/result/result_server_20200622.csv\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def format_data_type(data, mode='train'):\n",
    "    \"\"\"Coerce the raw CSV columns to their working dtypes, in place.\n",
    "\n",
    "    In 'test' mode additionally parse onboardDate, keep a copy of the raw\n",
    "    timestamp, and add empty ETA/creatDate columns for the submission.\n",
    "    Returns the same (mutated) DataFrame.\n",
    "    \"\"\"\n",
    "    if mode=='test':\n",
    "        data['onboardDate'] = pd.to_datetime(data['onboardDate'], infer_datetime_format=True)\n",
    "        data['temp_timestamp'] = data['timestamp']\n",
    "        data['ETA'] = None\n",
    "        data['creatDate'] = None\n",
    "    data['loadingOrder'] = data['loadingOrder'].astype(str)\n",
    "    data['timestamp'] = pd.to_datetime(data['timestamp'], infer_datetime_format=True)\n",
    "    data['longitude'] = data['longitude'].astype(float)\n",
    "    data['latitude'] = data['latitude'].astype(float)\n",
    "    data['speed'] = data['speed'].astype(float)\n",
    "    data['TRANSPORT_TRACE'] = data['TRANSPORT_TRACE'].astype(str)\n",
    "    return data\n",
    "\n",
    "def get_test_data_info(path):\n",
    "    \"\"\"Load the test CSV and group its loading orders by route.\n",
    "\n",
    "    Returns (typed test DataFrame, list of routes handled by this notebook,\n",
    "    dict mapping route -> array of loadingOrder ids on that route).\n",
    "    \"\"\"\n",
    "    data = pd.read_csv(path) \n",
    "#     test_trace_set = data['TRANSPORT_TRACE'].unique()\n",
    "#     test_trace_set = ['CNSHK-CLVAP','CNSHK-GRPIR','CNNSA-GHTEM','CNNSA-NAWVB']\n",
    "    # Only these four routes are re-predicted; all others keep the baseline ETA.\n",
    "    test_trace_set = ['HONGKONG-BU','CNHKG-ARBUE','CNYTN-ARENA','HKHKG-FRFOS']\n",
    "    test_order_belong_to_trace = {}\n",
    "    for item in test_trace_set:\n",
    "        orders = data[data['TRANSPORT_TRACE'] == item]['loadingOrder'].unique()\n",
    "        test_order_belong_to_trace[item] = orders\n",
    "    return format_data_type(data, mode='test'), test_trace_set, test_order_belong_to_trace\n",
    "\n",
    "test_data_origin, test_trace_set, test_order_belong_to_trace = get_test_data_info(test_data_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_port_info():\n",
    "    \"\"\"Collect coordinates for every port appearing in a test route.\n",
    "\n",
    "    Returns {port name: {'LONGITUDE': ..., 'LATITUDE': ...}} restricted to\n",
    "    ports referenced by `test_trace_set`.\n",
    "    \"\"\"\n",
    "    test_port_set = set()\n",
    "    for route in test_trace_set:\n",
    "        test_port_set |= set(route.split('-'))\n",
    "    port_data_origin = pd.read_csv(port_path)\n",
    "    port_data = {}\n",
    "    for item in port_data_origin.itertuples():\n",
    "        node = getattr(item, 'TRANS_NODE_NAME')\n",
    "        if node in test_port_set:\n",
    "            port_data[node] = {'LONGITUDE': getattr(item, 'LONGITUDE'), 'LATITUDE': getattr(item, 'LATITUDE')}\n",
    "    return port_data\n",
    "port_data = get_port_info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_route_order_data(route):\n",
    "    \"\"\"Load both training inputs for one route.\n",
    "\n",
    "    Returns (raw per-order track points, precomputed per-order summary table).\n",
    "    \"\"\"\n",
    "    raw_path = os.path.join(route_order_folder_path, \"{}.csv\".format(route))\n",
    "    data_origin = pd.read_csv(raw_path, header=None, usecols=[0, 2, 3, 4],\n",
    "                              names=['loadingOrder', 'longitude', 'latitude', 'speed'])\n",
    "    if data_origin.shape[0] == 0:\n",
    "        print(\"error == \", route)\n",
    "    handled_path = os.path.join(route_order_folder_path, \"{}_speed_dis_time.csv\".format(route))\n",
    "    route_order_handled = pd.read_csv(handled_path, usecols=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n",
    "    return data_origin, route_order_handled"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregations applied to a test order's track to build its feature row;\n",
    "# `cols` is the flattened '<field>_<stat>' naming applied after .agg().\n",
    "agg_dict = {'latitude':['median'], 'longitude':['median'], 'speed':['median','max','mean']}\n",
    "cols = ['{}_{}'.format(k1, k2) for k1 in agg_dict for k2 in agg_dict[k1]]\n",
    "\n",
    "def handle_train_data(name, order_info_set, route_order_handled):\n",
    "    \"\"\"Build one training feature row for loading order `name`.\n",
    "\n",
    "    order_info_set: raw track points for this order.\n",
    "    route_order_handled: per-order summary table with precomputed stats,\n",
    "    trimming indices, and the label.\n",
    "    Returns a one-row DataFrame of features plus a 'label' column.\n",
    "    \"\"\"\n",
    "    order_info_set = order_info_set.reset_index(drop=True)\n",
    "    # Look the summary row up once instead of re-filtering the table per field.\n",
    "    row = route_order_handled[route_order_handled['loadingOrder'] == name]\n",
    "    start_index = int(row.start_index.iloc[0])\n",
    "    end_index = int(row.end_index.iloc[0])\n",
    "    # Keep only the first 40% of the trimmed track (simulates an in-transit order).\n",
    "    order_info_set = order_info_set[start_index:end_index+1]\n",
    "    cut_size = math.ceil(order_info_set.shape[0]*0.4)\n",
    "    order_info_set = order_info_set[0:cut_size]\n",
    "    # Downsample long tracks to at most 100 evenly spaced points.\n",
    "    # NOTE(review): the trimmed track is never used below -- the features all\n",
    "    # come from `row`; kept for parity with the original flow.\n",
    "    if (order_info_set.shape[0] > 100):\n",
    "        index = np.linspace(0, order_info_set.shape[0]-1, num=100,dtype=int).tolist()\n",
    "        order_info_set = order_info_set.iloc[index]\n",
    "    # Assemble the feature row from the precomputed statistics; .iloc[0]\n",
    "    # avoids the deprecated int()/float() on a one-element Series.\n",
    "    feature_temp = pd.DataFrame({'loadingOrder': [name],\n",
    "                                 'speed_median': [float(row.speed_median.iloc[0])],\n",
    "                                 'speed_max': [float(row.speed_max.iloc[0])],\n",
    "                                 'speed_mean': [float(row.speed_mean.iloc[0])],\n",
    "                                 'total_dis': [float(row.total_dis.iloc[0])],\n",
    "                                 'latitude_median': [float(row.latitude_median.iloc[0])],\n",
    "                                 'longitude_median': [float(row.longitude_median.iloc[0])]})\n",
    "    feature_temp['label'] = float(row.label.iloc[0])\n",
    "    return feature_temp\n",
    "\n",
    "def get_train_data(route_order_info, route_order_handled):\n",
    "    \"\"\"Build the per-order training frame for one route, in parallel.\n",
    "\n",
    "    Tiny routes (<10 orders) are oversampled by doubling the frame five\n",
    "    times (32x) so the KFold split downstream has enough rows.\n",
    "    \"\"\"\n",
    "    order_list = route_order_info['loadingOrder'].unique()\n",
    "    # NOTE(review): 'route' here is the global loop variable leaked from the\n",
    "    # driver cell -- consider passing it in explicitly.\n",
    "    print(route, order_list.shape)\n",
    "    \n",
    "    data_grouped = route_order_info.groupby('loadingOrder')\n",
    "\n",
    "    # One feature row per order, built across 8 worker processes.\n",
    "    train_data = Parallel(n_jobs=8)(delayed(handle_train_data)(name, group, route_order_handled) for name, group in data_grouped)\n",
    "    train_data = pd.concat(train_data)\n",
    "#     for name, group in data_grouped:\n",
    "#         handle_train_data(name, group, route_order_handled)\n",
    "    if (train_data.shape[0] < 10):\n",
    "        for i in range(5):\n",
    "            train_data = pd.concat([train_data,train_data])\n",
    "    return train_data.reset_index(drop=True)\n",
    "\n",
    "def get_test_data(order):\n",
    "    \"\"\"Aggregate one test order's track into a single feature row whose\n",
    "    columns match the training features.\"\"\"\n",
    "    track = test_data_origin[test_data_origin['loadingOrder'] == order]\n",
    "    track = track.sort_values(by='timestamp')\n",
    "    feature = track.groupby('loadingOrder').agg(agg_dict)\n",
    "    feature = feature.reset_index()\n",
    "    feature.columns = ['loadingOrder'] + cols\n",
    "    return feature.reset_index(drop=True)\n",
    "def mse_score_eval(preds, valid):\n",
    "    \"\"\"Custom LightGBM feval: mean squared error on the validation fold.\n",
    "\n",
    "    Returns (metric_name, value, is_higher_better). MSE is a loss, so\n",
    "    is_higher_better must be False; the original returned True, which\n",
    "    inverts the metric's direction for early stopping / best-iteration\n",
    "    tracking.\n",
    "    \"\"\"\n",
    "    labels = valid.get_label()\n",
    "    scores = mean_squared_error(y_true=labels, y_pred=preds)\n",
    "    return 'mse_score', scores, False\n",
    "def train_model(x, y, seed=981125, is_shuffle=True):\n",
    "    \"\"\"Train a LightGBM regressor with K-fold cross-validation.\n",
    "\n",
    "    NOTE(review): only the model from the LAST fold is returned, and the\n",
    "    out-of-fold predictions accumulated in train_pred are discarded.\n",
    "    Assumes x has at least 2 rows (KFold needs n_splits >= 2); the caller\n",
    "    inflates tiny routes beforehand, so this holds in practice -- confirm.\n",
    "    \"\"\"\n",
    "    train_pred = np.zeros((x.shape[0], ))\n",
    "    n_splits = min(5, x.shape[0])\n",
    "    # Kfold\n",
    "    fold = KFold(n_splits=n_splits, shuffle=is_shuffle, random_state=seed)\n",
    "    kf_way = fold.split(x)\n",
    "    # params\n",
    "    params = {\n",
    "        'learning_rate': 0.01,\n",
    "        'boosting_type': 'gbdt',\n",
    "        'objective': 'regression',\n",
    "        'num_leaves': 36,\n",
    "        'feature_fraction': 0.6,\n",
    "        'bagging_fraction': 0.7,\n",
    "        'bagging_freq': 6,\n",
    "        'seed': 8,\n",
    "        'bagging_seed': 1,\n",
    "        'feature_fraction_seed': 7,\n",
    "        'min_data_in_leaf': 25,\n",
    "        'nthread': 8,\n",
    "        'verbose': 1,\n",
    "    }\n",
    "    # train\n",
    "    for n_fold, (train_idx, valid_idx) in enumerate(kf_way, start=1):\n",
    "        train_x, train_y = x.iloc[train_idx], y.iloc[train_idx]\n",
    "        valid_x, valid_y = x.iloc[valid_idx], y.iloc[valid_idx]\n",
    "        # Load the fold into LightGBM datasets.\n",
    "        n_train = lgb.Dataset(train_x, label=train_y)\n",
    "        n_valid = lgb.Dataset(valid_x, label=valid_y)\n",
    "        # NOTE(review): early_stopping_rounds/verbose_eval are LightGBM\n",
    "        # 2.x/3.x keyword arguments (removed in 4.x in favor of callbacks).\n",
    "        clf = lgb.train(\n",
    "            params=params,\n",
    "            train_set=n_train,\n",
    "            num_boost_round=3000,\n",
    "            valid_sets=[n_valid],\n",
    "            early_stopping_rounds=100,\n",
    "            verbose_eval=100,\n",
    "            feval=mse_score_eval\n",
    "        )\n",
    "        # Out-of-fold predictions (computed but not returned).\n",
    "        train_pred[valid_idx] = clf.predict(valid_x, num_iteration=clf.best_iteration)\n",
    "    return clf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Baseline submission whose ETA column is overwritten below for the four\n",
    "# re-predicted routes; all other rows are kept as-is.\n",
    "csv_3200_path = '../result_server_20200626_B_origin.csv'\n",
    "csv_3200_best = pd.read_csv(csv_3200_path)\n",
    "csv_3200_best['onboardDate'] = pd.to_datetime(csv_3200_best['onboardDate'], infer_datetime_format=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "tags": [
     "outputPrepend"
    ]
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/4 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=================================\n",
      "HONGKONG-BU (84,)\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.00709e+13\tvalid_0's mse_score: 1.00709e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.00709e+13\tvalid_0's mse_score: 1.00709e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.00704e+13\tvalid_0's mse_score: 1.00704e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.00704e+13\tvalid_0's mse_score: 1.00704e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 9.82873e+12\tvalid_0's mse_score: 9.82873e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 9.82873e+12\tvalid_0's mse_score: 9.82873e+12\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.0764e+13\tvalid_0's mse_score: 1.0764e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.0764e+13\tvalid_0's mse_score: 1.0764e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 9.75256e+12\tvalid_0's mse_score: 9.75256e+12"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 25%|██▌       | 1/4 [00:12<00:38, 12.77s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 9.75256e+12\tvalid_0's mse_score: 9.75256e+12\n",
      "=================================\n",
      "CNHKG-ARBUE (84,)\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.00562e+13\tvalid_0's mse_score: 1.00562e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.00562e+13\tvalid_0's mse_score: 1.00562e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.00561e+13\tvalid_0's mse_score: 1.00561e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.00561e+13\tvalid_0's mse_score: 1.00561e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 9.81673e+12\tvalid_0's mse_score: 9.81673e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 9.81673e+12\tvalid_0's mse_score: 9.81673e+12\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.075e+13\tvalid_0's mse_score: 1.075e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.075e+13\tvalid_0's mse_score: 1.075e+13\n",
      "Training until validation scores don't improve for 100 rounds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 50%|█████     | 2/4 [00:23<00:24, 12.17s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[100]\tvalid_0's l2: 9.73632e+12\tvalid_0's mse_score: 9.73632e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 9.73632e+12\tvalid_0's mse_score: 9.73632e+12\n",
      "=================================\n",
      "CNYTN-ARENA (84,)\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.00572e+13\tvalid_0's mse_score: 1.00572e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.00572e+13\tvalid_0's mse_score: 1.00572e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.00555e+13\tvalid_0's mse_score: 1.00555e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.00555e+13\tvalid_0's mse_score: 1.00555e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 9.81649e+12\tvalid_0's mse_score: 9.81649e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 9.81649e+12\tvalid_0's mse_score: 9.81649e+12\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 1.07497e+13\tvalid_0's mse_score: 1.07497e+13\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 1.07497e+13\tvalid_0's mse_score: 1.07497e+13\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 9.73374e+12\tvalid_0's mse_score: 9.73374e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 9.73374e+12\tvalid_0's mse_score: 9.73374e+12\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 75%|███████▌  | 3/4 [00:36<00:12, 12.41s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=================================\n",
      "HKHKG-FRFOS (86,)\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 6.29223e+12\tvalid_0's mse_score: 6.29223e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 6.29223e+12\tvalid_0's mse_score: 6.29223e+12\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 6.98557e+12\tvalid_0's mse_score: 6.98557e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 6.98557e+12\tvalid_0's mse_score: 6.98557e+12\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 6.16908e+12\tvalid_0's mse_score: 6.16908e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 6.16908e+12\tvalid_0's mse_score: 6.16908e+12\n",
      "Training until validation scores don't improve for 100 rounds\n",
      "[100]\tvalid_0's l2: 5.55055e+12\tvalid_0's mse_score: 5.55055e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 5.55055e+12\tvalid_0's mse_score: 5.55055e+12\n",
      "Training until validation scores don't improve for 100 rounds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 4/4 [00:44<00:00, 11.06s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[100]\tvalid_0's l2: 5.53645e+12\tvalid_0's mse_score: 5.53645e+12\n",
      "Early stopping, best iteration is:\n",
      "[1]\tvalid_0's l2: 5.53645e+12\tvalid_0's mse_score: 5.53645e+12\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "for route in tqdm(test_order_belong_to_trace):\n",
    "    print('=================================')\n",
    "    route_order_info, route_order_handled = get_train_route_order_data(route)\n",
    "    train_data = get_train_data(route_order_info, route_order_handled)\n",
    "    features = [c for c in train_data.columns if c not in ['loadingOrder', 'label', 'start_index', 'end_index', 'total_dis']]\n",
    "#     print(train_data[features])\n",
    "#     print(train_data['label'])\n",
    "    model_by_route = train_model(train_data[features], train_data['label'])\n",
    "    for order in test_order_belong_to_trace[route]:\n",
    "        test_order_data = get_test_data(order)\n",
    "        res = model_by_route.predict(test_order_data[features], num_iteration=model_by_route.best_iteration)\n",
    "        csv_3200_best.loc[test_data_origin['loadingOrder'] == order, 'ETA'] = (csv_3200_best[csv_3200_best['loadingOrder'] == order]['onboardDate'] + pd.Timedelta(seconds=res[0])).apply(lambda x:x.strftime('%Y/%m/%d  %H:%M:%S'))\n",
    "    \n",
    "# test_data_origin['creatDate'] = pd.datetime.now().strftime('%Y/%m/%d  %H:%M:%S')\n",
    "# test_data_origin['timestamp'] = test_data_origin['temp_timestamp']\n",
    "\n",
    "# result = test_data_origin[['loadingOrder', 'timestamp', 'longitude', 'latitude', 'carrierName', 'vesselMMSI', 'onboardDate', 'ETA', 'creatDate']]\n",
    "# result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the patched submission (index dropped to match the submission format).\n",
    "csv_3200_best.to_csv(result_path, index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 4/4 [00:00<00:00, 4008.89it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ZA229272050987\n",
      "YC498927281293\n",
      "HJ246261379392\n",
      "HV544902512699\n",
      "JU360847167491\n",
      "KM466086744301\n",
      "KW203223353208\n",
      "QD564688243325\n",
      "YI904717006355\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: list every order id that received a re-predicted ETA.\n",
    "test_trace_set = ['HONGKONG-BU','CNHKG-ARBUE','CNYTN-ARENA','HKHKG-FRFOS']\n",
    "for trace in tqdm(test_trace_set):\n",
    "    for loading_order in test_order_belong_to_trace[trace]:\n",
    "        print(loading_order)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.6 64-bit ('AI': conda)",
   "language": "python",
   "name": "python37664bitaiconda6859e03b37c34f0182c9bde8073269f7"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
