{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "import csv\n",
    "import pickle\n",
    "import operator\n",
    "import gc\n",
    "from joblib import Parallel, delayed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "origin_train_data_path = '../../data/train0711.csv'\n",
    "# origin_train_data_path = '../../data/train_spilit.csv'\n",
    "port_info_path = '../../data/DataForModelB/port_info_dict_dump.file'\n",
    "\n",
    "carrierNameSet_info_path = '../../data/DataForModelB/data_for_train/carrierNameSet_dump.file'\n",
    "washed_train_order_brief_path = '../../data/DataForModelB/data_for_train/washed_train_order_brief.csv'\n",
    "train_data_by_order_path_folder = '../../data/DataForModelB/data_for_train/train_data_by_order'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load carrierName\n",
    "carrierName_set = pd.read_csv(origin_train_data_path, usecols = [1], header=None, names=['carrierName'])['carrierName'].unique()\n",
    "with open(carrierNameSet_info_path, \"wb\") as f:\n",
    "    pickle.dump(carrierName_set, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "order_list = []\n",
    "\n",
    "# 给每个订单单独的写指针，每个订单的数据写到不同文件\n",
    "csv_file = {}\n",
    "csv_writer = {}\n",
    "\n",
    "train_data_origin_chunk = pd.read_csv(origin_train_data_path, chunksize = 2000000, usecols = [0,1,2,3,4,6,7,12], header=None\n",
    "                                          , names=['loadingOrder', 'carrierName','timestamp','longitude','latitude','speed','direction','TRANSPORT_TRACE'])    \n",
    "\n",
    "for train_data_origin in tqdm(train_data_origin_chunk):\n",
    "    # if speed < 0, drop it!\n",
    "    train_data_origin = train_data_origin[train_data_origin['speed'] > 0]\n",
    "    \n",
    "    valid_orders = train_data_origin['loadingOrder'].unique()\n",
    "    for order in valid_orders:\n",
    "        if not order in order_list:\n",
    "            order_list.append(order)\n",
    "            order_writer_path = os.path.join(train_data_by_order_path_folder, \"{}_origin_data.csv\".format(order))\n",
    "            csv_file[order] = open(order_writer_path,'w',encoding='utf-8',newline='')\n",
    "            csv_writer[order] = csv.writer(csv_file[order])            \n",
    "    \n",
    "    # 将货运公司编号为ID\n",
    "    for index ,carrierName in enumerate(carrierName_set, 1):\n",
    "        train_data_origin.loc[(train_data_origin.carrierName == carrierName), 'carrierName'] = index\n",
    "    \n",
    "    # 输出不同订单的数据\n",
    "    for row in train_data_origin.itertuples():\n",
    "        order = row.loadingOrder\n",
    "        row_info = row._asdict()\n",
    "        row_info.pop('Index')\n",
    "        csv_writer[order].writerow(row_info.values())\n",
    "\n",
    "for k,v in csv_file.items():\n",
    "    v.close()\n",
    "\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 单个 order 的处理函数\n",
    "def _handle_single_order_for_start_info_and_arrive_info(o_index, order):\n",
    "    # 获取当前订单的数据并且重新编码订单号（缩小文件体积）\n",
    "    order_writer_path = os.path.join(train_data_by_order_path_folder, \"{}_origin_data.csv\".format(order))\n",
    "    order_data = pd.read_csv(order_writer_path, header=None,\n",
    "                            names=['loadingOrder', 'carrierName','timestamp','longitude','latitude','speed','direction','TRANSPORT_TRACE'])\n",
    "    order_data['loadingOrder'] = o_index+1\n",
    "    # 获取承运商、船只ID、路由\n",
    "    order_ID = order_data.loc[0, 'loadingOrder']\n",
    "    carrierName = order_data.loc[0, 'carrierName']\n",
    "\n",
    "    order_data_path = os.path.join(train_data_by_order_path_folder, \"{}_gps_data.csv\".format(order_ID))\n",
    "    order_data.to_csv(order_data_path, header=False, index=False,\n",
    "                      columns=['timestamp', 'longitude', 'latitude', 'speed','direction'])\n",
    "    return order_ID, carrierName\n",
    "\n",
    "\n",
    "# get each order's info and write to file\n",
    "train_data = Parallel(n_jobs=8)(delayed(_handle_single_order_for_start_info_and_arrive_info)\n",
    "                                (index, order)\n",
    "                                for index, order in enumerate(tqdm(order_list)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# order_brief 的写指针\n",
    "order_brief_csvfile = open(washed_train_order_brief_path, 'w', newline='')\n",
    "order_brief_writer = csv.writer(order_brief_csvfile)\n",
    "order_brief_writer.writerow(['loadingOrder', 'carrierName'])\n",
    "\n",
    "# 写 washed_train_order_brief 与 washed_train_order_gps\n",
    "for item in tqdm(train_data):\n",
    "    if not item:\n",
    "        continue\n",
    "    order_brief_writer.writerow(item[0:5])\n",
    "\n",
    "order_brief_csvfile.close()\n",
    "\n",
    "# 清理原始文件\n",
    "for order in order_list:\n",
    "    order_writer_path = os.path.join(train_data_by_order_path_folder, \"{}_origin_data.csv\".format(order))\n",
    "    os.remove(order_writer_path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.6 64-bit ('AI': conda)",
   "language": "python",
   "name": "python37664bitaiconda6859e03b37c34f0182c9bde8073269f7"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
