{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Populating the interactive namespace from numpy and matplotlib\n"
     ]
    }
   ],
   "source": [
    "import os, sys, glob\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "import time\n",
    "import datetime\n",
    "\n",
    "from joblib import Parallel, delayed\n",
    "from sklearn.metrics import f1_score, log_loss, classification_report\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "import lightgbm as lgb\n",
    "\n",
    "%pylab inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "单独统计每个渔船id的统计信息，LGB训练；\n",
    "\n",
    "CV 0.67, 线上0.62"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_feat(path, test_mode=False):\n",
    "    df = pd.read_csv(path)\n",
    "    df = df.iloc[::-1]\n",
    "    \n",
    "    if test_mode:\n",
    "        df_feat = [df['渔船ID'].iloc[0], df['type'].iloc[0]]\n",
    "        df = df.drop(['type'], axis=1)\n",
    "    else:\n",
    "        df_feat = [df['渔船ID'].iloc[0]]\n",
    "        \n",
    "    df['time'] = df['time'].apply(lambda x: datetime.datetime.strptime(x, \"%m%d %H:%M:%S\"))\n",
    "    df_diff = df.diff(1).iloc[1:]\n",
    "    df_diff['time_seconds'] = df_diff['time'].dt.total_seconds()\n",
    "    df_diff['dis'] = np.sqrt(df_diff['x']**2 + df_diff['y']**2)\n",
    "    \n",
    "    df_feat.append(df['time'].dt.day.nunique())\n",
    "    df_feat.append(df['time'].dt.hour.min())\n",
    "    df_feat.append(df['time'].dt.hour.max())\n",
    "    df_feat.append(df['time'].dt.hour.value_counts().index[0])\n",
    "\n",
    "    df_feat.append(df['速度'].min())\n",
    "    df_feat.append(df['速度'].max())\n",
    "    df_feat.append(df['速度'].mean())\n",
    "\n",
    "    # df_feat.append(df_diff['time'].min())\n",
    "    # df_feat.append(df_diff['time'].max())\n",
    "    # df_feat.append(df_diff['time'].mean())\n",
    "    \n",
    "    df_feat.append(df_diff['速度'].min())\n",
    "    df_feat.append(df_diff['速度'].max())\n",
    "    df_feat.append(df_diff['速度'].mean())\n",
    "    df_feat.append((df_diff['速度'] > 0).mean())\n",
    "    df_feat.append((df_diff['速度'] == 0).mean())\n",
    "\n",
    "    df_feat.append(df_diff['方向'].min())\n",
    "    df_feat.append(df_diff['方向'].max())\n",
    "    df_feat.append(df_diff['方向'].mean())\n",
    "    df_feat.append((df_diff['方向'] > 0).mean())\n",
    "    df_feat.append((df_diff['方向'] == 0).mean())\n",
    "\n",
    "    df_feat.append((df_diff['x'].abs() / df_diff['time_seconds']).min())\n",
    "    df_feat.append((df_diff['x'].abs() / df_diff['time_seconds']).max())\n",
    "    df_feat.append((df_diff['x'].abs() / df_diff['time_seconds']).mean())\n",
    "    df_feat.append((df_diff['x'] > 0).mean())\n",
    "    df_feat.append((df_diff['x'] == 0).mean())\n",
    "\n",
    "    df_feat.append((df_diff['y'].abs() / df_diff['time_seconds']).min())\n",
    "    df_feat.append((df_diff['y'].abs() / df_diff['time_seconds']).max())\n",
    "    df_feat.append((df_diff['y'].abs() / df_diff['time_seconds']).mean())\n",
    "    df_feat.append((df_diff['y'] > 0).mean())\n",
    "    df_feat.append((df_diff['y'] == 0).mean())\n",
    "    \n",
    "    df_feat.append(df_diff['dis'].min())\n",
    "    df_feat.append(df_diff['dis'].max())\n",
    "    df_feat.append(df_diff['dis'].mean())\n",
    "\n",
    "    df_feat.append((df_diff['dis']/df_diff['time_seconds']).min())\n",
    "    df_feat.append((df_diff['dis']/df_diff['time_seconds']).max())\n",
    "    df_feat.append((df_diff['dis']/df_diff['time_seconds']).mean())\n",
    "    \n",
    "    return df_feat"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke test: inspect the feature vector produced for a single vessel file.\n",
    "# NOTE(review): this path ('../input/...') differs from the 'data/' layout\n",
    "# used by the cells below - update it before uncommenting.\n",
    "#read_feat('../input/hy_round1_train_20200102/4285.csv', True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_feat = Parallel(n_jobs=10)(delayed(read_feat)(path, True) \n",
    "                                 for path in glob.glob('data/hy_round1_train_20200102/*')[:])\n",
    "train_feat = pd.DataFrame(train_feat)\n",
    "\n",
    "test_feat = Parallel(n_jobs=10)(delayed(read_feat)(path, False) \n",
    "                                 for path in glob.glob('data/hy_round1_testA_20200102/*')[:])\n",
    "test_feat = pd.DataFrame(test_feat)\n",
    "test_feat = test_feat.sort_values(by=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(7000, 35)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_feat.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_feat[1] = train_feat[1].map({'围网':0,'刺网':1,'拖网':2})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LGBMClassifier(bagging_fraction=0.85, boosting='gbdt', boosting_type='gbdt',\n",
      "               class_weight=None, colsample_bytree=1.0, feature_fraction=0.75,\n",
      "               importance_type='split', lambda_l1=2, learning_rate=0.01,\n",
      "               max_depth=7, metric='multi_error', min_child_samples=5,\n",
      "               min_child_weight=0.001, min_split_gain=0.0, n_estimators=2000,\n",
      "               n_jobs=-1, num_class=3, num_leaves=31, num_threads=20,\n",
      "               objective='multiclass', random_state=None, reg_alpha=0.0,\n",
      "               reg_lambda=0.0, seed=99, silent=True, subsample=1.0,\n",
      "               subsample_for_bin=200000, subsample_freq=0, verbose=-1)\n",
      "1: Train 0.9253146 Val 0.6555845/0.6555845\n",
      "--------------------------------------------------\n",
      "2: Train 0.9150642 Val 0.6324312/0.6440079\n",
      "--------------------------------------------------\n",
      "3: Train 0.7367574 Val 0.6389412/0.6423190\n",
      "--------------------------------------------------\n",
      "4: Train 0.8897978 Val 0.6493929/0.6440875\n",
      "--------------------------------------------------\n",
      "5: Train 0.8797314 Val 0.6616644/0.6476028\n",
      "--------------------------------------------------\n",
      "6: Train 0.8163340 Val 0.6164265/0.6424068\n",
      "--------------------------------------------------\n",
      "7: Train 0.7991134 Val 0.6481515/0.6432275\n",
      "--------------------------------------------------\n",
      "8: Train 0.8955925 Val 0.6758954/0.6473109\n",
      "--------------------------------------------------\n",
      "9: Train 0.8123879 Val 0.5994028/0.6419878\n",
      "--------------------------------------------------\n",
      "10: Train 0.9460773 Val 0.6563351/0.6434226\n",
      "--------------------------------------------------\n",
      "Train:  [0.9253146226704442, 0.9150642012483566, 0.7367573990665802, 0.8897977945322149, 0.8797313625012642, 0.8163339622568134, 0.799113367929691, 0.8955924730710229, 0.8123878853363289, 0.9460772587735752]\n",
      "Val:  [0.6555844680043249, 0.6324312344958664, 0.6389412041485069, 0.6493929175327161, 0.6616644052464947, 0.6164265359529953, 0.6481514633143706, 0.6758953577632644, 0.5994027942356269, 0.6563351435722664]\n",
      "--------------------------------------------------\n",
      "Train0.86162_Test0.64342\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "n_fold = 10\n",
    "skf = StratifiedKFold(n_splits = n_fold, shuffle = True)\n",
    "eval_fun = f1_score\n",
    "\n",
    "def run_oof(clf, X_train, y_train, X_test, kf):\n",
    "    print(clf)\n",
    "    preds_train = np.zeros((len(X_train), 3), dtype = np.float)\n",
    "    preds_test = np.zeros((len(X_test), 3), dtype = np.float)\n",
    "    train_loss = []; test_loss = []\n",
    "\n",
    "    i = 1\n",
    "    for train_index, test_index in kf.split(X_train, y_train):\n",
    "        x_tr = X_train[train_index]; x_te = X_train[test_index]\n",
    "        y_tr = y_train[train_index]; y_te = y_train[test_index]\n",
    "        clf.fit(x_tr, y_tr, eval_set = [(x_te, y_te)], early_stopping_rounds = 500, verbose = False)\n",
    "        \n",
    "        train_loss.append(eval_fun(y_tr, np.argmax(clf.predict_proba(x_tr)[:], 1), average='macro'))\n",
    "        test_loss.append(eval_fun(y_te, np.argmax(clf.predict_proba(x_te)[:], 1), average='macro'))\n",
    "\n",
    "        preds_train[test_index] = clf.predict_proba(x_te)[:]\n",
    "        preds_test += clf.predict_proba(X_test)[:]\n",
    "\n",
    "        print('{0}: Train {1:0.7f} Val {2:0.7f}/{3:0.7f}'.format(i, train_loss[-1], test_loss[-1], np.mean(test_loss)))\n",
    "        print('-' * 50)\n",
    "        i += 1\n",
    "    print('Train: ', train_loss)\n",
    "    print('Val: ', test_loss)\n",
    "    print('-' * 50)\n",
    "    print('Train{0:0.5f}_Test{1:0.5f}\\n\\n'.format(np.mean(train_loss), np.mean(test_loss)))\n",
    "    preds_test /= n_fold\n",
    "    return preds_train, preds_test\n",
    "\n",
    "params = {\n",
    "    'learning_rate': 0.01,\n",
    "    'min_child_samples': 5,\n",
    "    'max_depth': 7,\n",
    "    'lambda_l1': 2,\n",
    "    'boosting': 'gbdt',\n",
    "    'objective': 'multiclass',\n",
    "    'n_estimators': 2000,\n",
    "    'metric': 'multi_error',\n",
    "    'num_class': 3,\n",
    "    'feature_fraction': .75,\n",
    "    'bagging_fraction': .85,\n",
    "    'seed': 99,\n",
    "    'num_threads': 20,\n",
    "    'verbose': -1\n",
    "}\n",
    "\n",
    "train_pred, test_pred = run_oof(lgb.LGBMClassifier(**params), \n",
    "                                train_feat.iloc[:, 2:].values, \n",
    "                                train_feat.iloc[:, 1].values, \n",
    "                                test_feat.iloc[:, 1:].values, \n",
    "                                skf)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "oof f1 0.6440191068129768\n",
      "train accuracy 0.7412857142857143\n"
     ]
    }
   ],
   "source": [
    "train_predh = np.argmax(train_pred, 1)\n",
    "print('oof f1', metrics.f1_score(train_feat[1],train_predh, average='macro'))\n",
    "print('train accuracy', accuracy_score(train_feat[1],train_predh))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_feat['label'] = np.argmax(test_pred, 1)\n",
    "test_feat['label'] = test_feat['label'].map({0:'围网',1:'刺网',2:'拖网'})\n",
    "test_feat[[0, 'label']].to_csv('baseline.csv',index=None, header=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
