{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Copyright (c) 2020, NVIDIA CORPORATION.\n",
    "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "you may not use this file except in compliance with the License.\n",
    "You may obtain a copy of the License at\n",
    "    http://www.apache.org/licenses/LICENSE-2.0\n",
    "Unless required by applicable law or agreed to in writing, software\n",
    "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "See the License for the specific language governing permissions and\n",
    "limitations under the License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RecSys - Submit - MultiGPU"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os, time\n",
    "#os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
    "VER = 330\n",
    "start = time.time()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from dask_cuda import LocalCUDACluster\n",
    "from dask.distributed import Client\n",
    "import dask_cudf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'0.14.0'"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import pandas as pd, numpy as np, gc\n",
    "from datetime import datetime\n",
    "import matplotlib.pyplot as plt\n",
    "pd.set_option('display.max_columns', 500)\n",
    "pd.set_option('display.max_rows', 500)\n",
    "import cudf, cupy, time\n",
    "cudf.__version__"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def add_freq_tweet(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['a_user_id', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['a_user_id', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2['idx'] = gf2.index\n",
    "    \n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf_unique = gf[['a_user_id', 'tweet_id']].drop_duplicates()\n",
    "\n",
    "    gf_unique = gf_unique.groupby(['a_user_id']).count().reset_index()    \n",
    "    gf_unique.columns = ['a_user_id_tmp', 'no_tweet']\n",
    "    gf1 = gf1.merge(gf_unique[['a_user_id_tmp', 'no_tweet']], how='left', left_on='b_user_id', right_on='a_user_id_tmp')\n",
    "    gf2 = gf2.merge(gf_unique[['a_user_id_tmp', 'no_tweet']], how='left', left_on='b_user_id', right_on='a_user_id_tmp') \n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    gf2 = gf2.sort_values('idx')\n",
    "    \n",
    "    train['no_tweet'] = gf1['no_tweet'].fillna(0).astype('int32').to_array()\n",
    "    valid['no_tweet'] = gf2['no_tweet'].fillna(0).astype('int32').to_array()\n",
    "\n",
    "def diff_time(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['timestamp', 'a_user_id', 'b_user_id', 'tweet_id', 'no_tweet']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['timestamp', 'a_user_id', 'b_user_id', 'tweet_id', 'no_tweet']]).reset_index(drop=True)\n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf = dask_cudf.from_cudf(gf, npartitions=16)\n",
    "    gf['timestamp'] = gf['timestamp'].astype('int64')/1e9\n",
    "    gf_unique = gf[['timestamp', 'a_user_id', 'tweet_id']].drop_duplicates()\n",
    "    gf_unique.columns = ['tmp_timestamp', 'tmp_a_user_id', 'tmp_tweet_id']\n",
    "    gf = gf[gf['no_tweet']!=0]\n",
    "    gf = gf.drop('no_tweet', axis=1)\n",
    "    gf = gf.drop('a_user_id', axis=1)\n",
    "    gf = gf.merge(gf_unique, how='left', left_on='b_user_id', right_on='tmp_a_user_id')\n",
    "    gf = gf[gf['tweet_id']!=gf['tmp_tweet_id']]\n",
    "    gf = gf[~gf['tmp_a_user_id'].isna()]\n",
    "\n",
    "    gf['diff_timestamp_prev'] = gf['timestamp']-gf['tmp_timestamp']\n",
    "    gf['diff_timestamp_after'] = gf['tmp_timestamp']-gf['timestamp']\n",
    "\n",
    "    gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']>0, 15*24*3600)\n",
    "    gf['diff_timestamp_prev'] = gf.diff_timestamp_prev.where(gf['diff_timestamp_prev']>0, 15*24*3600)\n",
    "\n",
    "    gf = gf[['tweet_id', \n",
    "             'b_user_id', \n",
    "             'diff_timestamp_prev', \n",
    "             'diff_timestamp_after']].groupby(['tweet_id', 'b_user_id']).min().reset_index()\n",
    "\n",
    "    gf.to_parquet('/tmp/gf')\n",
    "    del gf; del gf_unique; del gf1; del gf2; gc.collect()\n",
    "\n",
    "    gf = cudf.read_parquet('/tmp/gf/part.0.parquet')\n",
    "    gf1 = cudf.from_pandas(train[['b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['tweet_id', 'b_user_id'], right_on=['tweet_id', 'b_user_id'])\n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    train['diff_timestamp_prev'] = gf1['diff_timestamp_prev'].fillna(15*24*3600).astype('int32').to_array()\n",
    "    train['diff_timestamp_after'] = gf1['diff_timestamp_after'].fillna(15*24*3600).astype('int32').to_array()\n",
    "    del gf1; gc.collect()\n",
    "\n",
    "    gf1 = cudf.from_pandas(valid[['b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['tweet_id', 'b_user_id'], right_on=['tweet_id', 'b_user_id'])\n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    valid['diff_timestamp_prev'] = gf1['diff_timestamp_prev'].fillna(15*24*3600).astype('int32').to_array()\n",
    "    valid['diff_timestamp_after'] = gf1['diff_timestamp_after'].fillna(15*24*3600).astype('int32').to_array()\n",
    "    \n",
    "def add_diff_user1(train, valid, col):\n",
    "\n",
    "    gf1 = cudf.from_pandas(train[[col, 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[[col, 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2['idx'] = gf2.index\n",
    "    \n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf_lang = gf[['b_user_id', col, 'tweet_id']]#.drop_duplicates()\n",
    "    gf_lang = gf_lang[gf_lang[col]!=0]\n",
    "    gf_lang = gf_lang.groupby(['b_user_id', col]).count()\n",
    "    gf_lang = gf_lang.reset_index()\n",
    "    gf_lang = gf_lang[gf_lang['tweet_id']>3]\n",
    "    gf_lang = gf_lang.sort_values(['b_user_id', 'tweet_id'], ascending=False)\n",
    "    gf_lang['b_user_id_shifted'] = gf_lang['b_user_id'].shift(1)\n",
    "    gf_lang = gf_lang[gf_lang['b_user_id_shifted']!=gf_lang['b_user_id']]\n",
    "    gf_lang.columns = ['b_user_id_lang', 'top_' + col, 'drop1', 'drop2']\n",
    "    gf1 = gf1.merge(gf_lang[['b_user_id_lang', 'top_' + col, 'drop1', 'drop2']], how='left', left_on='b_user_id', right_on='b_user_id_lang')\n",
    "    gf2 = gf2.merge(gf_lang[['b_user_id_lang', 'top_' + col, 'drop1', 'drop2']], how='left', left_on='b_user_id', right_on='b_user_id_lang')\n",
    "    \n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    gf2 = gf2.sort_values('idx')\n",
    "    \n",
    "    gf1['same_' + col] = gf1[col] == gf1['top_' + col]\n",
    "    gf1['diff_' + col] = gf1[col] != gf1['top_' + col]\n",
    "    gf1['nan_' + col] = 0\n",
    "    gf1.loc[gf1['top_' + col].isna(), 'same_' + col] = 0\n",
    "    gf1.loc[gf1['top_' + col].isna(), 'diff_' + col] = 0\n",
    "    gf1.loc[gf1['top_' + col].isna(), 'nan_' + col] = 1\n",
    "    \n",
    "    gf2['same_' + col] = gf2[col] == gf2['top_' + col]\n",
    "    gf2['diff_' + col] = gf2[col] != gf2['top_' + col]\n",
    "    gf2['nan_' + col] = 0\n",
    "    gf2.loc[gf2['top_' + col].isna(), 'same_' + col] = 0\n",
    "    gf2.loc[gf2['top_' + col].isna(), 'diff_' + col] = 0\n",
    "    gf2.loc[gf2['top_' + col].isna(), 'nan_' + col] = 1\n",
    "    \n",
    "    train['same_' + col] = gf1['same_' + col].fillna(0).astype('int8').to_array()\n",
    "    train['diff_' + col] = gf1['diff_' + col].fillna(0).astype('int8').to_array()\n",
    "    train['nan_' + col] = gf1['nan_' + col].fillna(0).astype('int8').to_array()\n",
    "    \n",
    "    valid['same_' + col] = gf2['same_' + col].fillna(0).astype('int8').to_array()\n",
    "    valid['diff_' + col] = gf2['diff_' + col].fillna(0).astype('int8').to_array()\n",
    "    valid['nan_' + col] = gf2['nan_' + col].fillna(0).astype('int8').to_array()\n",
    "\n",
    "def add_diff_user1_fixed(train, valid, col):\n",
    "    col = 'tw_hash0'\n",
    "    gf1 = cudf.from_pandas(train[[col, 'tw_hash1', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[[col, 'tw_hash1', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2['idx'] = gf2.index\n",
    "    \n",
    "    gf_lang = cudf.concat([gf1[['tw_hash0', 'b_user_id', 'tweet_id']],\n",
    "                      gf1[['tw_hash1', 'b_user_id', 'tweet_id']],\n",
    "                      gf2[['tw_hash0', 'b_user_id', 'tweet_id']],\n",
    "                      gf2[['tw_hash1', 'b_user_id', 'tweet_id']]], axis=0)\n",
    "    gf_lang = gf_lang[['b_user_id', col, 'tweet_id']].drop_duplicates()\n",
    "    gf_lang = gf_lang[gf_lang[col]!=0]\n",
    "    gf_lang = gf_lang.groupby(['b_user_id', col]).count()\n",
    "    gf_lang = gf_lang.reset_index()\n",
    "    gf_lang = gf_lang[gf_lang['tweet_id']>3]\n",
    "    gf_lang = gf_lang.sort_values(['b_user_id', 'tweet_id'], ascending=False)\n",
    "    gf_lang['b_user_id_shifted'] = gf_lang['b_user_id'].shift(1)\n",
    "    gf_lang = gf_lang[gf_lang['b_user_id_shifted']!=gf_lang['b_user_id']]\n",
    "    gf_lang.columns = ['b_user_id_lang', 'top_' + col, 'drop1', 'drop2']\n",
    "    gf1 = gf1.merge(gf_lang[['b_user_id_lang', 'top_' + col, 'drop1', 'drop2']], how='left', left_on='b_user_id', right_on='b_user_id_lang')\n",
    "    gf2 = gf2.merge(gf_lang[['b_user_id_lang', 'top_' + col, 'drop1', 'drop2']], how='left', left_on='b_user_id', right_on='b_user_id_lang')\n",
    "    \n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    gf2 = gf2.sort_values('idx')\n",
    "    \n",
    "    gf1['same_' + col] = (gf1[col] == gf1['top_' + col]) | (gf1['tw_hash1'] == gf1['top_' + col])\n",
    "    gf1['diff_' + col] = (gf1[col] != gf1['top_' + col]) & (gf1['tw_hash1'] != gf1['top_' + col])\n",
    "    gf1['nan_' + col] = 0\n",
    "    gf1.loc[gf1['top_' + col].isna(), 'same_' + col] = 0\n",
    "    gf1.loc[gf1['top_' + col].isna(), 'diff_' + col] = 0\n",
    "    gf1.loc[gf1['top_' + col].isna(), 'nan_' + col] = 1\n",
    "    \n",
    "    gf2['same_' + col] = (gf2[col] == gf2['top_' + col]) | (gf2['tw_hash1'] == gf2['top_' + col])\n",
    "    gf2['diff_' + col] = (gf2[col] != gf2['top_' + col]) & (gf2['tw_hash1'] != gf2['top_' + col])\n",
    "    gf2['nan_' + col] = 0\n",
    "    gf2.loc[gf2['top_' + col].isna(), 'same_' + col] = 0\n",
    "    gf2.loc[gf2['top_' + col].isna(), 'diff_' + col] = 0\n",
    "    gf2.loc[gf2['top_' + col].isna(), 'nan_' + col] = 1\n",
    "    \n",
    "    train['same_' + col] = gf1['same_' + col].fillna(0).astype('int8').to_array()\n",
    "    train['diff_' + col] = gf1['diff_' + col].fillna(0).astype('int8').to_array()\n",
    "    train['nan_' + col] = gf1['nan_' + col].fillna(0).astype('int8').to_array()\n",
    "    \n",
    "    valid['same_' + col] = gf2['same_' + col].fillna(0).astype('int8').to_array()\n",
    "    valid['diff_' + col] = gf2['diff_' + col].fillna(0).astype('int8').to_array()\n",
    "    valid['nan_' + col] = gf2['nan_' + col].fillna(0).astype('int8').to_array()\n",
    "\n",
    "\n",
    "def add_timeshift(train, valid, shift=1):\n",
    "    gf1 = cudf.from_pandas(train[['timestamp', 'b_user_id']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['timestamp', 'b_user_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2['idx'] = gf2.index\n",
    "    gf1['type'] = 1\n",
    "    gf2['type'] = 2\n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "\n",
    "    gf = gf.sort_values(['b_user_id', 'timestamp'])\n",
    "    gf['timestamp'] = gf['timestamp'].astype('int64')/1e9\n",
    "    gf['b_user_id_shifted'] = gf['b_user_id'].shift(shift)\n",
    "    gf['b_timestamp_shifted'] = gf['timestamp'].shift(shift)\n",
    "    gf['b_timestamp_1'] = (gf['timestamp']-gf['b_timestamp_shifted']).abs()\n",
    "    gf.loc[gf['b_user_id']!=gf['b_user_id_shifted'], 'b_timestamp_1'] = 15*24*3600\n",
    "    gf = gf.sort_values(['idx'])\n",
    "\n",
    "    train['b_timestamp_' + str(shift)] = gf.loc[gf['type']==1, 'b_timestamp_1'].fillna(0).astype('int8').to_array()\n",
    "    valid['b_timestamp_' + str(shift)] = gf.loc[gf['type']==2, 'b_timestamp_1'].fillna(0).astype('int8').to_array()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "cluster = LocalCUDACluster()\n",
    "client = Client(cluster)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 48.1 s, sys: 1min 14s, total: 2min 2s\n",
      "Wall time: 9.93 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "train = pd.read_parquet( '../preprocessings/train-1.parquet' )\n",
    "test0 = pd.read_parquet( '../preprocessings/test-0.parquet' )\n",
    "test1 = pd.read_parquet( '../preprocessings/test-1.parquet' )\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((121386431, 27), (12434735, 27), (12434838, 27))"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train.shape, test0.shape, test1.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "test0['tr'] = 0\n",
    "test1['tr'] = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "valid = pd.concat([test0, test1], axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(24869573, 28)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "valid.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "valid = valid.reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "del test0; del test1; gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "txt = pd.read_parquet( '../preprocessings/text-processings-1.parquet' )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 16.6 s, sys: 19.6 s, total: 36.2 s\n",
      "Wall time: 33.9 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "train['timestamp'] = pd.to_datetime(train['timestamp'], unit=\"s\")\n",
    "train['a_account_creation'] = pd.to_datetime(train['a_account_creation'], unit=\"s\")\n",
    "train['b_account_creation'] = pd.to_datetime(train['b_account_creation'], unit=\"s\")\n",
    "\n",
    "valid['timestamp'] = pd.to_datetime(valid['timestamp'], unit=\"s\")\n",
    "valid['a_account_creation'] = pd.to_datetime(valid['a_account_creation'], unit=\"s\")\n",
    "valid['b_account_creation'] = pd.to_datetime(valid['b_account_creation'], unit=\"s\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "count_ats\n",
      "count_char\n",
      "count_words\n",
      "tw_hash0\n",
      "tw_hash1\n",
      "tw_rt_uhash\n"
     ]
    }
   ],
   "source": [
    "for col in ['count_ats', 'count_char', 'count_words', 'tw_hash0', 'tw_hash1', 'tw_rt_uhash']:\n",
    "    print(col)\n",
    "    train[col] = txt.iloc[:(train.shape[0]), ][col]\n",
    "    valid[col] = txt.iloc[(train.shape[0]):, ][col].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "del txt; gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 5.83 s, sys: 5.8 s, total: 11.6 s\n",
      "Wall time: 11 s\n"
     ]
    }
   ],
   "source": [
    "%%time \n",
    "# TIME FEATURES\n",
    "# RAPIDS does this 5x faster than Pandas CPU\n",
    "# If we didn't need to copy CPU to GPU to CPU, then 1300x faster!\n",
    "def split_time(df):\n",
    "    gf = cudf.from_pandas(df[['timestamp']])\n",
    "    df['dt_dow']  = gf['timestamp'].dt.weekday.to_array() \n",
    "    df['dt_hour'] = gf['timestamp'].dt.hour.to_array()\n",
    "    df['dt_minute'] = gf['timestamp'].dt.minute.to_array()\n",
    "    df['dt_second'] = gf['timestamp'].dt.second.to_array()\n",
    "    return\n",
    "\n",
    "split_time(train)\n",
    "split_time(valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DROP UNUSED COLUMNS\n",
    "cols_drop = ['links','hashtags']\n",
    "train.drop(cols_drop,inplace=True,axis=1)\n",
    "valid.drop(cols_drop,inplace=True,axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1min 41s, sys: 14.7 s, total: 1min 55s\n",
      "Wall time: 1min 51s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# SHUFFLE ROWS because Giba's files have b users in separate files\n",
    "#train.sort_index(inplace=True) # ORIGINAL RANDOM RECSYS ORDER\n",
    "train = train.sort_values('timestamp').reset_index(drop=True) #TIME ORDER"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Converting uint8 to int8...\n",
      "media max value = 12 , tweet_type max value = 2 , language max value = 65 , Converting uint8 to int8...\n",
      "media max value = 12 , tweet_type max value = 2 , language max value = 65 , CPU times: user 561 ms, sys: 363 ms, total: 923 ms\n",
      "Wall time: 833 ms\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "# RAPIDS DOESNT IMPLEMENT UINT\n",
    "def convert2int(df):\n",
    "    print('Converting uint8 to int8...')\n",
    "    for c in df.columns:\n",
    "        if df[c].dtype=='uint8':\n",
    "            print(c,'max value =',df[c].max(),', ',end='')\n",
    "            df[c] = df[c].astype('int8')\n",
    "            \n",
    "convert2int(train)\n",
    "convert2int(valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1.85 s, sys: 1.68 s, total: 3.52 s\n",
      "Wall time: 3.36 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "label_names = ['reply', 'retweet', 'retweet_comment', 'like']\n",
    "train['engage_time'] = train[label_names].min(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 7.35 s, sys: 8.47 s, total: 15.8 s\n",
      "Wall time: 15 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# ELAPSED TIME\n",
    "gf = cudf.from_pandas(train[['engage_time','timestamp']])\n",
    "gf = gf.astype('int64')/1e9\n",
    "gf.loc[gf.engage_time==0,'engage_time'] = np.nan\n",
    "gf['elapsed_time'] = gf['engage_time'] - gf['timestamp']\n",
    "train['elapsed_time'] = gf.elapsed_time.astype('float32').to_array()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "del gf; gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 10.5 s, sys: 13 s, total: 23.5 s\n",
      "Wall time: 22 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "train['reply'] = (train['reply']>0).astype('int8')\n",
    "train['retweet'] = (train['retweet']>0).astype('int8')\n",
    "train['retweet_comment'] = (train['retweet_comment']>0).astype('int8')\n",
    "train['like'] = (train['like']>0).astype('int8')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Feature Engineering "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "# RENAME TEST TO VALID and then use exact code from validation notebook\n",
    "# valid = test\n",
    "# del test; x=gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((121386431, 37), (24869573, 36))"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train.shape,valid.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2.89 s, sys: 2.25 s, total: 5.14 s\n",
      "Wall time: 5.03 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "add_diff_user1(train, valid, 'tw_rt_uhash')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 5.09 s, sys: 4.45 s, total: 9.53 s\n",
      "Wall time: 9.18 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "add_diff_user1_fixed(train, valid, 'tw_userid0')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 3.57 s, sys: 3.38 s, total: 6.95 s\n",
      "Wall time: 6.65 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "add_freq_tweet(train, valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "add_timeshift(train, valid, shift=1)\n",
    "add_timeshift(train, valid, shift=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 10.6 s, sys: 15.7 s, total: 26.3 s\n",
      "Wall time: 1min 9s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "diff_time(train, valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 18.1 s, sys: 8.7 s, total: 26.8 s\n",
      "Wall time: 25.9 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "train.loc[train['tw_hash0']==0,'diff_tw_hash0'] = 0\n",
    "train.loc[train['tw_hash0']==0,'same_tw_hash0'] = 0\n",
    "\n",
    "valid.loc[valid['tw_hash0']==0,'diff_tw_hash0'] = 0\n",
    "valid.loc[valid['tw_hash0']==0,'same_tw_hash0'] = 0\n",
    "\n",
    "train.loc[train['tw_rt_uhash']==0,'diff_tw_rt_uhash'] = 0\n",
    "train.loc[train['tw_rt_uhash']==0,'same_tw_rt_uhash'] = 0\n",
    "\n",
    "valid.loc[valid['tw_rt_uhash']==0,'diff_tw_rt_uhash'] = 0\n",
    "valid.loc[valid['tw_rt_uhash']==0,'same_tw_rt_uhash'] = 0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Target Encode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import KFold\n",
    "def target_encode_cudf_v3(train, valid, col, tar, n_folds=5, min_ct=0, smooth=20, \n",
    "                          seed=42, shuffle=False, t2=None, v2=None, x=-1):\n",
    "    #\n",
    "    # col = column to target encode (or if list of columns then multiple groupby)\n",
    "    # tar = tar column encode against\n",
    "    # if min_ct>0 then all classes with <= min_ct are consider in new class \"other\"\n",
    "    # smooth = Bayesian smooth parameter\n",
    "    # seed = for 5 Fold if shuffle==True\n",
    "    # if x==-1 result appended to train and valid\n",
    "    # if x>=0 then result returned in column x of t2 and v2\n",
    "    #    \n",
    "    \n",
    "    # SINGLE OR MULTIPLE COLUMN\n",
    "    if not isinstance(col, list): col = [col]\n",
    "    if (min_ct>0)&(len(col)>1): \n",
    "        print('WARNING: Setting min_ct=0 with multiple columns. Not implemented')\n",
    "        min_ct = 0\n",
    "    name = \"_\".join(col)\n",
    "        \n",
    "    # FIT ALL TRAIN\n",
    "    gf = cudf.from_pandas(train[col+[tar]]).reset_index(drop=True)\n",
    "    gf['idx'] = gf.index #needed because cuDF merge returns out of order\n",
    "    if min_ct>0: # USE MIN_CT?\n",
    "        other = gf.groupby(col[0]).size(); other = other[other<=min_ct].index\n",
    "        save = gf[col[0]].values.copy()\n",
    "        gf.loc[gf[col[0]].isin(other),col[0]] = -1\n",
    "    te = gf.groupby(col)[[tar]].agg(['mean','count']).reset_index(); te.columns = col + ['m','c']\n",
    "    mn = gf[tar].mean().astype('float32')\n",
    "    te['smooth'] = ((te['m']*te['c'])+(mn*smooth)) / (te['c']+smooth)\n",
    "    if min_ct>0: gf[col[0]] = save.copy()\n",
    "    \n",
    "    # PREDICT VALID\n",
    "    gf2 = cudf.from_pandas(valid[col]).reset_index(drop=True); gf2['idx'] = gf2.index\n",
    "    if min_ct>0: gf2.loc[gf2[col[0]].isin(other),col[0]] = -1\n",
    "    gf2 = gf2.merge(te[col+['smooth']], on=col, how='left', sort=False).sort_values('idx')\n",
    "    if x==-1: valid[f'TE_{name}_{tar}'] = gf2['smooth'].fillna(mn).astype('float32').to_array()\n",
    "    elif x>=0: v2[:,x] = gf2['smooth'].fillna(mn).astype('float32').to_array()\n",
    "    \n",
    "    # KFOLD ON TRAIN\n",
    "    tmp = cupy.zeros((train.shape[0]),dtype='float32'); gf['fold'] = 0\n",
    "    if shuffle: # shuffling is 2x slower\n",
    "        kf = KFold(n_folds, random_state=seed, shuffle=shuffle)\n",
    "        for k,(idxT,idxV) in enumerate(kf.split(train)): gf.loc[idxV,'fold'] = k\n",
    "    else:\n",
    "        fsize = train.shape[0]//n_folds\n",
    "        gf['fold'] = cupy.clip(gf.idx.values//fsize,0,n_folds-1)\n",
    "    for k in range(n_folds):\n",
    "        if min_ct>0: # USE MIN CT?\n",
    "            if k<n_folds-1: save = gf[col[0]].values.copy()\n",
    "            other = gf.loc[gf.fold!=k].groupby(col[0]).size(); other = other[other<=min_ct].index\n",
    "            gf.loc[gf[col[0]].isin(other),col[0]] = -1\n",
    "        te = gf.loc[gf.fold!=k].groupby(col)[[tar]].agg(['mean','count']).reset_index(); \n",
    "        te.columns = col + ['m','c']\n",
    "        mn = gf.loc[gf.fold!=k,tar].mean().astype('float32')\n",
    "        te['smooth'] = ((te['m']*te['c'])+(mn*smooth)) / (te['c']+smooth)\n",
    "        gf = gf.merge(te[col+['smooth']], on=col, how='left', sort=False).sort_values('idx')\n",
    "        tmp[(gf.fold.values==k)] = gf.loc[gf.fold==k,'smooth'].fillna(mn).astype('float32').values\n",
    "        gf.drop_column('smooth')\n",
    "        if (min_ct>0)&(k<n_folds-1): gf[col[0]] = save.copy()\n",
    "    if x==-1: train[f'TE_{name}_{tar}'] = cupy.asnumpy(tmp.astype('float32'))\n",
    "    elif x>=0: t2[:,x] = cupy.asnumpy(tmp.astype('float32'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPU STORAGE FOR NEW FEATURES\n",
    "# This is faster than adding each new column to Pandas dataframe\n",
    "train2 = np.zeros((train.shape[0],28),dtype='float32')\n",
    "valid2 = np.zeros((valid.shape[0],28),dtype='float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "TE media reply 12.2 seconds\n",
      "TE media retweet 7.7 seconds\n",
      "TE media retweet_comment 7.2 seconds\n",
      "TE media like 6.8 seconds\n",
      "TE tweet_type reply 6.3 seconds\n",
      "TE tweet_type retweet 6.2 seconds\n",
      "TE tweet_type retweet_comment 6.3 seconds\n",
      "TE tweet_type like 6.6 seconds\n",
      "TE language reply 8.3 seconds\n",
      "TE language retweet 7.0 seconds\n",
      "TE language retweet_comment 6.3 seconds\n",
      "TE language like 6.4 seconds\n",
      "TE a_user_id reply 12.8 seconds\n",
      "TE a_user_id retweet 12.6 seconds\n",
      "TE a_user_id retweet_comment 12.0 seconds\n",
      "TE a_user_id like 11.6 seconds\n",
      "TE b_user_id reply 15.3 seconds\n",
      "TE b_user_id retweet 13.5 seconds\n",
      "TE b_user_id retweet_comment 15.2 seconds\n",
      "TE b_user_id like 13.3 seconds\n",
      "TE tw_hash0 reply 7.9 seconds\n",
      "TE tw_hash0 retweet 7.4 seconds\n",
      "TE tw_hash0 retweet_comment 7.8 seconds\n",
      "TE tw_hash0 like 9.2 seconds\n",
      "TE tw_rt_uhash reply 8.8 seconds\n",
      "TE tw_rt_uhash retweet 8.2 seconds\n",
      "TE tw_rt_uhash retweet_comment 8.3 seconds\n",
      "TE tw_rt_uhash like 10.5 seconds\n",
      "CPU times: user 2min 33s, sys: 1min 58s, total: 4min 31s\n",
      "Wall time: 4min 21s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# cuDF TE ENCODING IS SUPER FAST!!\n",
    "idx = 0; cols = []\n",
    "for c in ['media', 'tweet_type', 'language', 'a_user_id', 'b_user_id', 'tw_hash0', 'tw_rt_uhash']:\n",
    "    for t in ['reply', 'retweet', 'retweet_comment', 'like']:\n",
    "        start = time.time()\n",
    "        target_encode_cudf_v3(train, valid, col=c, tar=t, smooth=20, min_ct=0,\n",
    "                              t2=train2, v2=valid2, x=idx, shuffle=False)\n",
    "        end = time.time(); idx += 1\n",
    "        cols.append(f'TE_{c}_{t}')\n",
    "        print('TE',c,t,'%.1f seconds'%(end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 8.3 s, sys: 2.58 s, total: 10.9 s\n",
      "Wall time: 10.3 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE VALID HAS INDEX 0,1,2,3...\n",
    "valid = pd.concat([valid,pd.DataFrame(valid2,columns=cols)],axis=1)\n",
    "del valid2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 38.4 s, sys: 12 s, total: 50.3 s\n",
      "Wall time: 47.6 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE TRAIN HAS INDEX 0,1,2,3...\n",
    "train = pd.concat([train,pd.DataFrame(train2,columns=cols)],axis=1)\n",
    "del train2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Mulitple Column Target Encode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPU STORAGE FOR NEW FEATURES\n",
    "# This is faster than adding each new column to Pandas dataframe\n",
    "train2 = np.zeros((train.shape[0],4),dtype='float32')\n",
    "valid2 = np.zeros((valid.shape[0],4),dtype='float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "TE mult reply 50.6 seconds\n",
      "TE mult retweet 16.8 seconds\n",
      "TE mult retweet_comment 16.5 seconds\n",
      "TE mult like 16.8 seconds\n",
      "CPU times: user 57.5 s, sys: 48 s, total: 1min 45s\n",
      "Wall time: 1min 40s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# cuDF TE ENCODING IS SUPER FAST!!\n",
    "idx = 0; cols = []\n",
    "c = ['domains','language','b_follows_a','tweet_type','media','a_is_verified']\n",
    "for t in ['reply', 'retweet', 'retweet_comment', 'like']:\n",
    "    start = time.time()\n",
    "    target_encode_cudf_v3(train, valid, col=c, tar=t, smooth=20, min_ct=0,\n",
    "                            t2=train2, v2=valid2, x=idx, shuffle=False)\n",
    "    end = time.time(); idx += 1\n",
    "    cols.append(f'TE_mult_{t}')\n",
    "    print('TE','mult',t,'%.1f seconds'%(end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2.09 s, sys: 2.08 s, total: 4.17 s\n",
      "Wall time: 4.01 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE VALID HAS INDEX 0,1,2,3...\n",
    "valid = pd.concat([valid,pd.DataFrame(valid2,columns=cols)],axis=1)\n",
    "del valid2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 8.22 s, sys: 9.55 s, total: 17.8 s\n",
      "Wall time: 17.4 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE TRAIN HAS INDEX 0,1,2,3...\n",
    "train = pd.concat([train,pd.DataFrame(train2,columns=cols)],axis=1)\n",
    "del train2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Elapsed Time Target Encode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPU STORAGE FOR NEW FEATURES\n",
    "# This is faster than adding each new column to Pandas dataframe\n",
    "train2 = np.zeros((train.shape[0],5),dtype='float32')\n",
    "valid2 = np.zeros((valid.shape[0],5),dtype='float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "TE media elapsed_time 43.5 seconds\n",
      "TE tweet_type elapsed_time 5.3 seconds\n",
      "TE language elapsed_time 5.3 seconds\n",
      "TE a_user_id elapsed_time 10.7 seconds\n",
      "TE b_user_id elapsed_time 12.9 seconds\n",
      "CPU times: user 40.7 s, sys: 40.6 s, total: 1min 21s\n",
      "Wall time: 1min 17s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# cuDF TE ENCODING IS SUPER FAST!!\n",
    "idx = 0; cols = []\n",
    "for c in ['media', 'tweet_type', 'language', 'a_user_id', 'b_user_id']:\n",
    "    for t in ['elapsed_time']:\n",
    "        start = time.time()\n",
    "        target_encode_cudf_v3(train, valid, col=c, tar=t, smooth=20, min_ct=0,\n",
    "                              t2=train2, v2=valid2, x=idx, shuffle=False)\n",
    "        end = time.time(); idx += 1\n",
    "        cols.append(f'TE_{c}_{t}')\n",
    "        print('TE',c,t,'%.1f seconds'%(end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2.07 s, sys: 2.33 s, total: 4.4 s\n",
      "Wall time: 4.24 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE VALID HAS INDEX 0,1,2,3...\n",
    "valid = pd.concat([valid,pd.DataFrame(valid2,columns=cols)],axis=1)\n",
    "del valid2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 8.85 s, sys: 10.4 s, total: 19.3 s\n",
      "Wall time: 18.9 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE TRAIN HAS INDEX 0,1,2,3...\n",
    "train = pd.concat([train,pd.DataFrame(train2,columns=cols)],axis=1)\n",
    "del train2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Count Encode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "def count_encode_cudf_v2(train,valid,col,t2=None,v2=None,x=-1):\n",
    "    #\n",
    "    # col = column to count encode\n",
    "    # if x==-1 then result appended to train and valid\n",
    "    # if x>=0 then result returned in numpy arrays t2 and v2\n",
    "    #    make sure x is even because it returns in x and x+1 column\n",
    "    #\n",
    "    # COUNT TRAIN SEPARATELY\n",
    "    gf = cudf.from_pandas(train[[col]]).reset_index(drop=True); gf['idx'] = gf.index\n",
    "    te = gf.groupby(col)[['idx']].agg('count').rename({'idx':'ct'})\n",
    "    gf = gf.merge(te,left_on=col,right_index=True,how='left').sort_values('idx')\n",
    "    if x==-1: train[f'CE_{col}_norm'] = (gf.ct/len(gf)).astype('float32').to_array()\n",
    "    elif x>=0: \n",
    "        t2[:,x] = (gf.ct/len(gf)).astype('float32').to_array()\n",
    "        #t2[:,x+1] = gf.ct.astype('float32').to_array()\n",
    "\n",
    "    # COUNT VALID SEPARATELY\n",
    "    gf2 = cudf.from_pandas(valid[[col]]).reset_index(drop=True); gf2['idx'] = gf2.index\n",
    "    te = gf2.groupby(col)[['idx']].agg('count').rename({'idx':'ct'})\n",
    "    gf2 = gf2.merge(te,left_on=col,right_index=True,how='left').sort_values('idx')\n",
    "    if x==-1: valid[f'CE_{col}_norm'] = (gf2.ct/len(gf2)).astype('float32').to_array()\n",
    "    elif x>=0: \n",
    "        v2[:,x] = (gf2.ct/len(gf2)).astype('float32').to_array()\n",
    "        #v2[:,x+1] = gf2.ct.astype('float32').to_array()\n",
    "        \n",
    "    # COUNT TRAIN VALID TOGETHER\n",
    "    gf3 = cudf.concat([gf,gf2],axis=0)\n",
    "    te = gf3.groupby(col)[['idx']].agg('count').rename({'idx':'ct2'})\n",
    "    gf = gf.merge(te,left_on=col,right_index=True,how='left').sort_values('idx')\n",
    "    gf2 = gf2.merge(te,left_on=col,right_index=True,how='left').sort_values('idx')\n",
    "    if x==-1:\n",
    "        train[f'CE_{col}'] = gf.ct2.astype('float32').to_array()\n",
    "        valid[f'CE_{col}'] = gf2.ct2.astype('float32').to_array()\n",
    "    elif x>=0:\n",
    "        t2[:,x+1] = gf.ct2.astype('float32').to_array()\n",
    "        v2[:,x+1] = gf2.ct2.astype('float32').to_array()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPU STORAGE FOR NEW FEATURES\n",
    "# This is faster than adding each new column to Pandas dataframe\n",
    "train2 = np.zeros((train.shape[0],10),dtype='float32')\n",
    "valid2 = np.zeros((valid.shape[0],10),dtype='float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CE media 45.4 seconds\n",
      "CE tweet_type 3.7 seconds\n",
      "CE language 3.6 seconds\n",
      "CE a_user_id 4.7 seconds\n",
      "CE b_user_id 5.3 seconds\n",
      "CPU times: user 33.9 s, sys: 32.8 s, total: 1min 6s\n",
      "Wall time: 1min 2s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# cuDF CE ENCODING IS SUPER FAST!!\n",
    "idx = 0; cols = []\n",
    "for c in ['media', 'tweet_type', 'language', 'a_user_id', 'b_user_id']:\n",
    "        start = time.time()\n",
    "        count_encode_cudf_v2(train,valid,col=c,t2=train2,v2=valid2,x=idx)\n",
    "        end = time.time(); idx += 2\n",
    "        cols.append(f'CE_{c}_norm')\n",
    "        cols.append(f'CE_{c}')\n",
    "        print('CE',c,'%.1f seconds'%(end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 3.14 s, sys: 2.43 s, total: 5.57 s\n",
      "Wall time: 5.32 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE VALID HAS INDEX 0,1,2,3...\n",
    "valid = pd.concat([valid,pd.DataFrame(valid2,columns=cols)],axis=1)\n",
    "del valid2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 13.8 s, sys: 12.8 s, total: 26.5 s\n",
      "Wall time: 25.7 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE TRAIN HAS INDEX 0,1,2,3...\n",
    "train = pd.concat([train,pd.DataFrame(train2,columns=cols)],axis=1)\n",
    "del train2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Difference Encode (Lag Features)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "def diff_encode_cudf_v1(train,col,tar,sort_col=None,sft=1,t2=None,x=0):\n",
    "    if sort_col is None: \n",
    "        gf = cudf.from_pandas(train[[col, tar]]).reset_index(drop=True)\n",
    "        gf['idx'] = gf.index        \n",
    "        gf = gf.sort_values([col])\n",
    "    else: \n",
    "        gf = cudf.from_pandas(train[[col, tar, sort_col]]).reset_index(drop=True)\n",
    "        gf['idx'] = gf.index\n",
    "        gf = gf.sort_values([col,sort_col])\n",
    "    gf[col+'_sft'] = gf[col].shift(sft)\n",
    "    gf[tar+'_sft'] = gf[tar].shift(sft)\n",
    "    gf[tar+'_diff'] = gf[tar]-gf[tar+'_sft']\n",
    "    gf.loc[gf[col]!=gf[col+'_sft'], tar+'_diff'] = 0\n",
    "    gf = gf.sort_values(['idx'])\n",
    "    if t2 is None: train[tar+'_diff'] = gf[tar+'_diff'].fillna(0).astype('float32').to_array()\n",
    "    else: t2[:,x] = gf[tar+'_diff'].fillna(0).astype('float32').to_array()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPU STORAGE FOR NEW FEATURES\n",
    "# This is faster than adding each new column to Pandas dataframe\n",
    "train2 = np.zeros((train.shape[0],6),dtype='float32')\n",
    "valid2 = np.zeros((valid.shape[0],6),dtype='float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DE b_user_id b_follower_count 1 51.6 seconds\n",
      "DE b_user_id b_follower_count -1 6.6 seconds\n",
      "DE b_user_id b_following_count 1 6.6 seconds\n",
      "DE b_user_id b_following_count -1 6.5 seconds\n",
      "DE b_user_id language 1 5.9 seconds\n",
      "DE b_user_id language -1 6.5 seconds\n",
      "CPU times: user 45.6 s, sys: 42.9 s, total: 1min 28s\n",
      "Wall time: 1min 23s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# cuDF DE ENCODING IS FAST!!\n",
    "idx = 0; cols = []; sc = 'timestamp'\n",
    "for c in ['b_user_id']:\n",
    "    for t in ['b_follower_count','b_following_count','language']:\n",
    "        for s in [1,-1]:\n",
    "            start = time.time()\n",
    "            diff_encode_cudf_v1(train, col=c, tar=t, sft=s, sort_col=sc, t2=train2, x=idx)\n",
    "            diff_encode_cudf_v1(valid, col=c, tar=t, sft=s, sort_col=sc, t2=valid2, x=idx)\n",
    "            end = time.time(); idx += 1\n",
    "            cols.append(f'DE_{c}_{t}_{s}')\n",
    "            print('DE',c,t,s,'%.1f seconds'%(end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2.59 s, sys: 2.73 s, total: 5.32 s\n",
      "Wall time: 5.09 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE VALID HAS INDEX 0,1,2,3...\n",
    "valid = pd.concat([valid,pd.DataFrame(valid2,columns=cols)],axis=1)\n",
    "del valid2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 11.2 s, sys: 12.6 s, total: 23.8 s\n",
      "Wall time: 23.2 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# MAKE SURE TRAIN HAS INDEX 0,1,2,3...\n",
    "train = pd.concat([train,pd.DataFrame(train2,columns=cols)],axis=1)\n",
    "del train2; x=gc.collect()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Diff Language"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "def add_diff_language(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['a_user_id', 'language', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['a_user_id', 'language', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2['idx'] = gf2.index\n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf_lang = gf[['a_user_id', 'language', 'tweet_id']].drop_duplicates()\n",
    "    gf_lang = gf_lang.groupby(['a_user_id', 'language']).count().reset_index()\n",
    "    gf_lang = gf_lang.sort_values(['a_user_id', 'tweet_id'], ascending=False)\n",
    "    gf_lang['a_user_shifted'] = gf_lang['a_user_id'].shift(1)\n",
    "    gf_lang = gf_lang[gf_lang['a_user_shifted']!=gf_lang['a_user_id']]\n",
    "    gf_lang.columns = ['a_user_id_lang', 'top_tweet_language', 'drop1', 'drop2']\n",
    "    gf1 = gf1.merge(gf_lang[['a_user_id_lang', 'top_tweet_language']], how='left', left_on='b_user_id', right_on='a_user_id_lang')\n",
    "    gf2 = gf2.merge(gf_lang[['a_user_id_lang', 'top_tweet_language']], how='left', left_on='b_user_id', right_on='a_user_id_lang')\n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    gf2 = gf2.sort_values('idx')\n",
    "    gf1['same_language'] = gf1['language'] == gf1['top_tweet_language']\n",
    "    gf1['diff_language'] = gf1['language'] != gf1['top_tweet_language']\n",
    "    gf1['nan_language'] = 0\n",
    "    gf1.loc[gf1['top_tweet_language'].isna(), 'same_language'] = 0\n",
    "    gf1.loc[gf1['top_tweet_language'].isna(), 'diff_language'] = 0\n",
    "    gf1.loc[gf1['top_tweet_language'].isna(), 'nan_language'] = 1\n",
    "    gf2['same_language'] = gf2['language'] == gf2['top_tweet_language']\n",
    "    gf2['diff_language'] = gf2['language'] != gf2['top_tweet_language']\n",
    "    gf2['nan_language'] = 0\n",
    "    gf2.loc[gf2['top_tweet_language'].isna(), 'same_language'] = 0\n",
    "    gf2.loc[gf2['top_tweet_language'].isna(), 'diff_language'] = 0\n",
    "    gf2.loc[gf2['top_tweet_language'].isna(), 'nan_language'] = 1\n",
    "    train['same_language'] = gf1['same_language'].fillna(0).astype('int32').to_array()\n",
    "    train['diff_language'] = gf1['diff_language'].fillna(0).astype('int32').to_array()\n",
    "    train['nan_language'] = gf1['nan_language'].fillna(0).astype('int32').to_array()\n",
    "    valid['same_language'] = gf2['same_language'].fillna(0).astype('int32').to_array()\n",
    "    valid['diff_language'] = gf2['diff_language'].fillna(0).astype('int32').to_array()\n",
    "    valid['nan_language'] = gf2['nan_language'].fillna(0).astype('int32').to_array()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 25.6 s, sys: 33.3 s, total: 58.9 s\n",
      "Wall time: 55.4 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "add_diff_language(train,valid)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Follower Ratio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1.76 s, sys: 1.2 s, total: 2.96 s\n",
      "Wall time: 2.77 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# follow rate feature\n",
    "train['a_ff_rate'] = (train['a_following_count'] / train['a_follower_count']).astype('float32')\n",
    "train['b_ff_rate'] = (train['b_follower_count']  / train['b_following_count']).astype('float32')\n",
    "valid['a_ff_rate']  = (valid['a_following_count'] / valid['a_follower_count']).astype('float32')\n",
    "valid['b_ff_rate']  = (valid['b_follower_count']  / valid['b_following_count']).astype('float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "train.to_parquet('results/sub_train.parquet')\n",
    "valid.to_parquet('results/sub_valid.parquet')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
    "#train = pd.read_parquet('/recsys_features2/sub_train.parquet')\n",
    "#valid = pd.read_parquet('/recsys_features2/sub_valid.parquet')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1.69 s, sys: 1.21 s, total: 2.9 s\n",
      "Wall time: 2.72 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# follow rate feature\n",
    "train['ab_fing_rate'] = (train['a_following_count'] / train['b_following_count']).astype('float32')\n",
    "train['ab_fer_rate'] = (train['a_follower_count'] / train['b_follower_count']).astype('float32')\n",
    "valid['ab_fing_rate'] = (valid['a_following_count'] / valid['b_following_count']).astype('float32')\n",
    "valid['ab_fer_rate'] = (valid['a_follower_count'] / valid['b_follower_count']).astype('float32')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 15.3 s, sys: 7.71 s, total: 23.1 s\n",
      "Wall time: 21.6 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "train['a_age'] = (datetime(2020, 2, 1)-train['a_account_creation']).dt.days/30\n",
    "train['b_age'] = (datetime(2020, 2, 1)-train['b_account_creation']).dt.days/30\n",
    "train['ab_age_dff'] = (train['a_account_creation']-train['b_account_creation']).dt.days/30\n",
    "train['ab_age_rate'] = train['a_age']/train['b_age']\n",
    "\n",
    "valid['a_age'] = (datetime(2020, 2, 1)-valid['a_account_creation']).dt.days/30\n",
    "valid['b_age'] = (datetime(2020, 2, 1)-valid['b_account_creation']).dt.days/30\n",
    "valid['ab_age_dff'] = (valid['a_account_creation']-valid['b_account_creation']).dt.days/30\n",
    "valid['ab_age_rate'] = valid['a_age']/valid['b_age']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [],
   "source": [
    "def follower_chain_2(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['a_user_id', 'b_user_id', 'b_follows_a']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['a_user_id', 'b_user_id', 'b_follows_a']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2['idx'] = gf2.index\n",
    "    \n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf = gf[gf['b_follows_a']]\n",
    "    gf.drop_column('idx')\n",
    "    gf.drop_column('b_follows_a')\n",
    "    gf = gf.drop_duplicates()\n",
    "    \n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['b_user_id', 'a_user_id'])\n",
    "    gf1.columns = ['a_user_id', 'b_user_id', 'b_follows_a', 'idx', 'a_user_id_2', 'b_user_id_2']\n",
    "    gf1['a_follows_b'] = 0\n",
    "    gf1.loc[gf1['a_user_id_2']>0, 'a_follows_b'] = 1\n",
    "    gf1.drop_column('a_user_id_2')\n",
    "    gf1.drop_column('b_user_id_2')\n",
    "    \n",
    "    gf2 = gf2.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['b_user_id', 'a_user_id'])\n",
    "    gf2.columns = ['a_user_id', 'b_user_id', 'b_follows_a', 'idx', 'a_user_id_2', 'b_user_id_2']\n",
    "    gf2['a_follows_b'] = 0\n",
    "    gf2.loc[gf2['a_user_id_2']>0, 'a_follows_b'] = 1\n",
    "    gf2.drop_column('a_user_id_2')\n",
    "    gf2.drop_column('b_user_id_2')\n",
    "    \n",
    "    gf = gf.merge(gf, how='left', left_on='b_user_id', right_on='a_user_id')\n",
    "    gf = gf[gf['a_user_id_y']>0]\n",
    "    gf.drop_column('a_user_id_y')\n",
    "    gf.columns = ['a_user_id', 'b_user_id', 'b_user_id_2']\n",
    "    gf.drop_column('b_user_id')\n",
    "    gf = gf.drop_duplicates()\n",
    "    gf['b_user_id'] = 0\n",
    "    \n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['a_user_id', 'b_user_id_2'])\n",
    "    gf1['b_follows_a_2'] = 0\n",
    "    gf1.loc[gf1['b_user_id_2']>0, 'b_follows_a_2'] = 1\n",
    "    gf1.drop_column('b_user_id_y')\n",
    "    gf1.drop_column('b_user_id_2')\n",
    "    gf1.columns = ['a_user_id', 'b_user_id', 'b_follows_a', 'idx', 'a_follows_b', 'b_follows_a_2']\n",
    "    \n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['b_user_id', 'a_user_id'], right_on=['a_user_id', 'b_user_id_2'])\n",
    "    gf1['a_follows_b_2'] = 0\n",
    "    gf1.loc[gf1['b_user_id_2']>0, 'a_follows_b_2'] = 1\n",
    "    gf1.drop_column('b_user_id_y')\n",
    "    gf1.drop_column('b_user_id_2')\n",
    "    gf1.drop_column('a_user_id_y')\n",
    "    gf1.columns = ['a_user_id', 'b_user_id', 'b_follows_a', 'idx', 'a_follows_b', 'b_follows_a_2', 'a_follows_a_2']\n",
    "    \n",
    "    gf2 = gf2.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['a_user_id', 'b_user_id_2'])\n",
    "    gf2['b_follows_a_2'] = 0\n",
    "    gf2.loc[gf2['b_user_id_2']>0, 'b_follows_a_2'] = 1\n",
    "    gf2.drop_column('b_user_id_y')\n",
    "    gf2.drop_column('b_user_id_2')\n",
    "    gf2.columns = ['a_user_id', 'b_user_id', 'b_follows_a', 'idx', 'a_follows_b', 'b_follows_a_2']\n",
    "    \n",
    "    gf2 = gf2.merge(gf, how='left', left_on=['b_user_id', 'a_user_id'], right_on=['a_user_id', 'b_user_id_2'])\n",
    "    gf2['a_follows_b_2'] = 0\n",
    "    gf2.loc[gf2['b_user_id_2']>0, 'a_follows_b_2'] = 1\n",
    "    gf2.drop_column('b_user_id_y')\n",
    "    gf2.drop_column('b_user_id_2')\n",
    "    gf2.drop_column('a_user_id_y')\n",
    "    gf2.columns = ['a_user_id', 'b_user_id', 'b_follows_a', 'idx', 'a_follows_b', 'b_follows_a_2', 'a_follows_a_2']\n",
    "    \n",
    "    gf1 = gf1.sort_values('idx')\n",
    "    gf2 = gf2.sort_values('idx')\n",
    "    \n",
    "    train['a_follows_b'] = gf1['a_follows_b'].fillna(0).astype('int8').to_array()\n",
    "    train['b_follows_a_2'] = gf1['b_follows_a_2'].fillna(0).astype('int8').to_array()\n",
    "    train['a_follows_b_2'] = gf1['a_follows_a_2'].fillna(0).astype('int8').to_array()\n",
    "    \n",
    "    valid['a_follows_b'] = gf2['a_follows_b'].fillna(0).astype('int8').to_array()\n",
    "    valid['b_follows_a_2'] = gf2['b_follows_a_2'].fillna(0).astype('int8').to_array()\n",
    "    valid['a_follows_b_2'] = gf2['a_follows_a_2'].fillna(0).astype('int8').to_array()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 22.1 s, sys: 31 s, total: 53.1 s\n",
      "Wall time: 49.7 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "follower_chain_2(train, valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [],
   "source": [
    "def combined_frequency(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['a_user_id', 'b_user_id']]).reset_index(drop=True)\n",
    "    gf1['idx'] = gf1.index\n",
    "    gf2 = cudf.from_pandas(valid[['a_user_id', 'b_user_id']]).reset_index(drop=True)\n",
    "    gf2['idx'] = gf2.index\n",
    "    \n",
    "    gf = cudf.concat([gf1, gf2])\n",
    "    gf = gf[['a_user_id', 'b_user_id']].groupby(['a_user_id', 'b_user_id']).size().reset_index()\n",
    "    gf.columns = ['a_user_id', 'b_user_id', 'freq_same']\n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['a_user_id', 'b_user_id'])\n",
    "    gf2 = gf2.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['a_user_id', 'b_user_id'])\n",
    "    gf.columns = ['a_user_id', 'b_user_id', 'freq_diff']\n",
    "    gf1 = gf1.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['b_user_id', 'a_user_id'])\n",
    "    gf2 = gf2.merge(gf, how='left', left_on=['a_user_id', 'b_user_id'], right_on=['b_user_id', 'a_user_id'])\n",
    "    \n",
    "    train['ab_freq_s'] = gf1['freq_same'].fillna(0).astype('int32').to_array()\n",
    "    train['ab_freq_d'] = gf1['freq_diff'].fillna(0).astype('int32').to_array()\n",
    "    train['ab_freq_sd'] = train['ab_freq_s'] + train['ab_freq_d']\n",
    "    \n",
    "    valid['ab_freq_s'] = gf2['freq_same'].fillna(0).astype('int32').to_array()\n",
    "    valid['ab_freq_d'] = gf2['freq_diff'].fillna(0).astype('int32').to_array()\n",
    "    valid['ab_freq_sd'] = valid['ab_freq_s'] + valid['ab_freq_d']\n",
    "\n",
    "def add_no_tweet_time(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['timestamp', 'a_user_id', 'b_user_id', 'tweet_id', 'no_tweet']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['timestamp', 'a_user_id', 'b_user_id', 'tweet_id', 'no_tweet']]).reset_index(drop=True)\n",
    "\n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf = dask_cudf.from_cudf(gf, npartitions=64)\n",
    "    gf['timestamp'] = gf['timestamp'].astype('int64')/1e9\n",
    "    gf_unique = gf[['timestamp', 'a_user_id', 'tweet_id']].drop_duplicates()\n",
    "    gf_unique.columns = ['tmp_timestamp', 'tmp_a_user_id', 'tmp_tweet_id']\n",
    "    gf = gf[gf['no_tweet']!=0]\n",
    "    gf = gf.drop('no_tweet', axis=1)\n",
    "    gf = gf.drop('a_user_id', axis=1)\n",
    "    gf = gf.merge(gf_unique, how='left', left_on='b_user_id', right_on='tmp_a_user_id')\n",
    "    gf = gf[gf['tweet_id']!=gf['tmp_tweet_id']]\n",
    "    gf = gf[~gf['tmp_a_user_id'].isna()]\n",
    "\n",
    "    for sec_interval in [5,60,240,480,1440]:\n",
    "        gf['diff_timestamp_prev'] = gf['timestamp']-gf['tmp_timestamp']\n",
    "        gf['diff_timestamp_after'] = gf['tmp_timestamp']-gf['timestamp']\n",
    "        gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']>0, 15*24*3600)\n",
    "        gf['diff_timestamp_prev'] = gf.diff_timestamp_prev.where(gf['diff_timestamp_prev']>0, 15*24*3600)\n",
    "        gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']<sec_interval*60, 0)\n",
    "        gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']==0, 1)\n",
    "        gf['diff_timestamp_prev'] = gf.diff_timestamp_after.where(gf['diff_timestamp_prev']<sec_interval*60, 0)\n",
    "        gf['diff_timestamp_prev'] = gf.diff_timestamp_after.where(gf['diff_timestamp_prev']==0, 1)\n",
    "        gf_tmp = gf[['tweet_id', \n",
    "                     'b_user_id', \n",
    "                     'diff_timestamp_prev', \n",
    "                     'diff_timestamp_after']].groupby(['tweet_id', 'b_user_id']).sum().reset_index()\n",
    "\n",
    "        gf_tmp.to_parquet('/tmp/time2_gf' + str(sec_interval))\n",
    "\n",
    "    for sec_interval in [5,60,240,480,1440]:\n",
    "        gf = cudf.read_parquet('/tmp/time2_gf' + str(sec_interval) + '/part.0.parquet')\n",
    "        gf.columns = ['idx2', 'tweet_id', 'b_user_id', 'tweets_pres_s' + str(sec_interval), 'tweets_after_s' + str(sec_interval)]\n",
    "        gf = gf.drop('idx2', axis=1)\n",
    "        gf1 = cudf.from_pandas(train[['b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "        gf1['idx'] = gf1.index\n",
    "        gf1 = gf1.merge(gf, how='left', left_on=['tweet_id', 'b_user_id'], right_on=['tweet_id', 'b_user_id'])\n",
    "        gf1 = gf1.sort_values('idx')\n",
    "        #train['tweets_after_s' + str(sec_interval)] = gf1['tweets_after_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        train['tweets_prev_s' + str(sec_interval)] = gf1['tweets_pres_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        del gf1; gc.collect()\n",
    "\n",
    "        gf1 = cudf.from_pandas(valid[['b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "        gf1['idx'] = gf1.index\n",
    "        gf1 = gf1.merge(gf, how='left', left_on=['tweet_id', 'b_user_id'], right_on=['tweet_id', 'b_user_id'])\n",
    "        gf1 = gf1.sort_values('idx')\n",
    "        #valid['tweets_after_s' + str(sec_interval)] = gf1['tweets_after_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        valid['tweets_prev_s' + str(sec_interval)] = gf1['tweets_pres_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        del gf1; gc.collect()\n",
    "\n",
    "def add_no_eng_time(train, valid):\n",
    "    gf1 = cudf.from_pandas(train[['timestamp', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "    gf2 = cudf.from_pandas(valid[['timestamp', 'b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "\n",
    "    gf = cudf.concat([gf1, gf2], axis=0)\n",
    "    gf = dask_cudf.from_cudf(gf, npartitions=64)\n",
    "    gf['timestamp'] = gf['timestamp'].astype('int64')/1e9\n",
    "    gf_unique = gf[['timestamp', 'b_user_id', 'tweet_id']]\n",
    "    gf_unique.columns = ['tmp_timestamp', 'tmp_b_user_id', 'tmp_tweet_id']\n",
    "    gf = gf.merge(gf_unique, how='left', left_on='b_user_id', right_on='tmp_b_user_id')\n",
    "    gf = gf[gf['tweet_id']!=gf['tmp_tweet_id']]\n",
    "\n",
    "    for sec_interval in [5,60,240,480,1440]:\n",
    "        gf['diff_timestamp_prev'] = gf['timestamp']-gf['tmp_timestamp']\n",
    "        gf['diff_timestamp_prev'] = gf.diff_timestamp_prev.where(gf['diff_timestamp_prev']>0, 15*24*3600)\n",
    "        gf['diff_timestamp_prev'] = gf.diff_timestamp_prev.where(gf['diff_timestamp_prev']<sec_interval*60, 0)\n",
    "        gf['diff_timestamp_prev'] = gf.diff_timestamp_prev.where(gf['diff_timestamp_prev']==0, 1)\n",
    "        gf['diff_timestamp_after'] = gf['tmp_timestamp']-gf['timestamp']\n",
    "        gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']>0, 15*24*3600)\n",
    "        gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']<sec_interval*60, 0)\n",
    "        gf['diff_timestamp_after'] = gf.diff_timestamp_after.where(gf['diff_timestamp_after']==0, 1)\n",
    "        gf_tmp = gf[['tweet_id', \n",
    "                     'b_user_id', \n",
    "                     'diff_timestamp_prev', \n",
    "                     'diff_timestamp_after']].groupby(['tweet_id', 'b_user_id']).sum().reset_index()\n",
    "\n",
    "        gf_tmp.to_parquet('/tmp/time2_eng_gf' + str(sec_interval))\n",
    "\n",
    "    for sec_interval in [5,60,240,480,1440]:\n",
    "        gf = cudf.read_parquet('/tmp/time2_eng_gf' + str(sec_interval) + '/part.0.parquet')\n",
    "        gf.columns = ['idx2', 'tweet_id', 'b_user_id', 'tweets_pres_s' + str(sec_interval), 'tweets_after_s' + str(sec_interval)]\n",
    "        gf = gf.drop('idx2', axis=1)\n",
    "        gf1 = cudf.from_pandas(train[['b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "        gf1['idx'] = gf1.index\n",
    "        gf1 = gf1.merge(gf, how='left', left_on=['tweet_id', 'b_user_id'], right_on=['tweet_id', 'b_user_id'])\n",
    "        gf1 = gf1.sort_values('idx')\n",
    "        train['eng_after_s' + str(sec_interval)] = gf1['tweets_after_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        train['eng_prev_s' + str(sec_interval)] = gf1['tweets_pres_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        del gf1; gc.collect()\n",
    "\n",
    "        gf1 = cudf.from_pandas(valid[['b_user_id', 'tweet_id']]).reset_index(drop=True)\n",
    "        gf1['idx'] = gf1.index\n",
    "        gf1 = gf1.merge(gf, how='left', left_on=['tweet_id', 'b_user_id'], right_on=['tweet_id', 'b_user_id'])\n",
    "        gf1 = gf1.sort_values('idx')\n",
    "        valid['eng_after_s' + str(sec_interval)] = gf1['tweets_after_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        valid['eng_prev_s' + str(sec_interval)] = gf1['tweets_pres_s' + str(sec_interval)].fillna(0).astype('int32').to_array()\n",
    "        del gf1; gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 3.95 s, sys: 4.4 s, total: 8.34 s\n",
      "Wall time: 7.93 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "combined_frequency(train, valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1min 55s, sys: 1min 25s, total: 3min 21s\n",
      "Wall time: 8min 44s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "add_no_tweet_time(train, valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 2min 11s, sys: 1min 31s, total: 3min 42s\n",
      "Wall time: 9min 37s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "\n",
    "add_no_eng_time(train, valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [],
   "source": [
    "train.to_parquet('results/sub_train_final.parquet')\n",
    "valid.to_parquet('results/sub_valid_final.parquet')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Summarize Features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "train = pd.read_parquet('results/sub_train_final.parquet')\n",
    "valid = pd.read_parquet('results/sub_valid_final.parquet')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "label_names = ['reply', 'retweet', 'retweet_comment', 'like']\n",
    "DONT_USE = ['timestamp','a_account_creation','b_account_creation','engage_time',\n",
    "            'fold','tweet_id','b_user_id','a_user_id', 'dt_dow',\n",
    "            'a_account_creation', 'b_account_creation', 'elapsed_time',\n",
    "             'links','domains','hashtags0','hashtags1', 'tw_hash0', 'tw_hash1', 'tw_rt_uhash', 'id']\n",
    "DONT_USE += label_names\n",
    "features = [c for c in train.columns if c not in DONT_USE]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using 115 features:\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array(['media', 'tweet_type', 'language', 'a_follower_count',\n",
       "       'a_following_count', 'a_is_verified', 'b_follower_count',\n",
       "       'b_following_count', 'b_is_verified', 'b_follows_a',\n",
       "       'len_hashtags', 'len_domains', 'len_links', 'count_ats',\n",
       "       'count_char', 'count_words', 'dt_hour', 'dt_minute', 'dt_second',\n",
       "       'same_tw_rt_uhash', 'diff_tw_rt_uhash', 'nan_tw_rt_uhash',\n",
       "       'same_tw_hash0', 'diff_tw_hash0', 'nan_tw_hash0', 'no_tweet',\n",
       "       'b_timestamp_1', 'b_timestamp_-1', 'diff_timestamp_prev',\n",
       "       'diff_timestamp_after', 'TE_media_reply', 'TE_media_retweet',\n",
       "       'TE_media_retweet_comment', 'TE_media_like', 'TE_tweet_type_reply',\n",
       "       'TE_tweet_type_retweet', 'TE_tweet_type_retweet_comment',\n",
       "       'TE_tweet_type_like', 'TE_language_reply', 'TE_language_retweet',\n",
       "       'TE_language_retweet_comment', 'TE_language_like',\n",
       "       'TE_a_user_id_reply', 'TE_a_user_id_retweet',\n",
       "       'TE_a_user_id_retweet_comment', 'TE_a_user_id_like',\n",
       "       'TE_b_user_id_reply', 'TE_b_user_id_retweet',\n",
       "       'TE_b_user_id_retweet_comment', 'TE_b_user_id_like',\n",
       "       'TE_tw_hash0_reply', 'TE_tw_hash0_retweet',\n",
       "       'TE_tw_hash0_retweet_comment', 'TE_tw_hash0_like',\n",
       "       'TE_tw_rt_uhash_reply', 'TE_tw_rt_uhash_retweet',\n",
       "       'TE_tw_rt_uhash_retweet_comment', 'TE_tw_rt_uhash_like',\n",
       "       'TE_mult_reply', 'TE_mult_retweet', 'TE_mult_retweet_comment',\n",
       "       'TE_mult_like', 'TE_media_elapsed_time',\n",
       "       'TE_tweet_type_elapsed_time', 'TE_language_elapsed_time',\n",
       "       'TE_a_user_id_elapsed_time', 'TE_b_user_id_elapsed_time',\n",
       "       'CE_media_norm', 'CE_media', 'CE_tweet_type_norm', 'CE_tweet_type',\n",
       "       'CE_language_norm', 'CE_language', 'CE_a_user_id_norm',\n",
       "       'CE_a_user_id', 'CE_b_user_id_norm', 'CE_b_user_id',\n",
       "       'DE_b_user_id_b_follower_count_1',\n",
       "       'DE_b_user_id_b_follower_count_-1',\n",
       "       'DE_b_user_id_b_following_count_1',\n",
       "       'DE_b_user_id_b_following_count_-1', 'DE_b_user_id_language_1',\n",
       "       'DE_b_user_id_language_-1', 'same_language', 'diff_language',\n",
       "       'nan_language', 'a_ff_rate', 'b_ff_rate', 'ab_fing_rate',\n",
       "       'ab_fer_rate', 'a_age', 'b_age', 'ab_age_dff', 'ab_age_rate',\n",
       "       'a_follows_b', 'b_follows_a_2', 'a_follows_b_2', 'ab_freq_s',\n",
       "       'ab_freq_d', 'ab_freq_sd', 'tweets_prev_s5', 'tweets_prev_s60',\n",
       "       'tweets_prev_s240', 'tweets_prev_s480', 'tweets_prev_s1440',\n",
       "       'eng_after_s5', 'eng_prev_s5', 'eng_after_s60', 'eng_prev_s60',\n",
       "       'eng_after_s240', 'eng_prev_s240', 'eng_after_s480',\n",
       "       'eng_prev_s480', 'eng_after_s1440', 'eng_prev_s1440'], dtype='<U33')"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "print('Using %i features:'%(len(features)))\n",
    "np.asarray(features)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train Model Validate\n",
    "We will train on random `0.10 * 5/7` of all 7 days. This is same size that we validated with."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "SAMPLE_RATIO = (0.15) *(0.724) # Same size as validation train\n",
    "SEED = 1\n",
    "if SAMPLE_RATIO < 1.0:\n",
    "    train = train.sample(frac=SAMPLE_RATIO, random_state=SEED)\n",
    "    gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "XGB Version 1.0.2\n"
     ]
    }
   ],
   "source": [
    "xgb_parms = { \n",
    "    'max_depth':8, \n",
    "    'learning_rate':0.1, \n",
    "    'subsample':0.8,\n",
    "    'colsample_bytree':0.3, \n",
    "    'eval_metric':'auc',\n",
    "    'objective':'binary:logistic',\n",
    "    'tree_method':'gpu_hist',\n",
    "    'predictor' : 'gpu_predictor'\n",
    "}\n",
    "\n",
    "import xgboost as xgb\n",
    "print('XGB Version',xgb.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>tweet_id</th>\n",
       "      <th>media</th>\n",
       "      <th>domains</th>\n",
       "      <th>tweet_type</th>\n",
       "      <th>language</th>\n",
       "      <th>timestamp</th>\n",
       "      <th>a_user_id</th>\n",
       "      <th>a_follower_count</th>\n",
       "      <th>a_following_count</th>\n",
       "      <th>a_is_verified</th>\n",
       "      <th>a_account_creation</th>\n",
       "      <th>b_user_id</th>\n",
       "      <th>b_follower_count</th>\n",
       "      <th>b_following_count</th>\n",
       "      <th>b_is_verified</th>\n",
       "      <th>b_account_creation</th>\n",
       "      <th>b_follows_a</th>\n",
       "      <th>reply</th>\n",
       "      <th>retweet</th>\n",
       "      <th>retweet_comment</th>\n",
       "      <th>like</th>\n",
       "      <th>id</th>\n",
       "      <th>len_hashtags</th>\n",
       "      <th>len_domains</th>\n",
       "      <th>len_links</th>\n",
       "      <th>tr</th>\n",
       "      <th>count_ats</th>\n",
       "      <th>count_char</th>\n",
       "      <th>count_words</th>\n",
       "      <th>tw_hash0</th>\n",
       "      <th>tw_hash1</th>\n",
       "      <th>tw_rt_uhash</th>\n",
       "      <th>dt_dow</th>\n",
       "      <th>dt_hour</th>\n",
       "      <th>dt_minute</th>\n",
       "      <th>dt_second</th>\n",
       "      <th>same_tw_rt_uhash</th>\n",
       "      <th>diff_tw_rt_uhash</th>\n",
       "      <th>nan_tw_rt_uhash</th>\n",
       "      <th>same_tw_hash0</th>\n",
       "      <th>diff_tw_hash0</th>\n",
       "      <th>nan_tw_hash0</th>\n",
       "      <th>no_tweet</th>\n",
       "      <th>b_timestamp_1</th>\n",
       "      <th>b_timestamp_-1</th>\n",
       "      <th>diff_timestamp_prev</th>\n",
       "      <th>diff_timestamp_after</th>\n",
       "      <th>TE_media_reply</th>\n",
       "      <th>TE_media_retweet</th>\n",
       "      <th>TE_media_retweet_comment</th>\n",
       "      <th>TE_media_like</th>\n",
       "      <th>TE_tweet_type_reply</th>\n",
       "      <th>TE_tweet_type_retweet</th>\n",
       "      <th>TE_tweet_type_retweet_comment</th>\n",
       "      <th>TE_tweet_type_like</th>\n",
       "      <th>TE_language_reply</th>\n",
       "      <th>TE_language_retweet</th>\n",
       "      <th>TE_language_retweet_comment</th>\n",
       "      <th>TE_language_like</th>\n",
       "      <th>TE_a_user_id_reply</th>\n",
       "      <th>TE_a_user_id_retweet</th>\n",
       "      <th>TE_a_user_id_retweet_comment</th>\n",
       "      <th>TE_a_user_id_like</th>\n",
       "      <th>TE_b_user_id_reply</th>\n",
       "      <th>TE_b_user_id_retweet</th>\n",
       "      <th>TE_b_user_id_retweet_comment</th>\n",
       "      <th>TE_b_user_id_like</th>\n",
       "      <th>TE_tw_hash0_reply</th>\n",
       "      <th>TE_tw_hash0_retweet</th>\n",
       "      <th>TE_tw_hash0_retweet_comment</th>\n",
       "      <th>TE_tw_hash0_like</th>\n",
       "      <th>TE_tw_rt_uhash_reply</th>\n",
       "      <th>TE_tw_rt_uhash_retweet</th>\n",
       "      <th>TE_tw_rt_uhash_retweet_comment</th>\n",
       "      <th>TE_tw_rt_uhash_like</th>\n",
       "      <th>TE_mult_reply</th>\n",
       "      <th>TE_mult_retweet</th>\n",
       "      <th>TE_mult_retweet_comment</th>\n",
       "      <th>TE_mult_like</th>\n",
       "      <th>TE_media_elapsed_time</th>\n",
       "      <th>TE_tweet_type_elapsed_time</th>\n",
       "      <th>TE_language_elapsed_time</th>\n",
       "      <th>TE_a_user_id_elapsed_time</th>\n",
       "      <th>TE_b_user_id_elapsed_time</th>\n",
       "      <th>CE_media_norm</th>\n",
       "      <th>CE_media</th>\n",
       "      <th>CE_tweet_type_norm</th>\n",
       "      <th>CE_tweet_type</th>\n",
       "      <th>CE_language_norm</th>\n",
       "      <th>CE_language</th>\n",
       "      <th>CE_a_user_id_norm</th>\n",
       "      <th>CE_a_user_id</th>\n",
       "      <th>CE_b_user_id_norm</th>\n",
       "      <th>CE_b_user_id</th>\n",
       "      <th>DE_b_user_id_b_follower_count_1</th>\n",
       "      <th>DE_b_user_id_b_follower_count_-1</th>\n",
       "      <th>DE_b_user_id_b_following_count_1</th>\n",
       "      <th>DE_b_user_id_b_following_count_-1</th>\n",
       "      <th>DE_b_user_id_language_1</th>\n",
       "      <th>DE_b_user_id_language_-1</th>\n",
       "      <th>same_language</th>\n",
       "      <th>diff_language</th>\n",
       "      <th>nan_language</th>\n",
       "      <th>a_ff_rate</th>\n",
       "      <th>b_ff_rate</th>\n",
       "      <th>ab_fing_rate</th>\n",
       "      <th>ab_fer_rate</th>\n",
       "      <th>a_age</th>\n",
       "      <th>b_age</th>\n",
       "      <th>ab_age_dff</th>\n",
       "      <th>ab_age_rate</th>\n",
       "      <th>a_follows_b</th>\n",
       "      <th>b_follows_a_2</th>\n",
       "      <th>a_follows_b_2</th>\n",
       "      <th>ab_freq_s</th>\n",
       "      <th>ab_freq_d</th>\n",
       "      <th>ab_freq_sd</th>\n",
       "      <th>tweets_prev_s5</th>\n",
       "      <th>tweets_prev_s60</th>\n",
       "      <th>tweets_prev_s240</th>\n",
       "      <th>tweets_prev_s480</th>\n",
       "      <th>tweets_prev_s1440</th>\n",
       "      <th>eng_after_s5</th>\n",
       "      <th>eng_prev_s5</th>\n",
       "      <th>eng_after_s60</th>\n",
       "      <th>eng_prev_s60</th>\n",
       "      <th>eng_after_s240</th>\n",
       "      <th>eng_prev_s240</th>\n",
       "      <th>eng_after_s480</th>\n",
       "      <th>eng_prev_s480</th>\n",
       "      <th>eng_after_s1440</th>\n",
       "      <th>eng_prev_s1440</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>57733249</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>54</td>\n",
       "      <td>2020-02-14 17:58:46</td>\n",
       "      <td>534117</td>\n",
       "      <td>13941</td>\n",
       "      <td>1216</td>\n",
       "      <td>False</td>\n",
       "      <td>2015-11-23 15:23:06</td>\n",
       "      <td>3617447</td>\n",
       "      <td>27448</td>\n",
       "      <td>600</td>\n",
       "      <td>False</td>\n",
       "      <td>2018-03-13 13:47:49</td>\n",
       "      <td>True</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>121386431</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>55</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>17</td>\n",
       "      <td>58</td>\n",
       "      <td>46</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>5</td>\n",
       "      <td>33</td>\n",
       "      <td>-128</td>\n",
       "      <td>518474</td>\n",
       "      <td>50494</td>\n",
       "      <td>0.024988</td>\n",
       "      <td>0.094235</td>\n",
       "      <td>0.006692</td>\n",
       "      <td>0.474590</td>\n",
       "      <td>0.034772</td>\n",
       "      <td>0.098410</td>\n",
       "      <td>0.007785</td>\n",
       "      <td>0.510755</td>\n",
       "      <td>0.023573</td>\n",
       "      <td>0.100355</td>\n",
       "      <td>0.008051</td>\n",
       "      <td>0.427159</td>\n",
       "      <td>0.007879</td>\n",
       "      <td>0.064392</td>\n",
       "      <td>0.002241</td>\n",
       "      <td>0.410557</td>\n",
       "      <td>0.021339</td>\n",
       "      <td>0.091062</td>\n",
       "      <td>0.006069</td>\n",
       "      <td>0.403593</td>\n",
       "      <td>0.025726</td>\n",
       "      <td>0.111593</td>\n",
       "      <td>0.007505</td>\n",
       "      <td>0.445434</td>\n",
       "      <td>0.034587</td>\n",
       "      <td>0.095342</td>\n",
       "      <td>0.007865</td>\n",
       "      <td>0.512623</td>\n",
       "      <td>0.075886</td>\n",
       "      <td>0.119572</td>\n",
       "      <td>0.014521</td>\n",
       "      <td>0.703054</td>\n",
       "      <td>-1.581221e+09</td>\n",
       "      <td>-1.581206e+09</td>\n",
       "      <td>-1.581185e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>0.183514</td>\n",
       "      <td>27279102.0</td>\n",
       "      <td>0.578340</td>\n",
       "      <td>84679896.0</td>\n",
       "      <td>0.447992</td>\n",
       "      <td>63148424.0</td>\n",
       "      <td>3.618880e-07</td>\n",
       "      <td>54.0</td>\n",
       "      <td>1.206293e-07</td>\n",
       "      <td>7.0</td>\n",
       "      <td>-8.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0.087225</td>\n",
       "      <td>45.746666</td>\n",
       "      <td>2.026667</td>\n",
       "      <td>0.507906</td>\n",
       "      <td>51.000000</td>\n",
       "      <td>22.966667</td>\n",
       "      <td>-28.033333</td>\n",
       "      <td>2.220610</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>57733250</td>\n",
       "      <td>7</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>47</td>\n",
       "      <td>2020-02-18 10:30:42</td>\n",
       "      <td>2721240</td>\n",
       "      <td>186</td>\n",
       "      <td>100</td>\n",
       "      <td>False</td>\n",
       "      <td>2010-01-09 23:09:26</td>\n",
       "      <td>12365145</td>\n",
       "      <td>139</td>\n",
       "      <td>956</td>\n",
       "      <td>False</td>\n",
       "      <td>2012-04-22 15:58:19</td>\n",
       "      <td>False</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>121386432</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>57</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>102048</td>\n",
       "      <td>1</td>\n",
       "      <td>10</td>\n",
       "      <td>30</td>\n",
       "      <td>42</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>47</td>\n",
       "      <td>-1</td>\n",
       "      <td>766962</td>\n",
       "      <td>1296000</td>\n",
       "      <td>0.018753</td>\n",
       "      <td>0.138635</td>\n",
       "      <td>0.008074</td>\n",
       "      <td>0.540576</td>\n",
       "      <td>0.007432</td>\n",
       "      <td>0.132841</td>\n",
       "      <td>0.006253</td>\n",
       "      <td>0.288196</td>\n",
       "      <td>0.019282</td>\n",
       "      <td>0.093730</td>\n",
       "      <td>0.006771</td>\n",
       "      <td>0.462129</td>\n",
       "      <td>0.023279</td>\n",
       "      <td>0.099340</td>\n",
       "      <td>0.006621</td>\n",
       "      <td>0.485738</td>\n",
       "      <td>0.016004</td>\n",
       "      <td>0.099546</td>\n",
       "      <td>0.004552</td>\n",
       "      <td>0.552695</td>\n",
       "      <td>0.025726</td>\n",
       "      <td>0.111593</td>\n",
       "      <td>0.007505</td>\n",
       "      <td>0.445434</td>\n",
       "      <td>0.016520</td>\n",
       "      <td>0.070500</td>\n",
       "      <td>0.004699</td>\n",
       "      <td>0.473749</td>\n",
       "      <td>0.004349</td>\n",
       "      <td>0.135523</td>\n",
       "      <td>0.004298</td>\n",
       "      <td>0.343614</td>\n",
       "      <td>-1.581273e+09</td>\n",
       "      <td>-1.581252e+09</td>\n",
       "      <td>-1.581253e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>0.070948</td>\n",
       "      <td>10919567.0</td>\n",
       "      <td>0.329188</td>\n",
       "      <td>48525540.0</td>\n",
       "      <td>0.049365</td>\n",
       "      <td>7510074.0</td>\n",
       "      <td>4.020978e-08</td>\n",
       "      <td>3.0</td>\n",
       "      <td>2.010489e-07</td>\n",
       "      <td>17.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>-7.0</td>\n",
       "      <td>-7.0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0.537634</td>\n",
       "      <td>0.145397</td>\n",
       "      <td>0.104603</td>\n",
       "      <td>1.338130</td>\n",
       "      <td>122.466667</td>\n",
       "      <td>94.666667</td>\n",
       "      <td>-27.800000</td>\n",
       "      <td>1.293662</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>57733251</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>13</td>\n",
       "      <td>2020-02-15 02:48:38</td>\n",
       "      <td>2023199</td>\n",
       "      <td>250470</td>\n",
       "      <td>1</td>\n",
       "      <td>False</td>\n",
       "      <td>2012-12-26 02:17:49</td>\n",
       "      <td>28952089</td>\n",
       "      <td>16</td>\n",
       "      <td>97</td>\n",
       "      <td>False</td>\n",
       "      <td>2017-08-28 17:18:31</td>\n",
       "      <td>False</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>121386433</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>49</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>5</td>\n",
       "      <td>2</td>\n",
       "      <td>48</td>\n",
       "      <td>38</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>-128</td>\n",
       "      <td>-128</td>\n",
       "      <td>1296000</td>\n",
       "      <td>1296000</td>\n",
       "      <td>0.026295</td>\n",
       "      <td>0.114113</td>\n",
       "      <td>0.006647</td>\n",
       "      <td>0.434231</td>\n",
       "      <td>0.034772</td>\n",
       "      <td>0.098410</td>\n",
       "      <td>0.007785</td>\n",
       "      <td>0.510755</td>\n",
       "      <td>0.031939</td>\n",
       "      <td>0.132462</td>\n",
       "      <td>0.014753</td>\n",
       "      <td>0.461783</td>\n",
       "      <td>0.010243</td>\n",
       "      <td>0.063710</td>\n",
       "      <td>0.002913</td>\n",
       "      <td>0.333725</td>\n",
       "      <td>0.025607</td>\n",
       "      <td>0.109274</td>\n",
       "      <td>0.007283</td>\n",
       "      <td>0.434312</td>\n",
       "      <td>0.025726</td>\n",
       "      <td>0.111593</td>\n",
       "      <td>0.007505</td>\n",
       "      <td>0.445434</td>\n",
       "      <td>0.034587</td>\n",
       "      <td>0.095342</td>\n",
       "      <td>0.007865</td>\n",
       "      <td>0.512623</td>\n",
       "      <td>0.014791</td>\n",
       "      <td>0.201175</td>\n",
       "      <td>0.011906</td>\n",
       "      <td>0.547681</td>\n",
       "      <td>-1.581260e+09</td>\n",
       "      <td>-1.581206e+09</td>\n",
       "      <td>-1.581222e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>0.012145</td>\n",
       "      <td>1637619.0</td>\n",
       "      <td>0.578340</td>\n",
       "      <td>84679896.0</td>\n",
       "      <td>0.006631</td>\n",
       "      <td>1035820.0</td>\n",
       "      <td>1.568181e-05</td>\n",
       "      <td>420.0</td>\n",
       "      <td>4.020978e-08</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0.000004</td>\n",
       "      <td>0.164948</td>\n",
       "      <td>0.010309</td>\n",
       "      <td>15654.375000</td>\n",
       "      <td>86.400000</td>\n",
       "      <td>29.533333</td>\n",
       "      <td>-56.900000</td>\n",
       "      <td>2.925508</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>57733252</td>\n",
       "      <td>7</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>54</td>\n",
       "      <td>2020-02-17 04:26:53</td>\n",
       "      <td>2816974</td>\n",
       "      <td>516</td>\n",
       "      <td>406</td>\n",
       "      <td>False</td>\n",
       "      <td>2015-12-02 22:49:27</td>\n",
       "      <td>13774342</td>\n",
       "      <td>460</td>\n",
       "      <td>693</td>\n",
       "      <td>False</td>\n",
       "      <td>2014-04-01 00:25:56</td>\n",
       "      <td>True</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>121386434</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>104</td>\n",
       "      <td>16</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>2446</td>\n",
       "      <td>0</td>\n",
       "      <td>4</td>\n",
       "      <td>26</td>\n",
       "      <td>53</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>58</td>\n",
       "      <td>-106</td>\n",
       "      <td>1296000</td>\n",
       "      <td>1296000</td>\n",
       "      <td>0.018753</td>\n",
       "      <td>0.138635</td>\n",
       "      <td>0.008074</td>\n",
       "      <td>0.540576</td>\n",
       "      <td>0.007432</td>\n",
       "      <td>0.132841</td>\n",
       "      <td>0.006253</td>\n",
       "      <td>0.288196</td>\n",
       "      <td>0.023573</td>\n",
       "      <td>0.100355</td>\n",
       "      <td>0.008051</td>\n",
       "      <td>0.427159</td>\n",
       "      <td>0.016004</td>\n",
       "      <td>0.068296</td>\n",
       "      <td>0.004552</td>\n",
       "      <td>0.458945</td>\n",
       "      <td>0.016004</td>\n",
       "      <td>0.099546</td>\n",
       "      <td>0.004552</td>\n",
       "      <td>0.427695</td>\n",
       "      <td>0.025726</td>\n",
       "      <td>0.111593</td>\n",
       "      <td>0.007505</td>\n",
       "      <td>0.445434</td>\n",
       "      <td>0.001847</td>\n",
       "      <td>0.138021</td>\n",
       "      <td>0.005790</td>\n",
       "      <td>0.437448</td>\n",
       "      <td>0.006334</td>\n",
       "      <td>0.144244</td>\n",
       "      <td>0.010178</td>\n",
       "      <td>0.423202</td>\n",
       "      <td>-1.581273e+09</td>\n",
       "      <td>-1.581252e+09</td>\n",
       "      <td>-1.581185e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>0.070948</td>\n",
       "      <td>10919567.0</td>\n",
       "      <td>0.329188</td>\n",
       "      <td>48525540.0</td>\n",
       "      <td>0.447992</td>\n",
       "      <td>63148424.0</td>\n",
       "      <td>8.041955e-08</td>\n",
       "      <td>14.0</td>\n",
       "      <td>2.412587e-07</td>\n",
       "      <td>18.0</td>\n",
       "      <td>-2.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>-1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0.786822</td>\n",
       "      <td>0.663781</td>\n",
       "      <td>0.585859</td>\n",
       "      <td>1.121739</td>\n",
       "      <td>50.700000</td>\n",
       "      <td>71.033333</td>\n",
       "      <td>20.333333</td>\n",
       "      <td>0.713749</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>57733253</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>54</td>\n",
       "      <td>2020-02-13 03:49:05</td>\n",
       "      <td>366629</td>\n",
       "      <td>19576</td>\n",
       "      <td>273</td>\n",
       "      <td>True</td>\n",
       "      <td>2009-03-04 15:49:58</td>\n",
       "      <td>11208153</td>\n",
       "      <td>468</td>\n",
       "      <td>3837</td>\n",
       "      <td>False</td>\n",
       "      <td>2011-02-25 15:13:21</td>\n",
       "      <td>False</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>121386435</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>82</td>\n",
       "      <td>10</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "      <td>49</td>\n",
       "      <td>5</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>-128</td>\n",
       "      <td>79</td>\n",
       "      <td>355196</td>\n",
       "      <td>1296000</td>\n",
       "      <td>0.024988</td>\n",
       "      <td>0.094235</td>\n",
       "      <td>0.006692</td>\n",
       "      <td>0.474590</td>\n",
       "      <td>0.034772</td>\n",
       "      <td>0.098410</td>\n",
       "      <td>0.007785</td>\n",
       "      <td>0.510755</td>\n",
       "      <td>0.023573</td>\n",
       "      <td>0.100355</td>\n",
       "      <td>0.008051</td>\n",
       "      <td>0.427159</td>\n",
       "      <td>0.002910</td>\n",
       "      <td>0.052190</td>\n",
       "      <td>0.000828</td>\n",
       "      <td>0.458445</td>\n",
       "      <td>0.025607</td>\n",
       "      <td>0.109274</td>\n",
       "      <td>0.007283</td>\n",
       "      <td>0.434312</td>\n",
       "      <td>0.025726</td>\n",
       "      <td>0.111593</td>\n",
       "      <td>0.007505</td>\n",
       "      <td>0.445434</td>\n",
       "      <td>0.034587</td>\n",
       "      <td>0.095342</td>\n",
       "      <td>0.007865</td>\n",
       "      <td>0.512623</td>\n",
       "      <td>0.018369</td>\n",
       "      <td>0.069014</td>\n",
       "      <td>0.006594</td>\n",
       "      <td>0.517109</td>\n",
       "      <td>-1.581221e+09</td>\n",
       "      <td>-1.581206e+09</td>\n",
       "      <td>-1.581185e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>-1.581258e+09</td>\n",
       "      <td>0.183514</td>\n",
       "      <td>27279102.0</td>\n",
       "      <td>0.578340</td>\n",
       "      <td>84679896.0</td>\n",
       "      <td>0.447992</td>\n",
       "      <td>63148424.0</td>\n",
       "      <td>1.005244e-06</td>\n",
       "      <td>181.0</td>\n",
       "      <td>1.206293e-07</td>\n",
       "      <td>3.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>-5.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0.013946</td>\n",
       "      <td>0.121970</td>\n",
       "      <td>0.071149</td>\n",
       "      <td>41.829060</td>\n",
       "      <td>132.833333</td>\n",
       "      <td>108.733333</td>\n",
       "      <td>-24.100000</td>\n",
       "      <td>1.221643</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   tweet_id  media  domains  tweet_type  language           timestamp  \\\n",
       "0  57733249      5        0           2        54 2020-02-14 17:58:46   \n",
       "1  57733250      7        0           1        47 2020-02-18 10:30:42   \n",
       "2  57733251      1        0           2        13 2020-02-15 02:48:38   \n",
       "3  57733252      7        0           1        54 2020-02-17 04:26:53   \n",
       "4  57733253      5        0           2        54 2020-02-13 03:49:05   \n",
       "\n",
       "   a_user_id  a_follower_count  a_following_count  a_is_verified  \\\n",
       "0     534117             13941               1216          False   \n",
       "1    2721240               186                100          False   \n",
       "2    2023199            250470                  1          False   \n",
       "3    2816974               516                406          False   \n",
       "4     366629             19576                273           True   \n",
       "\n",
       "   a_account_creation  b_user_id  b_follower_count  b_following_count  \\\n",
       "0 2015-11-23 15:23:06    3617447             27448                600   \n",
       "1 2010-01-09 23:09:26   12365145               139                956   \n",
       "2 2012-12-26 02:17:49   28952089                16                 97   \n",
       "3 2015-12-02 22:49:27   13774342               460                693   \n",
       "4 2009-03-04 15:49:58   11208153               468               3837   \n",
       "\n",
       "   b_is_verified  b_account_creation  b_follows_a  reply  retweet  \\\n",
       "0          False 2018-03-13 13:47:49         True      0        0   \n",
       "1          False 2012-04-22 15:58:19        False      0        0   \n",
       "2          False 2017-08-28 17:18:31        False      0        0   \n",
       "3          False 2014-04-01 00:25:56         True      0        0   \n",
       "4          False 2011-02-25 15:13:21        False      0        0   \n",
       "\n",
       "   retweet_comment  like         id  len_hashtags  len_domains  len_links  tr  \\\n",
       "0                0     0  121386431             0            0          0   0   \n",
       "1                0     0  121386432             0            0          0   0   \n",
       "2                0     0  121386433             0            0          0   0   \n",
       "3                0     0  121386434             0            0          0   0   \n",
       "4                0     0  121386435             0            0          0   0   \n",
       "\n",
       "   count_ats  count_char  count_words  tw_hash0  tw_hash1  tw_rt_uhash  \\\n",
       "0          0          55            5         0         0            0   \n",
       "1          0          57            5         0         0       102048   \n",
       "2          0          49            5         0         0            0   \n",
       "3          0         104           16         0         0         2446   \n",
       "4          0          82           10         0         0            0   \n",
       "\n",
       "   dt_dow  dt_hour  dt_minute  dt_second  same_tw_rt_uhash  diff_tw_rt_uhash  \\\n",
       "0       4       17         58         46                 0                 0   \n",
       "1       1       10         30         42                 0                 0   \n",
       "2       5        2         48         38                 0                 0   \n",
       "3       0        4         26         53                 0                 0   \n",
       "4       3        3         49          5                 0                 0   \n",
       "\n",
       "   nan_tw_rt_uhash  same_tw_hash0  diff_tw_hash0  nan_tw_hash0  no_tweet  \\\n",
       "0                1              0              0             1         5   \n",
       "1                1              0              0             1         1   \n",
       "2                1              0              0             1         0   \n",
       "3                1              0              0             1         0   \n",
       "4                1              0              0             1         1   \n",
       "\n",
       "   b_timestamp_1  b_timestamp_-1  diff_timestamp_prev  diff_timestamp_after  \\\n",
       "0             33            -128               518474                 50494   \n",
       "1             47              -1               766962               1296000   \n",
       "2           -128            -128              1296000               1296000   \n",
       "3             58            -106              1296000               1296000   \n",
       "4           -128              79               355196               1296000   \n",
       "\n",
       "   TE_media_reply  TE_media_retweet  TE_media_retweet_comment  TE_media_like  \\\n",
       "0        0.024988          0.094235                  0.006692       0.474590   \n",
       "1        0.018753          0.138635                  0.008074       0.540576   \n",
       "2        0.026295          0.114113                  0.006647       0.434231   \n",
       "3        0.018753          0.138635                  0.008074       0.540576   \n",
       "4        0.024988          0.094235                  0.006692       0.474590   \n",
       "\n",
       "   TE_tweet_type_reply  TE_tweet_type_retweet  TE_tweet_type_retweet_comment  \\\n",
       "0             0.034772               0.098410                       0.007785   \n",
       "1             0.007432               0.132841                       0.006253   \n",
       "2             0.034772               0.098410                       0.007785   \n",
       "3             0.007432               0.132841                       0.006253   \n",
       "4             0.034772               0.098410                       0.007785   \n",
       "\n",
       "   TE_tweet_type_like  TE_language_reply  TE_language_retweet  \\\n",
       "0            0.510755           0.023573             0.100355   \n",
       "1            0.288196           0.019282             0.093730   \n",
       "2            0.510755           0.031939             0.132462   \n",
       "3            0.288196           0.023573             0.100355   \n",
       "4            0.510755           0.023573             0.100355   \n",
       "\n",
       "   TE_language_retweet_comment  TE_language_like  TE_a_user_id_reply  \\\n",
       "0                     0.008051          0.427159            0.007879   \n",
       "1                     0.006771          0.462129            0.023279   \n",
       "2                     0.014753          0.461783            0.010243   \n",
       "3                     0.008051          0.427159            0.016004   \n",
       "4                     0.008051          0.427159            0.002910   \n",
       "\n",
       "   TE_a_user_id_retweet  TE_a_user_id_retweet_comment  TE_a_user_id_like  \\\n",
       "0              0.064392                      0.002241           0.410557   \n",
       "1              0.099340                      0.006621           0.485738   \n",
       "2              0.063710                      0.002913           0.333725   \n",
       "3              0.068296                      0.004552           0.458945   \n",
       "4              0.052190                      0.000828           0.458445   \n",
       "\n",
       "   TE_b_user_id_reply  TE_b_user_id_retweet  TE_b_user_id_retweet_comment  \\\n",
       "0            0.021339              0.091062                      0.006069   \n",
       "1            0.016004              0.099546                      0.004552   \n",
       "2            0.025607              0.109274                      0.007283   \n",
       "3            0.016004              0.099546                      0.004552   \n",
       "4            0.025607              0.109274                      0.007283   \n",
       "\n",
       "   TE_b_user_id_like  TE_tw_hash0_reply  TE_tw_hash0_retweet  \\\n",
       "0           0.403593           0.025726             0.111593   \n",
       "1           0.552695           0.025726             0.111593   \n",
       "2           0.434312           0.025726             0.111593   \n",
       "3           0.427695           0.025726             0.111593   \n",
       "4           0.434312           0.025726             0.111593   \n",
       "\n",
       "   TE_tw_hash0_retweet_comment  TE_tw_hash0_like  TE_tw_rt_uhash_reply  \\\n",
       "0                     0.007505          0.445434              0.034587   \n",
       "1                     0.007505          0.445434              0.016520   \n",
       "2                     0.007505          0.445434              0.034587   \n",
       "3                     0.007505          0.445434              0.001847   \n",
       "4                     0.007505          0.445434              0.034587   \n",
       "\n",
       "   TE_tw_rt_uhash_retweet  TE_tw_rt_uhash_retweet_comment  \\\n",
       "0                0.095342                        0.007865   \n",
       "1                0.070500                        0.004699   \n",
       "2                0.095342                        0.007865   \n",
       "3                0.138021                        0.005790   \n",
       "4                0.095342                        0.007865   \n",
       "\n",
       "   TE_tw_rt_uhash_like  TE_mult_reply  TE_mult_retweet  \\\n",
       "0             0.512623       0.075886         0.119572   \n",
       "1             0.473749       0.004349         0.135523   \n",
       "2             0.512623       0.014791         0.201175   \n",
       "3             0.437448       0.006334         0.144244   \n",
       "4             0.512623       0.018369         0.069014   \n",
       "\n",
       "   TE_mult_retweet_comment  TE_mult_like  TE_media_elapsed_time  \\\n",
       "0                 0.014521      0.703054          -1.581221e+09   \n",
       "1                 0.004298      0.343614          -1.581273e+09   \n",
       "2                 0.011906      0.547681          -1.581260e+09   \n",
       "3                 0.010178      0.423202          -1.581273e+09   \n",
       "4                 0.006594      0.517109          -1.581221e+09   \n",
       "\n",
       "   TE_tweet_type_elapsed_time  TE_language_elapsed_time  \\\n",
       "0               -1.581206e+09             -1.581185e+09   \n",
       "1               -1.581252e+09             -1.581253e+09   \n",
       "2               -1.581206e+09             -1.581222e+09   \n",
       "3               -1.581252e+09             -1.581185e+09   \n",
       "4               -1.581206e+09             -1.581185e+09   \n",
       "\n",
       "   TE_a_user_id_elapsed_time  TE_b_user_id_elapsed_time  CE_media_norm  \\\n",
       "0              -1.581258e+09              -1.581258e+09       0.183514   \n",
       "1              -1.581258e+09              -1.581258e+09       0.070948   \n",
       "2              -1.581258e+09              -1.581258e+09       0.012145   \n",
       "3              -1.581258e+09              -1.581258e+09       0.070948   \n",
       "4              -1.581258e+09              -1.581258e+09       0.183514   \n",
       "\n",
       "     CE_media  CE_tweet_type_norm  CE_tweet_type  CE_language_norm  \\\n",
       "0  27279102.0            0.578340     84679896.0          0.447992   \n",
       "1  10919567.0            0.329188     48525540.0          0.049365   \n",
       "2   1637619.0            0.578340     84679896.0          0.006631   \n",
       "3  10919567.0            0.329188     48525540.0          0.447992   \n",
       "4  27279102.0            0.578340     84679896.0          0.447992   \n",
       "\n",
       "   CE_language  CE_a_user_id_norm  CE_a_user_id  CE_b_user_id_norm  \\\n",
       "0   63148424.0       3.618880e-07          54.0       1.206293e-07   \n",
       "1    7510074.0       4.020978e-08           3.0       2.010489e-07   \n",
       "2    1035820.0       1.568181e-05         420.0       4.020978e-08   \n",
       "3   63148424.0       8.041955e-08          14.0       2.412587e-07   \n",
       "4   63148424.0       1.005244e-06         181.0       1.206293e-07   \n",
       "\n",
       "   CE_b_user_id  DE_b_user_id_b_follower_count_1  \\\n",
       "0           7.0                             -8.0   \n",
       "1          17.0                              0.0   \n",
       "2           1.0                              0.0   \n",
       "3          18.0                             -2.0   \n",
       "4           3.0                              0.0   \n",
       "\n",
       "   DE_b_user_id_b_follower_count_-1  DE_b_user_id_b_following_count_1  \\\n",
       "0                               0.0                              -1.0   \n",
       "1                               0.0                               0.0   \n",
       "2                               0.0                               0.0   \n",
       "3                               0.0                              -1.0   \n",
       "4                               1.0                               0.0   \n",
       "\n",
       "   DE_b_user_id_b_following_count_-1  DE_b_user_id_language_1  \\\n",
       "0                                0.0                      0.0   \n",
       "1                                0.0                     -7.0   \n",
       "2                                0.0                      0.0   \n",
       "3                                0.0                      0.0   \n",
       "4                               -5.0                      0.0   \n",
       "\n",
       "   DE_b_user_id_language_-1  same_language  diff_language  nan_language  \\\n",
       "0                       0.0              1              0             0   \n",
       "1                      -7.0              0              1             0   \n",
       "2                       0.0              0              0             1   \n",
       "3                       0.0              0              0             1   \n",
       "4                       0.0              1              0             0   \n",
       "\n",
       "   a_ff_rate  b_ff_rate  ab_fing_rate   ab_fer_rate       a_age       b_age  \\\n",
       "0   0.087225  45.746666      2.026667      0.507906   51.000000   22.966667   \n",
       "1   0.537634   0.145397      0.104603      1.338130  122.466667   94.666667   \n",
       "2   0.000004   0.164948      0.010309  15654.375000   86.400000   29.533333   \n",
       "3   0.786822   0.663781      0.585859      1.121739   50.700000   71.033333   \n",
       "4   0.013946   0.121970      0.071149     41.829060  132.833333  108.733333   \n",
       "\n",
       "   ab_age_dff  ab_age_rate  a_follows_b  b_follows_a_2  a_follows_b_2  \\\n",
       "0  -28.033333     2.220610            0              0              0   \n",
       "1  -27.800000     1.293662            0              0              0   \n",
       "2  -56.900000     2.925508            0              0              0   \n",
       "3   20.333333     0.713749            0              0              0   \n",
       "4  -24.100000     1.221643            0              0              0   \n",
       "\n",
       "   ab_freq_s  ab_freq_d  ab_freq_sd  tweets_prev_s5  tweets_prev_s60  \\\n",
       "0          1          0           1               0                0   \n",
       "1          1          0           1               0                0   \n",
       "2          1          0           1               0                0   \n",
       "3          1          0           1               0                0   \n",
       "4          1          0           1               0                0   \n",
       "\n",
       "   tweets_prev_s240  tweets_prev_s480  tweets_prev_s1440  eng_after_s5  \\\n",
       "0                 0                 0                  1             0   \n",
       "1                 0                 0                  0             0   \n",
       "2                 0                 0                  0             0   \n",
       "3                 0                 0                  0             0   \n",
       "4                 0                 0                  0             0   \n",
       "\n",
       "   eng_prev_s5  eng_after_s60  eng_prev_s60  eng_after_s240  eng_prev_s240  \\\n",
       "0            0              0             0               0              0   \n",
       "1            0              0             0               0              0   \n",
       "2            0              0             0               0              0   \n",
       "3            0              0             0               0              0   \n",
       "4            0              0             0               0              0   \n",
       "\n",
       "   eng_after_s480  eng_prev_s480  eng_after_s1440  eng_prev_s1440  \n",
       "0               0              0                0               1  \n",
       "1               0              0                1               1  \n",
       "2               0              0                0               0  \n",
       "3               0              0                1               0  \n",
       "4               0              0                0               0  "
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "valid.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "tr_arr = valid[['tr', 'id']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "no dup :) \n",
      "X_train.shape (13182566, 115)\n",
      "X_valid.shape (24869573, 115)\n"
     ]
    }
   ],
   "source": [
    "# CREATE TRAIN AND VALIDATION SETS\n",
    "RMV = [c for c in DONT_USE if c in train.columns]\n",
    "\n",
    "X_train = train.drop(RMV, axis=1)\n",
    "Y_train = train[label_names]\n",
    "del train\n",
    "gc.collect()\n",
    "\n",
    "X_valid = valid[X_train.columns]\n",
    "#Y_valid = valid[label_names]\n",
    "del valid\n",
    "gc.collect()\n",
    "\n",
    "if X_train.columns.duplicated().sum()>0:\n",
    "    raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')\n",
    "print('no dup :) ')\n",
    "print(f'X_train.shape {X_train.shape}')\n",
    "print(f'X_valid.shape {X_valid.shape}')\n",
    "\n",
    "# I'M NOT A FAN OF REDUCING TO FLOAT16\n",
    "#utils.reduce_mem_usage(X_train)\n",
    "#utils.reduce_mem_usage(X_valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "sub_pub = pd.read_csv('../preprocessings/sample_submission_public.csv')\n",
    "sub_priv = pd.read_csv('../preprocessings/sample_submission_private.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((12434735, 3), (12434838, 3))"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sub_pub.shape, sub_priv.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(12434735, 12434838)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.sum(tr_arr['tr']==0), np.sum(tr_arr['tr']==1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['reply', 'retweet', 'retweet_comment', 'like']"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "label_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "import dask as dask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 43.5 s, sys: 1.89 s, total: 45.4 s\n",
      "Wall time: 43.6 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# Dask dataframe\n",
    "dX_train = dask.dataframe.from_pandas(X_train, npartitions=8)\n",
    "dY_train = dask.dataframe.from_pandas(Y_train, npartitions=8)\n",
    "dX_valid = dask.dataframe.from_pandas(X_valid, npartitions=8)\n",
    "dX_tr_arr = dask.dataframe.from_pandas(tr_arr, npartitions=8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 766 ms, sys: 345 ms, total: 1.11 s\n",
      "Wall time: 1.12 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "# CuDF Dask\n",
    "ddX_train = dask_cudf.from_dask_dataframe(dX_train)\n",
    "ddY_train = dask_cudf.from_dask_dataframe(dY_train)\n",
    "ddX_valid = dask_cudf.from_dask_dataframe(dX_valid)\n",
    "ddX_tr_arr = dask_cudf.from_dask_dataframe(dX_tr_arr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "a_is_verified\n",
      "b_is_verified\n",
      "b_follows_a\n"
     ]
    }
   ],
   "source": [
    "for c in ddX_train.columns:\n",
    "    if str(ddX_train[c].dtype)=='bool': \n",
    "        ddX_train[c] = ddX_train[c].astype('int8')\n",
    "        ddX_valid[c] = ddX_valid[c].astype('int8')\n",
    "        print (c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<table style=\"border: 2px solid white;\">\n",
       "<tr>\n",
       "<td style=\"vertical-align: top; border: 0px solid white\">\n",
       "<h3 style=\"text-align: left;\">Client</h3>\n",
       "<ul style=\"text-align: left; list-style: none; margin: 0; padding: 0;\">\n",
       "  <li><b>Scheduler: </b>tcp://127.0.0.1:42677</li>\n",
       "  <li><b>Dashboard: </b><a href='http://127.0.0.1:8787/status' target='_blank'>http://127.0.0.1:8787/status</a></li>\n",
       "</ul>\n",
       "</td>\n",
       "<td style=\"vertical-align: top; border: 0px solid white\">\n",
       "<h3 style=\"text-align: left;\">Cluster</h3>\n",
       "<ul style=\"text-align: left; list-style:none; margin: 0; padding: 0;\">\n",
       "  <li><b>Workers: </b>8</li>\n",
       "  <li><b>Cores: </b>8</li>\n",
       "  <li><b>Memory: </b>429.50 GB</li>\n",
       "</ul>\n",
       "</td>\n",
       "</tr>\n",
       "</table>"
      ],
      "text/plain": [
       "<Client: 'tcp://127.0.0.1:42677' processes=8 threads=8, memory=429.50 GB>"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "client"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loadtest\n",
      "#########################\n",
      "### reply\n",
      "#########################\n",
      "Training...\n",
      "Took 81.4 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 16.3 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "Training...\n",
      "Took 78.7 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 15.9 seconds\n",
      "Combining...\n",
      "Took 2.1 seconds\n",
      "\n",
      "Training...\n",
      "Took 78.3 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 15.8 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "\n",
      "#########################\n",
      "### retweet\n",
      "#########################\n",
      "Training...\n",
      "Took 73.6 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 19.4 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "Training...\n",
      "Took 74.6 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 19.0 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "Training...\n",
      "Took 74.7 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 19.4 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "\n",
      "#########################\n",
      "### retweet_comment\n",
      "#########################\n",
      "Training...\n",
      "Took 46.6 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 13.7 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "Training...\n",
      "Took 46.9 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 13.6 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "Training...\n",
      "Took 45.5 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 13.5 seconds\n",
      "Combining...\n",
      "Took 2.1 seconds\n",
      "\n",
      "\n",
      "#########################\n",
      "### like\n",
      "#########################\n",
      "Training...\n",
      "Took 42.1 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 13.9 seconds\n",
      "Combining...\n",
      "Took 2.1 seconds\n",
      "\n",
      "Training...\n",
      "Took 41.3 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 11.5 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "Training...\n",
      "Took 41.9 seconds\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/rapids/lib/python3.6/site-packages/ipykernel_launcher.py:32: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicing...\n",
      "Took 11.6 seconds\n",
      "Combining...\n",
      "Took 2.2 seconds\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# TRAIN AND VALIDATE\n",
    "LOOP1 = 3\n",
    "NROUNDS = [554, 708, 448, 353] \n",
    "VERBOSE_EVAL = 50\n",
    "\n",
    "print('loadtest')\n",
    "\n",
    "dtest = xgb.dask.DaskDMatrix(client,data=ddX_valid)\n",
    "\n",
    "for i in range(4):\n",
    "    name = label_names[i]\n",
    "    print('#'*25);print('###',name);print('#'*25)\n",
    "    \n",
    "    sub_pub[name] = 0\n",
    "    sub_priv[name] = 0\n",
    "    #models = []\n",
    "    \n",
    "    dtrain = xgb.dask.DaskDMatrix(client,data=ddX_train,label=ddY_train.iloc[:, i])\n",
    "    \n",
    "    for j in range(LOOP1):\n",
    "        xgb_parms['seed'] = j\n",
    "                        \n",
    "        start = time.time(); print('Training...')\n",
    "        model = xgb.dask.train(client, xgb_parms, \n",
    "                               dtrain=dtrain,\n",
    "                               num_boost_round=NROUNDS[i],\n",
    "                               verbose_eval=VERBOSE_EVAL) \n",
    "        print('Took %.1f seconds'%(time.time()-start))\n",
    "        \n",
    "        tr_arr2 = dX_tr_arr.compute().as_matrix()\n",
    "        pd_tr_arr2 = pd.DataFrame(tr_arr2, columns=['tr', 'id'])\n",
    "        \n",
    "        start = time.time(); print('Predicing...')\n",
    "        pred = xgb.dask.predict(client,model,dtest).compute()\n",
    "        print('Took %.1f seconds'%(time.time()-start))\n",
    "        \n",
    "        start = time.time(); print('Combining...')\n",
    "        pd_tr_arr2['pred'] = pred\n",
    "        pd_tr_arr2 = pd_tr_arr2.sort_values('id')\n",
    "        \n",
    "        sub_pub[name] += pd_tr_arr2.loc[pd_tr_arr2['tr']==0, 'pred'].values\n",
    "        sub_priv[name] += pd_tr_arr2.loc[pd_tr_arr2['tr']==1, 'pred'].values\n",
    "        print('Took %.1f seconds'%(time.time()-start))\n",
    "        #models.append(model)\n",
    "        \n",
    "        if i<3:\n",
    "            del model, pd_tr_arr2\n",
    "            gc.collect()\n",
    "        print()\n",
    "    \n",
    "    if i<3:\n",
    "        del dtrain\n",
    "        gc.collect()\n",
    "    print()\n",
    "        \n",
    "sub_pub.iloc[:, 2:] /= LOOP1\n",
    "sub_priv.iloc[:, 2:] /= LOOP1\n",
    "sub_pub.to_parquet('sub_pub_1334_mulit_v2.parquet')\n",
    "sub_priv.to_parquet('sub_priv_1334_mulit_v2.parquet')\n",
    "#sub.to_csv('sub_like_%i.csv'%VER,index=False,header=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
