{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "helper load finish!!!\n"
     ]
    }
   ],
   "source": [
     "# transaction table analysis and extract\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "from base_helper import *\n",
    "train_tst = get_transaction_train_new()\n",
    "test_tst_rd = get_transaction_round1_new()\n",
    "\n",
    "tag = get_tag_train_new()\n",
    "test_tst_rd[tag_hd.Tag] = -1\n",
    "train_tst = pd.merge(train_tst, tag, on='UID', how='left')\n",
    "\n",
    "'''合并训练测试集'''\n",
    "train_tsts = pd.concat([train_tst, test_tst_rd], axis=0).reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(61116, 1) (387752, 28)\n",
      "(61116, 2) (387752, 28)\n"
     ]
    }
   ],
   "source": [
    "# # 数据异常的清除,删除channel为118的\n",
    "\n",
    "train_tsts = train_tsts[train_tsts.channel != 118].reset_index(drop=True)\n",
    "\n",
    "train_tst_feature = pd.DataFrame({'UID':[i for i in train_tsts.UID.unique()]})\n",
    "print(train_tst_feature.shape, train_tsts.shape)\n",
    "train_tst_feature = train_tst_feature.merge(train_tsts.drop_duplicates(['UID', 'Tag'])[['UID', 'Tag']],\n",
    "                                            on='UID',how='left')\n",
    "print(train_tst_feature.shape, train_tsts.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # channel是否为119\n",
    "def fucs_channel(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (119 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "train_channel = train_tsts.groupby('UID')['channel'].apply(fucs_channel).reset_index().rename(\n",
    "    {\"channel\":'channel_119'}, axis=1)\n",
    "train_tst_feature = train_tst_feature.merge(train_channel, on='UID', how='left')\n",
    "\n",
    "\n",
    "# # trans_amt, bal的处理\n",
    "\n",
    "'''交易金额是否大于3000000'''\n",
    "def fucs_trans_amt(x):\n",
    "    temp = np.max(x.values.tolist())\n",
    "    if temp > 3000000:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "train_trans_amt = train_tsts.groupby('UID')['trans_amt'].apply(fucs_trans_amt).reset_index().rename(\n",
    "    {\"trans_amt\":'trans_amt_than_119'}, axis=1)\n",
    "\n",
    "def fucs_trans_amt_102(x):\n",
    "    temp = x.values.tolist()\n",
    "    if 102 in temp:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "train_trans_amt_102 = train_tsts.groupby('UID')['trans_amt'].apply(fucs_trans_amt_102).reset_index().rename(\n",
    "    {\"trans_amt\":'trans_amt_102'}, axis=1)\n",
    "\n",
    "train_tst_feature = train_tst_feature.merge(train_trans_amt, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(train_trans_amt_102, on='UID', how='left')\n",
    "\n",
    "def fucs_bal_big(x):\n",
    "    temp = np.max(x.values.tolist())\n",
    "    if temp > 3200000:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "train_bal_big = train_tsts.groupby('UID')['bal'].apply(fucs_bal_big).reset_index().rename(\n",
    "    {\"bal\":'trans_bal_big'}, axis=1)\n",
    "\n",
    "def fucs_bal_100(x):\n",
    "    temp = x.values.tolist()\n",
    "    if 100 in temp:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "train_bal_100 = train_tsts.groupby('UID')['bal'].apply(fucs_bal_100).reset_index().rename(\n",
    "    {\"bal\":'bal_100'}, axis=1)\n",
    "\n",
    "def fucs_bal_100_260(x):\n",
    "    temp = x.values.tolist()\n",
    "    count = 0\n",
    "    for i in temp:\n",
    "        if i>100 & i < 260:\n",
    "            count += 1\n",
    "    ratio = count / len(temp)\n",
    "    return ratio\n",
    "train_bal_100_260 = train_tsts.groupby('UID')['bal'].apply(fucs_bal_100_260).reset_index().rename(\n",
    "    {\"bal\":'bal_100_260'}, axis=1)\n",
    "\n",
    "\n",
    "train_tst_feature = train_tst_feature.merge(train_bal_big, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(train_bal_100, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(train_bal_100_260, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 是不是1，8以及14，12，以及26，27，28\n",
    "\n",
    "def fucs_1(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (1 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "def fucs_8(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (8 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "def fucs_12(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (12 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "def fucs_14(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (14 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "def fucs_26(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (26 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "def fucs_27(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (27 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "def fucs_28(x):\n",
    "    temp = x.values.tolist()\n",
    "    if (28 in temp):\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "day_1 = train_tsts.groupby('UID')['day'].apply(fucs_1).reset_index().rename({\"day\":'day_is_1'}, axis=1)\n",
    "day_8 = train_tsts.groupby('UID')['day'].apply(fucs_8).reset_index().rename({\"day\":'day_is_8'}, axis=1)\n",
    "day_12= train_tsts.groupby('UID')['day'].apply(fucs_12).reset_index().rename({\"day\":'day_is_12'}, axis=1)\n",
    "day_14= train_tsts.groupby('UID')['day'].apply(fucs_14).reset_index().rename({\"day\":'day_is_14'}, axis=1)\n",
    "day_26= train_tsts.groupby('UID')['day'].apply(fucs_26).reset_index().rename({\"day\":'day_is_26'}, axis=1)\n",
    "day_27= train_tsts.groupby('UID')['day'].apply(fucs_27).reset_index().rename({\"day\":'day_is_27'}, axis=1)\n",
    "day_28= train_tsts.groupby('UID')['day'].apply(fucs_28).reset_index().rename({\"day\":'day_is_28'}, axis=1)\n",
    "train_tst_feature = train_tst_feature.merge(day_1, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(day_8, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(day_12, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(day_14, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(day_26, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(day_27, on='UID', how='left')\n",
    "train_tst_feature = train_tst_feature.merge(day_28, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易天的个数统计,平均每天的操作次数，每个时间段的操作次数\n",
    "train_tst_day = train_tsts.copy()\n",
    "\n",
    "'''每个用户的总共交易次数，以及每个用户平均每天的交易次数'''\n",
    "train_tst_feature = pd.merge(train_tst_feature, \n",
    "                        train_tst_day.groupby('UID', as_index=False)['day'].agg({'trans_count':'count'}))\n",
    "train_tst_day.drop_duplicates(['UID', 'day'], keep='first', inplace=True)\n",
    "train_tst_day = train_tst_day.groupby('UID', as_index=False)['day'].agg({'day_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_tst_day, on='UID', how='left')\n",
    "\n",
    "train_tst_feature['avg_day_trans'] = train_tst_feature.trans_count / train_tst_feature.day_count\n",
    "\n",
    "'''统计每个不同时间段的交易次数，以及每个用户每个时间段的平均交易次数'''\n",
    "train_time = train_tsts.copy()\n",
    "train_time.time = train_time.time.str[:2]\n",
    "\n",
    "train_time.time = train_time.time.astype('int')\n",
    "\n",
    "def time_help(data):\n",
    "    if (data > 7) & (data <= 12):\n",
    "        return 0\n",
    "    elif (data > 12) & (data <= 19):\n",
    "        return 1\n",
    "    elif (data > 19) & (data <= 24):\n",
    "        return 2\n",
    "    elif (data >= 0) & (data <= 7):\n",
    "        return 3\n",
    "\n",
    "train_time.time = train_time.time.map(time_help)\n",
    "train_time = pd.concat([train_time.UID, pd.get_dummies(train_time.time, prefix='time')], axis=1)\n",
    "train_time = train_time.groupby('UID', as_index=False).sum()\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_time, on='UID', how='left')\n",
    "\n",
    "time_func = lambda x: 0 if x == np.inf else x\n",
    "train_tst_feature['avg_time0_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_0).map(time_func)\n",
    "train_tst_feature['avg_time1_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_1).map(time_func)\n",
    "train_tst_feature['avg_time2_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_2).map(time_func)\n",
    "train_tst_feature['avg_time3_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_3).map(time_func)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# # 取每个用户的相邻两次交易天数差值的平均值，最大值，最小值\n",
    "\n",
    "train_time_sub = train_tsts.copy()\n",
    "train_time_sub.drop_duplicates(['UID','day','time'],inplace=True)\n",
    "train_time_sub.sort_values(by=['UID','day','time'],inplace=True)\n",
    "\n",
    "train_time_sub['day_shift'] = train_time_sub.groupby('UID')['day'].shift(-1)\n",
    "train_time_sub['time_shift'] = train_time_sub.groupby(['UID', 'day'])['time'].shift(-1)\n",
    "\n",
    "train_time_sub = train_time_sub[['UID', 'day', 'time', 'day_shift', 'time_shift']]\n",
    "train_time_sub['day_sub'] = train_time_sub['day_shift'] - train_time_sub['day']\n",
    "train_time_sub['time_sub'] = (pd.to_datetime(train_time_sub.time_shift, format='%H:%M:%S')\n",
    "                              -pd.to_datetime(train_time_sub.time, format='%H:%M:%S')).dt.total_seconds()\n",
    "train_day_sub = train_time_sub.copy()\n",
    "train_day_sub['day_sub'] = train_time_sub['day_sub'].fillna(0)\n",
    "train_day_sub = train_day_sub.groupby('UID', as_index=False)['day_sub'].agg({'mean','max','min'}).add_prefix('day_sub_').reset_index()\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_day_sub, on='UID', how='left')\n",
    "\n",
    "train_time_sub = train_time_sub[train_time_sub.time_sub.notnull()]\n",
    "\n",
    "train_time_sub = train_time_sub.groupby('UID', as_index=False)['time_sub'].agg({'mean','max','min'}).add_prefix('time_sub_').reset_index()\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_time_sub, on='UID', how='left')\n",
    "train_tst_feature[['time_sub_mean', 'time_sub_min', 'time_sub_max']] = train_tst_feature[['time_sub_mean', \n",
    "                                                                                          'time_sub_min', 'time_sub_max']].fillna(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易平台的个数，以及每个平台的交易金额的大小trans_amt的统计信息值\n",
    "\n",
    "'''交易平台的个数以及平均每天使用不同平台的个数'''\n",
    "train_channel_count = train_tsts.copy()\n",
    "train_channel_count.drop_duplicates(['UID', 'channel'], inplace=True)\n",
    "train_channel_count = train_channel_count.groupby('UID', as_index=False)['channel'].agg({'channel_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_channel_count, on='UID', how='left')\n",
    "\n",
    "'''平均每天使用的平台个数'''\n",
    "train_avg_channel = train_tsts.copy()\n",
    "train_avg_channel.drop_duplicates(['UID', 'day', 'channel'], inplace=True)\n",
    "train_avg_channel = train_avg_channel.groupby('UID', as_index=False)['channel'].agg({'channel_day_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_avg_channel, on='UID', how='left')\n",
    "\n",
    "'''每个平台的交易金额的统计信息值'''\n",
    "train_channel_amt = train_tsts.copy()\n",
    "train_channel_amt = train_channel_amt.groupby(['UID','channel'])['trans_amt'].agg({'mean','max','min'}).add_prefix('amt_channel_').unstack('channel')\n",
    "train_channel_amt.columns = [x[0]+\"_\"+str(x[1]) for x in train_channel_amt.columns.ravel()]\n",
    "train_channel_amt.fillna(0, inplace=True)\n",
    "train_channel_amt.reset_index(inplace=True)\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_channel_amt, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易金额和余额的大小trans_amt，bal的统计分析\n",
    "\n",
    "'''每个用户的钱的大小'''\n",
    "train_tst_feature = train_tst_feature.merge(train_tsts.groupby('UID', as_index=False)['trans_amt']\n",
    "                                            .agg({'trans_amt_all':'sum'}), on='UID', how='left')\n",
    "'''每个用户的余额大小'''\n",
    "train_tst_feature = train_tst_feature.merge(train_tsts.groupby('UID', as_index=False)['bal']\n",
    "                                            .agg({'bal_all':'sum'}), on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 营销活动的相关特征，每天参与营销活动的类型，一共参与的营销活动，每个营销类型的金额相关特征\n",
    "\n",
    "'''平均每天参与的营销活动次数'''\n",
    "train_market_code = train_tsts.copy()\n",
    "train_market_code['market_code'].fillna(0, inplace=True)\n",
    "train_market_code.drop_duplicates(['UID','day', 'market_code'], inplace=True)\n",
    "train_market_code = train_market_code.groupby('UID', as_index=False)['market_code'].agg({'mark_code_day':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_market_code, on='UID', how='left')\n",
    "\n",
    "'''平均每天参与不同营销活动的次数'''\n",
    "train_market_code_diff = train_tsts.copy()\n",
    "train_market_code_diff['market_code'].fillna(0, inplace=True)\n",
    "train_market_code_diff.drop_duplicates(['UID', 'market_code'], inplace=True)\n",
    "train_market_code_diff = train_market_code_diff.groupby('UID', as_index=False)['market_code'].agg({'mark_code_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_market_code_diff, on='UID', how='left')\n",
    "\n",
    "'''不同的营销类型的金额统计信息'''\n",
    "train_market_type = train_tsts.copy()\n",
    "train_market_type['market_type'].fillna(0, inplace=True)\n",
    "train_market_type = train_market_type.groupby(['UID','market_type'])['trans_amt'].agg({'mean','max','min'}).add_prefix('market_type_').unstack()\n",
    "train_market_type.columns = [x[0]+\"_\"+str(x[1]) for x in train_market_type.columns.ravel()]\n",
    "train_market_type.fillna(0, inplace=True)\n",
    "train_market_type.reset_index(inplace=True)\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_market_type, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易类型的统计分析以及和交易金额的大小之间的关系信息\n",
    "\n",
    "'''平均每天交易类型1,2的次数'''\n",
    "train_type_1 = train_tsts.copy()\n",
    "train_type_1.drop_duplicates(['UID', 'day', 'trans_type1'], inplace=True)\n",
    "train_type_1 = train_type_1.groupby('UID', as_index=False)['trans_type1'].agg({'trans_type1_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_type_1, on='UID', how='left')\n",
    "\n",
    "\n",
    "train_type_2 = train_tsts.copy()\n",
    "train_type_2.drop_duplicates(['UID', 'day', 'trans_type2'], inplace=True)\n",
    "train_type_2 = train_type_2.groupby('UID', as_index=False)['trans_type2'].agg({'trans_type2_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_type_2, on='UID', how='left')\n",
    "\n",
    "'''每个用户的交类型2的金额统计信息'''\n",
    "train_type2_loss = train_tsts.copy()\n",
    "train_type2_loss['trans_type2'].fillna(0, inplace=True)\n",
    "train_type2_loss = train_type2_loss.groupby(['UID'], as_index=False)['trans_type2'].agg({\n",
    "    'trans_type_loss':lambda x: x[x==0].count()/ x.count()})\n",
    "train_tst_feature = train_tst_feature.merge(train_type2_loss, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # acc_id1分析\n",
    "\n",
    "'''用户帐号相关accid1'''\n",
    "train_tst_accid1 = train_tsts.copy()\n",
    "train_tst_accid1.fillna(0, inplace=True)\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid1.groupby('UID', as_index=False)['acc_id1'].agg({\n",
    "    'acc_id1_loss': lambda x: x[x==0].count() / x.count()\n",
    "}))\n",
    "train_tst_accid1.drop_duplicates(['UID','acc_id1'], inplace=True)\n",
    "train_tst_accid1 = train_tst_accid1.groupby('UID', as_index=False)['acc_id1'].agg({'acc_id1_count':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid1, on='UID', how='left')\n",
    "\n",
    "train_tst_accid1_day = train_tsts.copy()\n",
    "train_tst_accid1_day.fillna(0, inplace=True)\n",
    "train_tst_accid1_day.drop_duplicates(['UID','day','acc_id1'], inplace=True)\n",
    "train_tst_accid1_day = train_tst_accid1_day.groupby('UID', as_index=False)['acc_id1'].agg({'acc_id1_day':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid1_day, on='UID', how='left')\n",
    "\n",
    "\n",
    "'''用户帐号相关accid2'''\n",
    "train_tst_accid2 = train_tsts.copy()\n",
    "train_tst_accid2.fillna(0, inplace=True)\n",
    "train_tst_accid2.drop_duplicates(['UID','acc_id2'], inplace=True)\n",
    "train_tst_accid2 = train_tst_accid2.groupby('UID', as_index=False)['acc_id2'].agg({'acc_id2_count':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid2, on='UID', how='left')\n",
    "\n",
    "train_tst_accid2_day = train_tsts.copy()\n",
    "train_tst_accid2_day.fillna(0, inplace=True)\n",
    "train_tst_accid2_day.drop_duplicates(['UID','day','acc_id2'], inplace=True)\n",
    "train_tst_accid2_day = train_tst_accid2_day.groupby('UID', as_index=False)['acc_id2'].agg({'acc_id2_day':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid2_day, on='UID', how='left')\n",
    "\n",
    "'''用户帐号相关accid3'''\n",
    "train_tst_accid3 = train_tsts.copy()\n",
    "train_tst_accid3.fillna(0, inplace=True)\n",
    "train_tst_accid3.drop_duplicates(['UID','acc_id3'], inplace=True)\n",
    "train_tst_accid3 = train_tst_accid3.groupby('UID', as_index=False)['acc_id3'].agg({'acc_id3_count':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid3, on='UID', how='left')\n",
    "\n",
    "train_tst_accid3_day = train_tsts.copy()\n",
    "train_tst_accid3_day.fillna(0, inplace=True)\n",
    "train_tst_accid3_day.drop_duplicates(['UID','day','acc_id3'], inplace=True)\n",
    "train_tst_accid3_day = train_tst_accid3_day.groupby('UID', as_index=False)['acc_id3'].agg({'acc_id3_day':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_accid3_day, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易资金类型amt_src1\n",
    "\n",
    "train_amt_src = train_tsts.copy()\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_amt_src.groupby('UID', as_index=False)['amt_src1'].count())\n",
    "train_amt_src.drop_duplicates(['UID', 'day', 'amt_src1'], inplace=True)\n",
    "train_amt_src = train_amt_src.groupby('UID', as_index=False)['amt_src1'].agg({'amt_src_count':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_amt_src, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['UID', 'Tag', 'channel_119', 'trans_amt_than_119', 'trans_amt_102',\n",
       "       'trans_bal_big', 'bal_100', 'bal_100_260', 'day_is_1', 'day_is_8',\n",
       "       'day_is_12', 'day_is_14', 'day_is_26', 'day_is_27', 'day_is_28',\n",
       "       'trans_count', 'day_count', 'avg_day_trans', 'time_0', 'time_1',\n",
       "       'time_2', 'time_3', 'avg_time0_trans', 'avg_time1_trans',\n",
       "       'avg_time2_trans', 'avg_time3_trans', 'day_sub_min', 'day_sub_max',\n",
       "       'day_sub_mean', 'time_sub_min', 'time_sub_max', 'time_sub_mean',\n",
       "       'channel_count', 'channel_day_count', 'amt_channel_min_102',\n",
       "       'amt_channel_min_106', 'amt_channel_min_119', 'amt_channel_min_140',\n",
       "       'amt_channel_min_141', 'amt_channel_max_102', 'amt_channel_max_106',\n",
       "       'amt_channel_max_119', 'amt_channel_max_140', 'amt_channel_max_141',\n",
       "       'amt_channel_mean_102', 'amt_channel_mean_106', 'amt_channel_mean_119',\n",
       "       'amt_channel_mean_140', 'amt_channel_mean_141', 'trans_amt_all',\n",
       "       'bal_all', 'mark_code_day', 'mark_code_count', 'market_type_min_0.0',\n",
       "       'market_type_min_1.0', 'market_type_min_2.0', 'market_type_max_0.0',\n",
       "       'market_type_max_1.0', 'market_type_max_2.0', 'market_type_mean_0.0',\n",
       "       'market_type_mean_1.0', 'market_type_mean_2.0', 'trans_type1_count',\n",
       "       'trans_type2_count', 'trans_type_loss', 'acc_id1_loss', 'acc_id1_count',\n",
       "       'acc_id1_day', 'acc_id2_count', 'acc_id2_day', 'acc_id3_count',\n",
       "       'acc_id3_day', 'amt_src1', 'amt_src_count', 'merchant',\n",
       "       'merchant_count', 'merchant_sum_sum', 'merchant_sum_min',\n",
       "       'merchant_sum_max', 'merchant_sum_mean', 'merchant_mean_sum',\n",
       "       'merchant_mean_min', 'merchant_mean_max', 'merchant_mean_mean',\n",
       "       'trans_mer_number_sum', 'trans_mer_number_min', 'trans_mer_number_max',\n",
       "       'trans_mer_number_mean'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# # 商家的信息统计方法与分析，merchant, code1, code2\n",
     "'''商家的个数，平均每天逛商家的个数'''\n",
     "# Per-UID raw merchant transaction count, then distinct (day, merchant) count.\n",
     "train_merchant= train_tsts.copy()\n",
     "train_tst_feature = pd.merge(train_tst_feature, train_merchant.groupby('UID', as_index=False)['merchant'].count())\n",
     "train_merchant.drop_duplicates(['UID', 'day', 'merchant'], inplace=True)\n",
     "train_merchant = train_merchant.groupby('UID', as_index=False)['merchant'].agg({'merchant_count':'count'})\n",
     "train_tst_feature = pd.merge(train_tst_feature, train_merchant, on='UID', how='left')\n",
     "\n",
     "# NOTE(review): this groups `train_tst` (train-only transactions), while every\n",
     "# other feature in this notebook is built from the merged `train_tsts` frame.\n",
     "# If that is unintended, test-set UIDs get NaN for all merchant amount features\n",
     "# after the left merge below — confirm whether `train_tsts` was meant here.\n",
     "temp1 = train_tst.groupby(['UID', 'merchant'], as_index=False)['trans_amt'].agg({'sum', 'mean'}).add_prefix('merchant_').reset_index()\n",
     "temp1['trans_mer_number'] = temp1['merchant_sum'] / temp1['merchant_mean']\n",
     "temp1 = temp1.groupby('UID').agg({'mean','max', 'min','sum'})\n",
     "temp1.columns = [x[0]+'_'+x[1] for x in temp1.columns.ravel()]\n",
     "temp1.reset_index(inplace=True)\n",
     "train_tst_feature = pd.merge(train_tst_feature, temp1, on='UID', how='left')\n",
     "\n",
     "train_tst_feature.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易表中地理位置信息分析\n",
    "'''平均每天变动的地理位置'''\n",
    "train_tst_geo = train_tsts.copy()\n",
    "train_tst_geo.fillna(0, inplace=True)\n",
    "train_tst_geo.drop_duplicates(['UID','geo_code'], inplace=True)\n",
    "train_tst_geo = train_tst_geo.groupby('UID', as_index=False)['geo_code'].agg({'geo_code_count':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_geo, on='UID', how='left')\n",
    "\n",
    "train_tst_geo_day = train_tsts.copy()\n",
    "train_tst_geo_day.geo_code.fillna(0)\n",
    "train_tst_geo_day = train_tst_geo_day.drop_duplicates(subset=['UID', 'day', 'geo_code'],keep='first')\n",
    "train_tst_geo_day = train_tst_geo_day.groupby(['UID'], as_index=False)['geo_code'].agg({'geo_code_day':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_tst_geo_day, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易表的设备特征提取\n",
    "\n",
    "'''一个用户用了几个设置进行交易, 以及没多少次交易换设备，以及平均每天设备个数'''\n",
    "train_device = train_tsts.copy()\n",
    "train_device.device2 = train_device.device2.fillna(0)\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_device.groupby('UID', as_index=False)['device2'].agg({'device_tst':'count'}))\n",
    "train_device.drop_duplicates(['UID','day','device2'], inplace=True)\n",
    "train_device = train_device.groupby(['UID'], as_index=False)['device2'].agg({'device_count_tst':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_device, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mac1地址的统计分析\n",
    "'''统计用户mac地址的个数'''\n",
    "train_mac = train_tsts.copy()\n",
    "train_mac.mac1 = train_mac.mac1.fillna(0)\n",
    "\n",
    "train_mac.drop_duplicates(['UID','mac1'], inplace=True)\n",
    "train_mac = train_mac.groupby('UID', as_index=False)['mac1'].agg({'mac1_tst':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_mac, on='UID', how='left')\n",
    "\n",
    "train_mac_day = train_tsts.copy()\n",
    "train_mac_day.mac1.fillna(0, inplace=True)\n",
    "train_mac_day.drop_duplicates(['UID','day','mac1'], inplace=True)\n",
    "train_mac_day = train_mac_day.groupby('UID', as_index=False)['mac1'].agg({'mac1_day':'count'})\n",
    "train_tst_feature = train_tst_feature.merge(train_mac_day, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 交易表中交易是否为苹果手机\n",
    "train_apple = train_tsts.copy()\n",
    "train_apple.device_code3 = train_apple.device_code3.fillna(0)\n",
    "train_apple.device_code1 = train_apple.device_code1.fillna(0)\n",
    "\n",
    "train_apple.device_code3= train_apple.device_code3.apply(lambda x: x if x == 0 else 1)\n",
    "train_apple.device_code1 = train_apple.device_code1.apply(lambda x: x if x == 0 else 1)\n",
    "\n",
    "train_apple = train_apple.groupby('UID', as_index=False)[['device_code1', 'device_code3']].sum()\n",
    "train_apple['is_apple_tst'] = np.where(train_apple.device_code1 > train_apple.device_code3, 0,1)\n",
    "train_apple['is_apple_an_tst'] = np.where((train_apple.device_code1!=0)&(train_apple.device_code3!=0),1,0)\n",
    "train_apple.drop(['device_code1', 'device_code3'], axis=1, inplace=True)\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_apple, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # ip地址信息的相关统计分析ip1,ip1_sub\n",
    "'''平均每天换多少个ip'''\n",
    "train_tst_ip = train_tsts.copy()\n",
    "train_tst_ip['ip1'] = train_tst_ip['ip1'].fillna(0)\n",
    "train_tst_feature = train_tst_feature.merge(train_tst_ip.groupby('UID', as_index=False)['ip1'].agg({\n",
    "    'ip1_loss': lambda x: x[x==0].count() / x.count()\n",
    "}))\n",
    "\n",
    "train_tst_ip.drop_duplicates(['UID', 'day', 'ip1'], inplace=True)\n",
    "train_tst_ip = train_tst_ip.groupby('UID', as_index=False)['ip1'].agg({'ip1_day_tst':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_tst_ip, on='UID', how='left')\n",
    "\n",
    "'''每多少次交易换一次ip'''\n",
    "train_ip_trans = train_tsts.copy()\n",
    "train_ip_trans['ip1'] = train_ip_trans['ip1'].fillna(0)\n",
    "train_ip_trans = train_ip_trans.groupby('UID', as_index=False)['ip1'].agg({'ip1_count_tst':'count'})\n",
    "train_tst_feature = pd.merge(train_tst_feature, train_ip_trans, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['UID', 'Tag', 'channel_119', 'trans_amt_than_119', 'trans_amt_102',\n",
       "       'trans_bal_big', 'bal_100', 'bal_100_260', 'day_is_1', 'day_is_8',\n",
       "       ...\n",
       "       'avg_trans_day_mac1_tst', 'avg_trans_mac1_day',\n",
       "       'avg_trans_day_mac1_day', 'avg_trans_ip1_day_tst',\n",
       "       'avg_trans_day_ip1_day_tst', 'avg_trans_ip1_count_tst',\n",
       "       'avg_trans_day_ip1_count_tst', 'avg_trans_mat_bal',\n",
       "       'avg_trans_amt_ip1_count_tst', 'avg_trans_bal_ip1_count_tst'],\n",
       "      dtype='object', length=188)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_tst_feature.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 平均化特征处理\n",
    "\n",
    "cols1 = ['channel_count', 'channel_day_count', 'amt_channel_mean_102',\n",
    "       'amt_channel_mean_106', 'amt_channel_mean_119',\n",
    "    'trans_amt_all','bal_all', 'mark_code_day', 'mark_code_count', 'market_type_mean_0.0',\n",
    "    'market_type_mean_1.0', 'market_type_mean_2.0','trans_type2_count','acc_id1_count',\n",
    "       'acc_id1_day', 'acc_id2_count', 'acc_id2_day', 'acc_id3_count',\n",
    "       'acc_id3_day', 'amt_src1', 'amt_src_count', 'merchant', 'merchant_count',\n",
    "        'merchant_mean_mean', 'merchant_mean_sum',\n",
    "       'merchant_mean_max', 'merchant_mean_min', 'merchant_sum_mean',\n",
    "       'merchant_sum_sum', 'merchant_sum_max', 'merchant_sum_min',\n",
    "       'trans_mer_number_mean', 'trans_mer_number_sum', 'trans_mer_number_max',\n",
    "       'trans_mer_number_min', 'geo_code_count', 'geo_code_day', 'device_tst',\n",
    "       'device_count_tst', 'mac1_tst', 'mac1_day','ip1_day_tst', 'ip1_count_tst'\n",
    "]\n",
    "for i in cols1:\n",
    "    train_tst_feature['avg_trans_{}'.format(i)] = train_tst_feature[i] / train_tst_feature['trans_count']\n",
    "    train_tst_feature['avg_trans_day_{}'.format(i)] = train_tst_feature[i] / train_tst_feature['day_count']\n",
    "    \n",
    "train_tst_feature['avg_trans_mat_bal'] = train_tst_feature['trans_amt_all'] / train_tst_feature['bal_all']\n",
    "\n",
    "cols2 = ['channel_count', 'channel_day_count','mark_code_day', 'mark_code_count',\n",
    "        'trans_type1_count','amt_src1', 'amt_src_count', 'merchant',\n",
    "       'merchant_count','device_tst','device_count_tst','trans_type2_count']\n",
    "\n",
    "for j in cols2:\n",
    "    train_tst_feature['avg_trans_amt_{}'.format(i)] = train_tst_feature['trans_amt_all'] / train_tst_feature[i]\n",
    "    train_tst_feature['avg_trans_bal_{}'.format(i)] = train_tst_feature['bal_all'] / train_tst_feature[i]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "train_tst_feature.to_csv('./transction_feature.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_tst_feature.to_csv('./train_tst_feature.csv', index=False, encoding='utf-8')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "30542"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(train_tst.UID.unique())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "30523"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(train_tsts[train_tsts.Tag != -1].UID.unique())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{51092,\n",
       " 51920,\n",
       " 52691,\n",
       " 53052,\n",
       " 54118,\n",
       " 54267,\n",
       " 54901,\n",
       " 56700,\n",
       " 58103,\n",
       " 58626,\n",
       " 59467,\n",
       " 59696,\n",
       " 61212,\n",
       " 62348,\n",
       " 62944,\n",
       " 63806,\n",
       " 64166,\n",
       " 65746,\n",
       " 66340}"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "set(train_tst.UID.unique()) - set(train_tsts[train_tsts.Tag != -1].UID.unique())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
