{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "helper load finish!!!\n"
     ]
    }
   ],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "from base_helper import *\n",
    "\n",
    "# Load the raw tables through the base_helper loaders.\n",
    "test_op_rd1 = get_operation_round1_new()\n",
    "train_op_tr = get_operation_train_new()\n",
    "train_tst = get_transaction_train_new()\n",
    "# BUG FIX: this assignment previously reused the name test_op_rd1 and\n",
    "# silently overwrote the round-1 operation table loaded above.\n",
    "test_tst_rd1 = get_transaction_round1_new()\n",
    "\n",
    "tag = get_tag_train_new()\n",
    "# The test set has no Tag labels.\n",
    "# op_rd1[tag_hd.Tag] = -1\n",
    "# print(len(list(set(op_rd1[tag_hd.UID].tolist()))))\n",
    "# op_tr = pd.merge(left=op_tr,right=tag, on=tag_hd.UID)\n",
    "# cols = operation_header + [tag_hd.Tag]\n",
    "# op_merge = pd.concat([op_tr[cols],op_rd1[cols]])\n",
    "# op_merge.head(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach the fraud label (Tag) to every operation / transaction row;\n",
    "# a left join keeps rows whose UID has no label (Tag becomes NaN).\n",
    "train_op_trs = train_op_tr.merge(tag, how='left', on='UID')\n",
    "train_tsts = train_tst.merge(tag, how='left', on='UID')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch EDA checks kept for reference; every statement below is\n",
    "# intentionally commented out.\n",
    "# len(tag.UID.unique().tolist())\n",
    "\n",
    "# len(set(train_tst.UID.unique()) & set(train_tst.UID.unique()))\n",
    "\n",
    "# (set(train_tst.UID.unique()) & set(train_tst.UID.unique())) - set(tag.UID.unique().tolist())\n",
    "\n",
    "# len(tag[tag.Tag == 1])\n",
    "\n",
    "# len(set(train_tst[train_tst.Tag == 1]['UID'].unique()) & set(train_op_tr[train_op_tr.Tag == 1]['UID'].unique()))\n",
    "\n",
    "# (set(train_tst[train_tst.Tag == 1]['UID']) & set(train_op_tr[train_op_tr.Tag == 1]['UID']))\n",
    "\n",
    "# loss_uid = set(tag[tag.T\n",
    "# print(len(loss_uid))\n",
    "# print(list(loss_uid)[:4])\n",
    "\n",
    "# tag.groupby('Tag').count().plot(kind='bar')\n",
    "\n",
    "# tag.groupby('Tag').count()\n",
    "\n",
    "# tag.Tag.hist()\n",
    "\n",
    "# train_op_trs = pd.merge(train_op_tr, tag, on='UID', how='left')\n",
    "# train_tsts =  pd.merge(train_tst, tag, on='UID', how='left')\n",
    "\n",
    "# Extract the related features"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# day相关的feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One feature row per distinct UID seen in the labelled operation table.\n",
    "train_op_feature = pd.DataFrame({'UID': list(train_op_trs.UID.unique())})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# day_count: number of distinct active days per UID.\n",
    "train_day = (train_op_trs\n",
    "             .drop_duplicates(subset=['UID', 'day'])[['UID', 'day']]\n",
    "             .rename({'day': 'day_count'}, axis=1)\n",
    "             .groupby('UID', as_index=False).day_count.count())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Left-join so every UID in the feature frame keeps its row even if it\n",
    "# has no day_count entry.\n",
    "train_op_feature = pd.merge(train_op_feature, train_day, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>UID</th>\n",
       "      <th>day_count</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>10035</td>\n",
       "      <td>23</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>16264</td>\n",
       "      <td>21</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>13162</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>21392</td>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>18599</td>\n",
       "      <td>13</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "     UID  day_count\n",
       "0  10035         23\n",
       "1  16264         21\n",
       "2  13162          3\n",
       "3  21392          9\n",
       "4  18599         13"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_op_feature.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.UID == 10035]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# mode字段的相关分析与提取"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.UID == 10035]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs['mode'].isnull()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_mode = train_op_trs.drop_duplicates(subset=['UID','day', 'mode'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rank-encode the operation mode: the TOP_K most frequent modes get\n",
    "# codes 1..TOP_K (descending frequency), every rarer mode gets -1.\n",
    "TOP_K = 15  # was an inlined magic number\n",
    "a = train_mode['mode'].value_counts().index.tolist()\n",
    "b = [i + 1 for i in range(TOP_K)] + [-1] * (len(a) - TOP_K)\n",
    "dicts = dict(zip(a, b))\n",
    "train_mode['mode'] = train_mode['mode'].map(dicts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pivot to one column per mode code, counting active (UID, mode) days,\n",
    "# then prefix the pivoted columns with 'mode_'.\n",
    "temp = train_mode.groupby(['UID', 'mode'])['day'].count()\n",
    "temp = temp.unstack('mode').reset_index()\n",
    "# (removed two no-op lines that read the columns and wrote them back unchanged)\n",
    "temp.columns = [temp.columns[0]] + ['mode'+'_'+ str(i) for i in temp.columns[1:]]\n",
    "temp.fillna(0,inplace=True)\n",
    "train_op_feature = pd.merge(train_op_feature, temp, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Normalise every mode_* count by the UID's active-day count so the\n",
    "# features become per-day rates; columns[2:] skips 'UID' and 'day_count'.\n",
    "for i in train_op_feature.columns[2:]:\n",
    "    train_op_feature[i] = train_op_feature[i] / train_op_feature['day_count']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 操作成功率与失败率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs['success'].isnull()].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs['success'].isnull()].os.unique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.os == 200].head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_success = train_op_trs[~train_op_trs.success.isnull()].reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Row count per (UID, success) pair; 'day' is only used as a row counter here.\n",
    "train_success_1 = train_success.groupby(by=['UID', 'success'], as_index=False).count()[['UID','success','day']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total operations per UID (denominator) vs. successful ones (numerator).\n",
    "# NOTE(review): 'train_successa_all' looks like a typo for\n",
    "# train_success_all, but later cells use this spelling, so it is kept.\n",
    "train_successa_all = train_success_1.groupby('UID', as_index=False).day.sum()\n",
    "train_success_1 = train_success_1[train_success_1['success'] == 1.0].reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_success_1.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_successa_all.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(train_success_1.shape, train_successa_all.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "set(train_successa_all.UID.unique()) - set(train_success_1.UID.unique())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.UID == 62250]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inner merge: UIDs with zero successes drop out here and get\n",
    "# success_ratio filled with 0 two cells below.\n",
    "# day_x = total ops, day_y = successful ops (default merge suffixes).\n",
    "temp = pd.merge(train_successa_all, train_success_1, on='UID')\n",
    "temp['success_ratio'] = temp['day_y'] / temp['day_x']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "temp = temp[['UID', 'success_ratio']]\n",
    "train_op_feature = pd.merge(train_op_feature, temp, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.success_ratio.fillna(0,inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# time处理方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bucket each operation by hour of day and count, per UID, how many\n",
    "# operations fall in each daypart.\n",
    "train_time = train_op_trs.copy()\n",
    "train_time.time = train_time.time.str[:2]  # 'HH:MM:SS' -> 'HH'\n",
    "train_time.time = train_time.time.astype('int')\n",
    "\n",
    "# (low, high] hour bounds per feature column.\n",
    "# BUG FIX: the overnight bucket used (1, 7], silently dropping every\n",
    "# operation at hour 00 or 01; the lower bound is now -1 so hours 0..7\n",
    "# are counted.\n",
    "buckets = {\n",
    "    'time_7_12': (7, 12),\n",
    "    'time_12_19': (12, 19),\n",
    "    'time_19_24': (19, 24),\n",
    "    'time_24_7': (-1, 7),\n",
    "}\n",
    "for col, (lo, hi) in buckets.items():\n",
    "    counts = train_time[(train_time.time > lo) & (train_time.time <= hi)]\n",
    "    counts = counts.groupby('UID', as_index=False).time.count()\n",
    "    counts.rename({'time': col}, axis=1, inplace=True)\n",
    "    train_op_feature = pd.merge(train_op_feature, counts, on='UID', how='left')\n",
    "    train_op_feature[col].fillna(0, inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# os操作系统的处理分析方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.os.isnull()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.os == 200]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test = train_op_trs[train_op_trs.os == 200]\n",
    "print(len(test[test.Tag == 1].UID.unique()))\n",
    "# len(test.UID.unique())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_1 = test[test.Tag == 1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_1.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "test_1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test[~test.geo_code.isnull()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test['mode'].unique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test[test.Tag == 0]['mode'].unique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_1['mode'].unique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs.os.unique().tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Observed meaning of the os codes (manual EDA notes):\n",
    "# 101 unknown operating system\n",
    "# 102 Android phone\n",
    "# 103 Apple (iOS) devices\n",
    "# 104 desktop/PC operations\n",
    "# 105 desktop + mode='8e463287d7146285' strong signal, always a fraud user\n",
    "# 107 mostly desktop + mode='d25caee90b27fa9b' strong signal, always a fraud user\n",
    "# "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.os == 103]['device2'].unique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'d25caee90b27fa9b'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test = train_op_trs[train_op_trs['mode'] == '8e463287d7146285']\n",
    "test[test.Tag == 1].os.unique().tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of distinct os values each UID was seen on.\n",
    "train_os = (train_op_trs\n",
    "            .drop_duplicates(subset=['UID', 'os'])\n",
    "            .groupby('UID', as_index=False).os.count())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature = pd.merge(train_op_feature, train_os, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 是否为苹果手机"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "train_apple = train_op_trs.copy()\n",
    "train_apple.device_code3 = train_apple.device_code3.fillna(0)\n",
    "train_apple.device_code1 = train_apple.device_code1.fillna(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_apple.device_code3= train_apple.device_code3.apply(lambda x: x if x == 0 else 1)\n",
    "train_apple.device_code1 = train_apple.device_code1.apply(lambda x: x if x == 0 else 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_apple.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# device_code1/device_code3 were mapped to 0/1 presence flags above;\n",
    "# summing gives per-UID presence counts for each code column.\n",
    "train_apple = train_apple.groupby('UID', as_index=False)[['device_code1', 'device_code3']].sum()\n",
    "# is_apple = 1 unless device_code1 appears strictly more often than\n",
    "# device_code3 (presumably device_code3 is the iOS identifier - TODO confirm).\n",
    "train_apple['is_apple'] = np.where(train_apple.device_code1 > train_apple.device_code3, 0,1)\n",
    "train_apple.drop(['device_code1', 'device_code3'], axis=1, inplace=True)\n",
    "train_op_feature = pd.merge(train_op_feature, train_apple, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 同一个用户不同的设备个数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.ip2_sub.notnull()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# device_count: number of distinct device2 values per UID.\n",
    "train_device = train_op_trs.copy()\n",
    "\n",
    "# Rows where BOTH device2 and ip2_sub are missing collapse into one\n",
    "# synthetic device '0'; rows with only device2 missing become '1'.\n",
    "# NOTE(review): the rationale for keying this on ip2_sub is not visible\n",
    "# here - confirm against the data dictionary.\n",
    "train_device.device2 = np.where((train_device.device2.isnull()) & (train_device.ip2_sub.isnull()), \n",
    "                                0,train_device.device2)\n",
    "train_device.device2 = train_device.device2.fillna(1)\n",
    "\n",
    "train_device = train_device.groupby(['UID', 'device2'], as_index=False).count()\n",
    "print(len(train_device.UID.unique()))  # sanity check: UID count is stable\n",
    "train_device = train_device.groupby(['UID'], as_index=False)['device2'].agg({'device_count':'count'})\n",
    "print(len(train_device.UID.unique()))\n",
    "\n",
    "train_op_feature = pd.merge(train_op_feature, train_device, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# mac1分析的个数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs[train_op_trs.UID == 10000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''Count each user's distinct mac1 addresses (missing mac1 collapses into a single 0 bucket).'''\n",
    "train_mac = train_op_trs.copy()\n",
    "train_mac.mac1 = train_mac.mac1.fillna(0)\n",
    "train_mac = train_mac.groupby(['UID', 'mac1'], as_index=False)['day'].agg({'mac_count':'count'})\n",
    "# (removed a dead 'ratio' column that was computed here but never read)\n",
    "train_mac_count = train_mac.groupby('UID', as_index=False)['mac1'].count()\n",
    "train_op_feature = pd.merge(train_op_feature, train_mac_count, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''mac1 missing-rate feature.'''\n",
    "# groupby().count() counts non-null values per column, so mac1/day is\n",
    "# the share of rows where mac1 IS present.\n",
    "# NOTE(review): the name 'mac_loss' suggests the missing rate; this is\n",
    "# actually the coverage rate (1 - missing rate). Left unchanged because\n",
    "# changing it would alter the feature values - rename or invert if the\n",
    "# semantics matter downstream.\n",
    "train_mac_loss = train_op_trs.groupby('UID', as_index=False).count()\n",
    "train_mac_loss['mac_loss'] = train_mac_loss.mac1 / train_mac_loss.day\n",
    "train_mac_loss = train_mac_loss[['UID', 'mac_loss']]\n",
    "train_op_feature = pd.merge(train_op_feature, train_mac_loss, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ip地址统计分析，以及是不是电脑"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''Count distinct IPs per UID, and whether the user shows both ip1 and ip2.'''\n",
    "train_ip =train_op_trs.copy()\n",
    "train_ip['ip'] = 0\n",
    "train_ip.ip1 = train_ip.ip1.fillna('0').astype(str)\n",
    "train_ip.ip2 = train_ip.ip2.fillna('0').astype(str)\n",
    "# 0 when BOTH ip1 and ip2 are present, 1 otherwise.\n",
    "# NOTE(review): this looks inverted relative to the name\n",
    "# 'is_phone_computer' - confirm the intended encoding.\n",
    "train_ip['is_phone_computer'] = np.where((train_ip.ip1!='0')&(train_ip.ip2!='0'), 0, 1)\n",
    "train_phone_computer = train_ip.groupby(['UID', 'is_phone_computer'], as_index=False).count()\n",
    "train_phone_computer = train_phone_computer.groupby('UID', as_index=False)['is_phone_computer'].count()\n",
    "train_op_feature = pd.merge(train_op_feature, train_phone_computer, on='UID', how='left')\n",
    "# String concatenation of ip1+ip2 forms a composite key for counting\n",
    "# distinct IP combinations per UID.\n",
    "train_ip['ip'] = train_ip['ip1'] + train_ip['ip2']\n",
    "train_ip = train_ip.groupby(['UID', 'ip'], as_index=False).count()\n",
    "train_ip = train_ip.groupby('UID', as_index=False)['ip'].agg({'ip_number':'count'})\n",
    "train_op_feature = pd.merge(train_op_feature, train_ip, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# wifi分析,统计wifi个数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# avg_day_wifi: average number of distinct wifi values seen per active\n",
    "# day (missing wifi collapses into one 0 bucket).\n",
    "train_wifi = train_op_trs.copy()\n",
    "train_wifi.wifi = train_wifi.wifi.fillna(0)\n",
    "train_wifi = train_wifi.drop_duplicates(subset=['UID', 'day', 'wifi'],keep='first')\n",
    "train_wifi = train_wifi.groupby(['UID', 'day'], as_index=False)['wifi'].count()\n",
    "train_wifi = train_wifi.groupby('UID', as_index=False).agg({'day':'count', 'wifi':'sum'})\n",
    "train_wifi['avg_day_wifi'] = train_wifi['wifi'] / train_wifi['day']\n",
    "train_wifi.drop(['day', 'wifi'], axis=1, inplace=True)\n",
    "train_op_feature = pd.merge(train_op_feature, train_wifi, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# wefi_loss: fraction of a UID's rows whose wifi field is missing\n",
    "# (rows were flagged 0 when wifi was NaN, 1 otherwise).\n",
    "# NOTE(review): 'wefi' is a typo for 'wifi', kept for column-name stability.\n",
    "train_is_wefi = train_op_trs.copy()\n",
    "train_is_wefi.wifi = train_is_wefi.wifi.fillna(0)\n",
    "train_is_wefi['wefi_loss'] = np.where(train_is_wefi.wifi == 0, 0, 1)\n",
    "\n",
    "# train_is_wefi.groupby('UID').apply(lambda x: len(x[x['wefi_loss'] == 0]) / len(x)).reset_index()\n",
    "\n",
    "funcs = lambda x: x[x == 0].count() / x.count()\n",
    "train_is_wefi = train_is_wefi.groupby('UID', as_index=False).agg({'wefi_loss':funcs})\n",
    "train_op_feature = pd.merge(train_op_feature, train_is_wefi, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# geo_code地理位置的分析与处理,提取每天地理位置的变化情况，以及地理位置缺失率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_trs.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Same recipe as the wifi cells, applied to geo_code:\n",
    "# avg_day_geo = mean distinct geo_codes per active day;\n",
    "# geo_loss = share of rows with geo_code missing.\n",
    "train_geo = train_op_trs.copy()\n",
    "train_geo.geo_code = train_geo.geo_code.fillna(0)\n",
    "train_geo = train_geo.drop_duplicates(subset=['UID', 'day', 'geo_code'],keep='first')\n",
    "train_geo = train_geo.groupby(['UID', 'day'], as_index=False)['geo_code'].count()\n",
    "train_geo = train_geo.groupby('UID', as_index=False).agg({'day':'count', 'geo_code':'sum'})\n",
    "train_geo['avg_day_geo'] = train_geo['geo_code'] / train_geo['day']\n",
    "train_geo.drop(['day', 'geo_code'], axis=1, inplace=True)\n",
    "train_op_feature = pd.merge(train_op_feature, train_geo, on='UID', how='left')\n",
    "\n",
    "train_is_geo = train_op_trs.copy()\n",
    "train_is_geo.geo_code = train_is_geo.geo_code.fillna(0)\n",
    "train_is_geo['geo_loss'] = np.where(train_is_geo.geo_code == 0, 0, 1)\n",
    "\n",
    "# train_is_wefi.groupby('UID').apply(lambda x: len(x[x['wefi_loss'] == 0]) / len(x)).reset_index()\n",
    "\n",
    "funcs = lambda x: x[x == 0].count() / x.count()\n",
    "train_is_geo = train_is_geo.groupby('UID', as_index=False).agg({'geo_loss':funcs})\n",
    "train_op_feature = pd.merge(train_op_feature, train_is_geo, on='UID', how='left')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_op_feature.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
