{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np                       # matrix operations\n",
    "import pandas as pd                     # data loading / manipulation\n",
    "from sklearn import preprocessing        # one-hot encoding utilities for preprocessing\n",
    "from pandas import DataFrame\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.metrics import log_loss     # log-loss is the evaluation metric\n",
    "import matplotlib.pyplot as plt          # plotting\n",
    "import seaborn as sns                    # plotting\n",
    "import time\n",
    "\n",
    "#import list_utils\n",
    "\n",
    "%matplotlib inline\n",
    "\n",
    "# NOTE(review): star imports pull unknown names into the namespace;\n",
    "# prefer explicit imports of the helpers actually used below.\n",
    "from utils import *\n",
    "from FE_utils import *"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2、特征工程预处理\n",
    "#### 2.1、增加用户识别特征 （用于生成新增的7个特征）\n",
    "通过将device_id+device_model作为用户的唯一识别码，因为联合后在训练、测试数据中的维度较device_id本身仅增加了2.8%。同时删除device_id和device_ip"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### $$$执行代码2：  增加用户识别码use_id，删除训练device_id和device_ip\n",
    "##### 参数设置：\n",
    "filename：数据读取地址；save_name：数据存储地址；chunksize：每次提取多少数据进行处理，取决于电脑内存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Num_data:2000000\n",
      "Num_data:4000000\n",
      "Num_data:6000000\n",
      "Num_data:8000000\n",
      "Num_data:10000000\n",
      "Num_data:12000000\n",
      "Num_data:14000000\n",
      "Num_data:16000000\n",
      "Num_data:18000000\n",
      "Num_data:20000000\n",
      "Num_data:22000000\n",
      "Num_data:24000000\n",
      "Num_data:26000000\n",
      "Num_data:28000000\n",
      "Num_data:30000000\n",
      "Num_data:32000000\n",
      "Num_data:34000000\n",
      "Num_data:36000000\n",
      "Num_data:38000000\n",
      "Num_data:40000000\n",
      "Num_data:42000000\n",
      "Num_data:44000000\n",
      "Num_data:46000000\n",
      "Num_data:48000000\n",
      "Num_data:50000000\n",
      "Num_data:52000000\n",
      "Num_data:54000000\n",
      "Num_data:56000000\n",
      "Num_data:58000000\n",
      "Num_data:60000000\n",
      "Num_data:62000000\n",
      "Num_data:64000000\n",
      "Num_data:66000000\n",
      "Num_data:68000000\n",
      "Num_data:70000000\n",
      "Num_data:72000000\n",
      "Num_data:74000000\n",
      "Num_data:76000000\n",
      "Num_data:78000000\n",
      "Num_data:80000000\n",
      "Num_data:82000000\n"
     ]
    }
   ],
   "source": [
    "filename = 'train.csv'\n",
    "save_name = 'train_2.csv'\n",
    "\n",
    "# Stream the file in chunks so the full dataset never has to fit in memory.\n",
    "CHUNKSIZE = 1000000\n",
    "data_all = pd.read_csv(filename, chunksize=CHUNKSIZE)\n",
    "\n",
    "for i, data in enumerate(data_all):\n",
    "    # user_id = device_id + device_model: pseudo user key (only ~2.8% more\n",
    "    # distinct values than device_id alone); device_id/device_ip then dropped.\n",
    "    data['user_id'] = data['device_id'] + data['device_model']\n",
    "    data.drop(['device_id', 'device_ip'], inplace=True, axis=1)\n",
    "    # First chunk creates the file with a header; later chunks append without.\n",
    "    # index=False (not index_label=False) is how the index column is omitted.\n",
    "    data.to_csv(save_name, mode='w' if i == 0 else 'a', header=(i == 0), index=False)\n",
    "    del data\n",
    "    # Progress in rows; the original printed i*2000000, double the true count\n",
    "    # for a 1,000,000-row chunksize.\n",
    "    print('Num_data:' + str((i + 1) * CHUNKSIZE))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.2、原19维特征+hour展开3个特征的特征工程\n",
    " 由于pandas用replace处理大规模数据，非常占内存，故而对获取下采样数据进行分批次读取处理。\n",
    "  其中hour_days特征的增加用于生成新增的8个特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### tip:由于使用特征工程时，replace操作很占内存，故而请使用2.2.2代码分批次读取数据，并将数据存入指定文件"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 2.2.1、 原始特征工程，生成非onehot编码\n",
    "      1、实现将除id、click、use_id外取其前1%特征维度的代码\n",
    "      2、实现将hour生成额外3种特征的代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply the feature-engineering dictionaries to one chunk of data.\n",
    "# data_: DataFrame to transform; replace() mutates it in place and the frame\n",
    "#        is also returned for convenience.\n",
    "# Requires two dict files produced earlier in the pipeline:\n",
    "#   FE_19.txt  : {column: {raw_value: code}} for the 19 base features\n",
    "#   FE_hour.txt: {feature: {hour: value}} for the hour expansions\n",
    "def Feature_Engineering(data_):\n",
    "    start = time.time()\n",
    "\n",
    "    # Encode the 19 base features (all except hour/id/click/device_id/device_ip).\n",
    "    FE_19_ = read_double_dict('FE_19.txt', key_int=False)\n",
    "    # Keys read back from the txt file are strings; restore int keys for the\n",
    "    # integer-valued columns so replace() can match the raw data.\n",
    "    list_int = ['click', 'hour', 'C1', 'banner_pos', 'device_type',\n",
    "                'device_conn_type', 'C14', 'C15', 'C16', 'C17',\n",
    "                'C18', 'C19', 'C20', 'C21']\n",
    "    FE_19 = {\n",
    "        k: {(int(key) if k in list_int else key): value for key, value in v.items()}\n",
    "        for k, v in FE_19_.items()\n",
    "    }\n",
    "\n",
    "    for key in FE_19.keys():\n",
    "        data_[key].replace(FE_19[key], inplace=True)\n",
    "\n",
    "    # ------------------------------------------------------------------\n",
    "    # Expand hour into three features: hour of day, weekday, and day index.\n",
    "    FE_hour = read_double_dict('FE_hour.txt')\n",
    "    data_['hour_hours'] = data_.hour.replace(FE_hour['FE_hour_hours'])\n",
    "    data_['hour_weekday'] = data_.hour.replace(FE_hour['FE_hour_weekday'])\n",
    "    data_['hour_days'] = data_.hour.replace(FE_hour['FE_hour_days'])\n",
    "\n",
    "    end = time.time()\n",
    "    print(\"times\" + str(end - start))\n",
    "    return data_"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 2.2.2、 分批次读取要进行特征工程的数据并将其存入一个文件中"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# pandas replace() is memory-hungry, so data is feature-engineered in\n",
    "# chunks that are appended to a single output file.\n",
    "# filename : path of the data to process\n",
    "# save_name: path of the output file\n",
    "# chunksize: rows handled per batch\n",
    "# data_num : total number of rows to process\n",
    "# Returns the last processed chunk (None if no chunk was processed).\n",
    "# NOTE(review): these module-level names are unused by FE_data (it takes its\n",
    "# own parameters) and 'train_2' lacks the .csv suffix; kept for reference only.\n",
    "filename = 'train_2'\n",
    "save_name = 'FE_data.csv'\n",
    "\n",
    "def FE_data(filename, save_name, chunksize=100000, data_num=10000000):\n",
    "    step_num = data_num // chunksize\n",
    "    print(step_num)\n",
    "    datas = pd.read_csv(filename, chunksize=chunksize)\n",
    "    data_ = None  # guard: avoids NameError if the input yields no chunks\n",
    "    for i, data in enumerate(datas):\n",
    "        data_ = Feature_Engineering(data)\n",
    "        # First chunk writes the header; later chunks append without it.\n",
    "        # index=False omits the index column (index_label=False does not).\n",
    "        data_.to_csv(save_name, mode='w' if i == 0 else 'a',\n",
    "                     header=(i == 0), index=False)\n",
    "        del data\n",
    "        if i + 1 >= step_num:\n",
    "            break\n",
    "    return data_"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### $$$执行代码3： 对前99%的数据进行数字编码，剩余1%编码为-1；将时间hour展开为3个新特征\n",
    "#### 参数设置：\n",
    "filename：数据读取地址；save_name：数据存储地址，chunksize：每批次处理数据数，建议10w；data_num：总共需处理数据数\n",
    "预计1h处理300w数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "100\n",
      "times115.88600063323975\n",
      "times112.50440073013306\n",
      "times111.06480050086975\n",
      "times109.32480072975159\n",
      "times109.2936007976532\n",
      "times109.35600090026855\n",
      "times116.17420077323914\n",
      "times122.3674008846283\n",
      "times114.70680069923401\n",
      "times109.6836006641388\n",
      "times109.4652009010315\n",
      "times110.0268006324768\n",
      "times109.6680006980896\n",
      "times109.73040080070496\n",
      "times111.44840049743652\n",
      "times109.65340089797974\n",
      "times109.8708004951477\n",
      "times110.55720067024231\n",
      "times109.98000073432922\n",
      "times110.760000705719\n",
      "times109.98000073432922\n",
      "times110.52600073814392\n",
      "times111.88320088386536\n",
      "times110.74540066719055\n",
      "times113.08440089225769\n",
      "times110.30760073661804\n",
      "times111.46200060844421\n",
      "times114.38020062446594\n",
      "times112.03920078277588\n",
      "times111.75840044021606\n",
      "times114.16080093383789\n",
      "times111.60240077972412\n",
      "times111.41720056533813\n",
      "times111.24460077285767\n",
      "times110.66640090942383\n",
      "times111.056401014328\n",
      "times110.40120077133179\n",
      "times111.86760091781616\n",
      "times112.41360068321228\n",
      "times112.97520065307617\n",
      "times113.16240072250366\n",
      "times111.93100047111511\n",
      "times111.58680057525635\n",
      "times964.0083656311035\n",
      "times110.3086006641388\n",
      "times109.26240086555481\n",
      "times109.38720059394836\n",
      "times109.4652009010315\n",
      "times109.43400049209595\n",
      "times110.16720080375671\n",
      "times110.12040066719055\n",
      "times110.79120063781738\n",
      "times111.19680094718933\n",
      "times109.46520066261292\n",
      "times110.85360074043274\n",
      "times111.07200074195862\n",
      "times111.89880061149597\n",
      "times110.52600049972534\n",
      "times110.3544008731842\n",
      "times112.5696005821228\n",
      "times882.4659886360168\n",
      "times109.24680066108704\n",
      "times109.32480072975159\n",
      "times109.63680076599121\n",
      "times109.69920086860657\n",
      "times110.29200077056885\n",
      "times109.74600100517273\n",
      "times109.94880056381226\n",
      "times112.08600091934204\n",
      "times109.93320059776306\n",
      "times110.2608003616333\n",
      "times111.0720009803772\n",
      "times110.96380043029785\n",
      "times113.00640058517456\n",
      "times111.41520047187805\n",
      "times111.50880074501038\n",
      "times156.84378910064697\n",
      "times116.59440088272095\n",
      "times117.2808005809784\n",
      "times115.29960083961487\n",
      "times115.23720049858093\n",
      "times116.00160074234009\n",
      "times116.89180088043213\n",
      "times116.12640070915222\n",
      "times110.94720077514648\n",
      "times110.16720080375671\n",
      "times110.60400056838989\n",
      "times110.54160070419312\n",
      "times113.20920085906982\n",
      "times111.30600070953369\n",
      "times111.05640077590942\n",
      "times111.8364007472992\n",
      "times111.68040060997009\n",
      "times112.27620077133179\n",
      "times112.02360081672668\n",
      "times111.25920081138611\n",
      "times111.77400088310242\n",
      "times111.43080043792725\n",
      "times112.11720085144043\n",
      "times112.44480061531067\n"
     ]
    }
   ],
   "source": [
    "filename= 'train_2.csv'\n",
    "save_name='train_3.csv'\n",
    "# Encode the 19 base features and expand hour into 3 derived features.\n",
    "# 100 chunks of 100k rows = 10M rows; each chunk takes roughly 2 min (see output).\n",
    "data_FE=FE_data(filename,save_name,chunksize=100000,data_num=10000000)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 4.Drop掉'id','hour','user_id'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Int64Index: 10000000 entries, 0 to 9999999\n",
      "Data columns (total 26 columns):\n",
      "id                  float64\n",
      "click               int64\n",
      "hour                int64\n",
      "C1                  int64\n",
      "banner_pos          int64\n",
      "site_id             int64\n",
      "site_domain         int64\n",
      "site_category       int64\n",
      "app_id              int64\n",
      "app_domain          int64\n",
      "app_category        int64\n",
      "device_model        int64\n",
      "device_type         int64\n",
      "device_conn_type    int64\n",
      "C14                 int64\n",
      "C15                 int64\n",
      "C16                 int64\n",
      "C17                 int64\n",
      "C18                 int64\n",
      "C19                 int64\n",
      "C20                 int64\n",
      "C21                 int64\n",
      "user_id             object\n",
      "hour_hours          int64\n",
      "hour_weekday        int64\n",
      "hour_days           int64\n",
      "dtypes: float64(1), int64(24), object(1)\n",
      "memory usage: 2.0+ GB\n"
     ]
    }
   ],
   "source": [
    "# Load the feature-engineered data and inspect dtypes / memory usage.\n",
    "# NOTE(review): the full 10M rows occupy ~2 GB in memory (see output below).\n",
    "drop_data = pd.read_csv(\"train_3.csv\")\n",
    "drop_data.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop identifier columns that are not model features.\n",
    "drop_data.drop(['id','hour','user_id'],axis=1,inplace=True)\n",
    "# index=False keeps the row index out of the file, so reloading train_4.csv\n",
    "# does not gain a spurious unnamed column.\n",
    "drop_data.to_csv(\"train_4.csv\", index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.3 进行新增的7个特征工程处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 1、user_hour_C14：每个用户在小时内查看该广告的次数\n",
    "##### 2、user_hour_C17：每个用户，在小时内查看该类型广告的次数\n",
    "##### 3、user_day_C14：每个用户，在当天查看该广告的次数\n",
    "##### 4、user_day_C17：每个用户，在当天查看该类型广告的次数\n",
    "##### 5、user_day_times:每个用户，当天的出现次数\n",
    "##### 6、user_day_app_id：每个用户，一天在此app_id中登录的次数\n",
    "##### 7、user_days:每个用户，累计登陆的天数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "此块代码耗时非常长，原因是先用bygroup生成所需用户热度(频率)的dataframe，再遍历数据根据关键字从dataframe提取对应频率。所处理数据越多\n",
    "生成频率的dataframe就越大，在遍历数据查找所消耗的时间就越多。经测算，600w数据，用单核预计得9天。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### $$$执行代码4： 生成新的7个表示热度的特征，同时删除'id','hour','user_id'这三个特征\n",
    "### $请依次执行以下4块代码\n",
    "#### 参数设置：\n",
    "filename：数据读取地址；save_name：生成数据存储地址"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute Windows paths -- adjust when running elsewhere.\n",
    "filename= 'D:/Jupyter/w000/FE_data_train_26.csv'\n",
    "save_name='D:/Jupyter/w000/FE_data_train_30.csv'\n",
    "# nrows=1000 loads only a small sample: the per-row frequency lookups below\n",
    "# are extremely slow (est. ~9 days for 6M rows on a single core).\n",
    "data=pd.read_csv(filename,nrows=1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each *_dict below is a frequency table: groupby(...).size() counts rows per\n",
    "# key combination and reset_index() turns the result back into a DataFrame\n",
    "# that the find_* helpers (FE_utils) look values up in.\n",
    "# NOTE(review): the squeeze argument to groupby is deprecated in newer pandas\n",
    "# (removed in 2.0) -- confirm the pinned pandas version before upgrading.\n",
    "#1. counts per (user_id, hour, C14): how often a user saw ad C14 in each hour\n",
    "user_hour_C14_dict=data.groupby(['user_id', 'hour','C14'],as_index=False,squeeze=True).size().reset_index()\n",
    "#2. counts per (user_id, hour, C17): how often a user saw ad category C17 in each hour\n",
    "user_hour_C17_dict=data.groupby(['user_id', 'hour','C17'],as_index=False,squeeze=True).size().reset_index()\n",
    "#3. counts per (user_id, hour_days, C14): how often a user saw ad C14 on each day\n",
    "user_day_C14_dict=data.groupby(['user_id', 'hour_days','C14'],as_index=False,squeeze=True).size().reset_index()\n",
    "#4. counts per (user_id, hour_days, C17): how often a user saw ad category C17 on each day\n",
    "user_day_C17_dict=data.groupby(['user_id', 'hour_days','C17'],as_index=False,squeeze=True).size().reset_index()\n",
    "\n",
    "#5. counts per (user_id, hour_days): how many times a user appeared on each day\n",
    "user_day_times_dict=data.groupby(['user_id', 'hour_days'],as_index=False,squeeze=True).size().reset_index()\n",
    "\n",
    "#6. counts per (user_id, hour_days, app_id): daily logins per app for each user\n",
    "user_day_app_id_dict=data.groupby(['user_id', 'hour_days','app_id'],as_index=False,squeeze=True).size().reset_index()\n",
    "\n",
    "#7. built on user_day_times_dict: number of distinct days each user appeared\n",
    "user_days_dict=user_day_times_dict.groupby(['user_id'],as_index=False,squeeze=True).size().reset_index()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 生成7个新特征数据\n",
    "##### tip：10w数据差不多消耗20min"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "6.26265287399292\n"
     ]
    }
   ],
   "source": [
    "# The find_* lookup helpers come from FE_utils; every row is matched against\n",
    "# the frequency tables built above (slow: roughly 20 min per 100k rows).\n",
    "start = time.time()\n",
    "\n",
    "# (new column, lookup helper, key columns, frequency table) for the 7 features\n",
    "lookup_specs = [\n",
    "    ('user_hour_C14', find_user_hour_C14, ['user_id', 'hour', 'C14'], user_hour_C14_dict),\n",
    "    ('user_hour_C17', find_user_hour_C17, ['user_id', 'hour', 'C17'], user_hour_C17_dict),\n",
    "    ('user_day_C14', find_user_day_C14, ['user_id', 'hour_days', 'C14'], user_day_C14_dict),\n",
    "    ('user_day_C17', find_user_day_C17, ['user_id', 'hour_days', 'C17'], user_day_C17_dict),\n",
    "    ('user_day_times', find_user_day_times, ['user_id', 'hour_days'], user_day_times_dict),\n",
    "    ('user_day_app_id', find_user_day_app_id, ['user_id', 'hour_days', 'app_id'], user_day_app_id_dict),\n",
    "    ('user_days', find_user_days, ['user_id'], user_days_dict),\n",
    "]\n",
    "for col, lookup, keys, table in lookup_specs:\n",
    "    data[col] = data.apply(lambda row: lookup(*[row[k] for k in keys], table), axis=1)\n",
    "\n",
    "end = time.time()\n",
    "print(end - start)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The upstream downsampling left positives/negatives clustered, so shuffle rows.\n",
    "# BUG FIX: the original discarded the result of sample(); it must be reassigned\n",
    "# for the shuffle to take effect.\n",
    "data = data.sample(frac=1).reset_index(drop=True)\n",
    "# Drop id, hour and user_id -- identifiers, not model features.\n",
    "data.drop(['id', 'hour', 'user_id'], inplace=True, axis=1)\n",
    "# Per the original notes: 30 features remain after the drop,\n",
    "# 21 categorical and 8 numeric (hour_days, user_hour_C14, user_hour_C17,\n",
    "# user_day_C14, user_day_C17, user_day_times, user_day_app_id, user_days).\n",
    "# index=False keeps the row index out of the saved file.\n",
    "data.to_csv(save_name, index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
