{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Tips\n",
    "* 生成特征时，一定要记得过滤label `df = get_hdf('all', if_filter_label=True, if_drop_duplicates=True)`\n",
    "* 分布式运行pandas `import modin.pandas as pd`, 必须放到读取文件之后\n",
    "* 连续使用同一个df创建特征时， 给函数传递参数时必须加上`df.copy`， 否则`df`在函数内会被改变"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "%run round2_base.ipynb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "sample = get_sample(dtype='rank')\n",
    "df = get_hdf('all', if_filter_label=True, if_drop_duplicates=True, if_lastday=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def add_prefix(df, exclude_columns, prefix):\n",
    "    if isinstance(exclude_columns, str):\n",
    "        exclude_columns = [exclude_columns]\n",
    "        \n",
    "    column_names = [col for col in df.columns if col not in exclude_columns]\n",
    "    df.rename(columns = dict(zip(column_names, [prefix + name for name in column_names])), inplace=True)\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def group_func(df, group_func_dic, group_key):\n",
    "    if isinstance(group_func_dic, str):\n",
    "        group_func_dic = [group_func_dic]\n",
    "        \n",
    "    features = df.groupby(group_key).agg(group_func_dic)\n",
    "    features.columns = [e[0] + \"_\" + e[1].upper() for e in features.columns.tolist()]\n",
    "    features.reset_index(inplace=True)\n",
    "    return features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def filter_sample(df, key=None):\n",
    "    if key is None:\n",
    "        df = df.merge(sample[['buyer_admin_id']].drop_duplicates(), on=['buyer_admin_id'], how='inner')\n",
    "    else:\n",
    "        df = df.merge(sample[['buyer_admin_id', key]].drop_duplicates(), on=['buyer_admin_id', key], how='inner')\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_dupli_feature(df):\n",
    "    \"\"\"\n",
    "    df = get_hdf('buy', if_filter_label=True)\n",
    "    import modin.pandas\n",
    "    get_user_item_dupli_feature(df.copy())\n",
    "    \"\"\"\n",
    "    user = df[df[['buyer_admin_id', 'item_id', 'log_time']].duplicated(keep=False)][['buyer_admin_id','item_id']].drop_duplicates()\n",
    "    dup = df.merge(user, how='inner', on=['buyer_admin_id', 'item_id'])\n",
    "    \n",
    "    feature_type = {\n",
    "        'dense_rank' : ['max', 'min', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'],\n",
    "        'last_second_diff':['max', 'min', 'mean']\n",
    "    }    \n",
    "    dup_feature = group_func(dup, feature_type, group_key=['buyer_admin_id', 'item_id'])\n",
    "    dup_feature = add_prefix(dup_feature, ['buyer_admin_id', 'item_id'], 'user_item_dup_')\n",
    "    \n",
    "    dup_cnt = dup.groupby(['buyer_admin_id', 'item_id', 'log_time']).size().to_frame('dup_cnt').reset_index()\n",
    "\n",
    "    feature_type = {\n",
    "        'dup_cnt':['first', 'max', 'min', 'last', 'nunique'],\n",
    "    }\n",
    "    \n",
    "    feature = group_func(dup_cnt, feature_type, group_key=['buyer_admin_id', 'item_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'item_id'], 'user-item_')\n",
    "    feature['user-item_dup_cnt_FIRST=MAX'] = feature['user-item_dup_cnt_MAX'] - feature['user-item_dup_cnt_FIRST']\n",
    "    \n",
    "    irank2 = df[df['irank']==2][['buyer_admin_id', 'item_id']]\n",
    "    irank2_flag = irank2.merge(dup_cnt[['buyer_admin_id', 'item_id']].drop_duplicates(), how='inner', on=['buyer_admin_id', 'item_id'])\\\n",
    "        .merge(feature, how='left', on=['buyer_admin_id', 'item_id'])\n",
    "    irank2_flag['irank2_is_dup'] = 1\n",
    "    irank2_flag['irank2_is_dup_scope'] = irank2_flag['irank2_is_dup'] * (irank2_flag['user-item_dup_cnt_FIRST'] < irank2_flag['user-item_dup_cnt_MAX'])\n",
    "    irank2_flag = irank2_flag.drop([col for col in irank2_flag.columns if 'user-item' in col], 1)\n",
    "\n",
    "    irank3 = df[df['irank']==3][['buyer_admin_id', 'item_id']]\n",
    "    irank3_flag = irank3.merge(dup_cnt[['buyer_admin_id', 'item_id']].drop_duplicates(), how='inner', on=['buyer_admin_id', 'item_id'])\\\n",
    "        .merge(feature, how='left', on=['buyer_admin_id', 'item_id'])\n",
    "    irank3_flag['irank3_is_dup'] = 1\n",
    "    irank3_flag['irank3_is_dup_scope'] = irank3_flag['irank3_is_dup'] * (irank3_flag['user-item_dup_cnt_FIRST'] < irank3_flag['user-item_dup_cnt_MAX'])\n",
    "    irank3_flag = irank3_flag.drop([col for col in irank3_flag.columns if 'user-item' in col], 1)\n",
    "    feature = feature.merge(irank2_flag, how='left', on=['buyer_admin_id', 'item_id'])\n",
    "    feature = feature.merge(irank3_flag, how='left', on=['buyer_admin_id', 'item_id'])\n",
    "    feature = feature.merge(dup_feature, how='left', on=['buyer_admin_id', 'item_id'])\n",
    "    feature.to_hdf('../feature/rank/user_item_dupli_feature', 'all')\n",
    "    print('>>> get_user_item_dupli_feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_feature(df):\n",
    "    \"\"\"\n",
    "    用户基础统计特征：\n",
    "    \n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重数，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒 \n",
    "    3. 品类数：去重\n",
    "    4. 店铺数：去重\n",
    "    5. 商品数：去重\n",
    "    6. 商品价格：最大、最小、平均\n",
    "    \n",
    "    备注：线下：0.8795→0.8795  提升：0\n",
    "    备注：线下：0.8854→0.8850  提升：-0.0004\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'buyer_admin_id' : ['count'],\n",
    "        'day':['nunique', 'max', 'min', np.ptp],\n",
    "        'second':['max', 'min', np.ptp],\n",
    "        'item_id':['nunique'],\n",
    "        'cate_id':['nunique'],\n",
    "        'store_id':['nunique'],\n",
    "        'item_price': ['max', 'min', 'mean'],\n",
    "    }\n",
    "    df = filter_sample(df)\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id'], 'user_')\n",
    "    feature.to_hdf('../feature/rank/user_feature', 'all')\n",
    "    print('>>> user feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_cate_feature(df):\n",
    "    \"\"\"\n",
    "    品类基础特征：\n",
    "    1. 行为数\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        iii: 秒\n",
    "    3. 用户数：去重\n",
    "    4. 商品数：去重\n",
    "    5. 店铺数：去重\n",
    "    6. 商品价格：最大、最小、差值\n",
    "    \n",
    "    备注：线下：0.8795→0.8795  提升：0\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'cate_id' : ['count'],\n",
    "        'buyer_admin_id' : ['nunique'],\n",
    "        'item_id' :['nunique'],\n",
    "        'store_id' : ['nunique'],\n",
    "        'item_price': ['min', 'max', np.ptp],\n",
    "        'day': ['max', 'min', 'nunique'],\n",
    "        'second' : ['max', 'min', 'nunique', np.ptp],\n",
    "    }\n",
    "\n",
    "    feature = group_func(df, feature_type, group_key=['cate_id'])\n",
    "    feature = add_prefix(feature, ['cate_id'], 'cate_')\n",
    "    feature.to_hdf('../feature/rank/cate_feature', 'all')\n",
    "    print('>>> cate feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_store_feature(df):\n",
    "    \"\"\"\n",
    "    店铺基础特征：\n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        iii: 秒\n",
    "    3. 用户数：去重\n",
    "    4. 商品数：去重\n",
    "    5. 品类数：去重\n",
    "    6. 商品价格：最大、最小、差值\n",
    "    \n",
    "    备注：线下：0.8795→0.8795  提升：0\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'store_id' : ['count'],\n",
    "        'buyer_admin_id' : ['nunique'],\n",
    "        'item_id' :['nunique'],\n",
    "        'cate_id' : ['nunique'],\n",
    "        'item_price': ['min', 'max', np.ptp],\n",
    "        'day': ['max', 'min', 'nunique'],\n",
    "        'second' : ['max', 'min', 'nunique', np.ptp],\n",
    "    }\n",
    "\n",
    "    feature = group_func(df, feature_type, group_key=['store_id'])\n",
    "    feature = add_prefix(feature, ['store_id'], 'store_')\n",
    "    feature.to_hdf('../feature/rank/store_feature', 'all')\n",
    "    print('>>> store feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_feature(df):\n",
    "    \"\"\"\n",
    "    商品基础特征：\n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 品类数：去重\n",
    "    6. 商品价格：最大、最小、平均、求和、var、std\n",
    "    3. 用户数：去重\n",
    "    \n",
    "    备注：线下：0.8795→0.8795  提升：0\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id' : ['count'],\n",
    "        'buyer_admin_id' : ['nunique'],\n",
    "        'day': ['max', 'min', 'nunique'],\n",
    "        'second' : ['max', 'min', 'nunique', np.ptp],\n",
    "    }\n",
    "    feature = group_func(df, feature_type, group_key=['item_id'])\n",
    "    feature = add_prefix(feature, ['item_id'], 'item_')\n",
    "    feature.to_hdf('../feature/rank/item_feature', 'all')\n",
    "    print('>>> item feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_cate_feature(df, name='all'):\n",
    "    \"\"\"\n",
    "    商品 * 品类基础特征：\n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 店铺数：去重\n",
    "    4. 商品数：去重\n",
    "    5. 商品价格：最大、最小、平均、求和、var、std\n",
    "    6. 用户数：去重\n",
    "    \n",
    "    备注：线下：0.8697→0.8764  提升：0.0067\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id' : ['nunique'],\n",
    "        'second' : ['nunique', 'max', 'min', 'mean', 'std', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'],\n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'day':['nunique', 'max', 'min', np.ptp],\n",
    "        'item_price': ['max', 'min'],\n",
    "        'dense_rank': ['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    \n",
    "    df = filter_sample(df, 'cate_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'cate_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'cate_id'], 'user_cate_' + name + '_')\n",
    "    feature.to_hdf('../feature/rank/user_cate_feature', name)\n",
    "    print('>>> user_cate feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_store_feature(df, name='all'):\n",
    "    \"\"\"\n",
    "    商品 * 品类基础特征：\n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 店铺数：去重\n",
    "    4. 商品数：去重\n",
    "    5. 商品价格：最大、最小、平均、求和、var、std\n",
    "    6. 用户数：去重\n",
    "    \n",
    "    备注：线下：0.8764→0.8773  提升：0.001\n",
    "    ---------------------------------------------\n",
    "    sample = get_sample(dtype='rank')\n",
    "    df = get_hdf('buy', if_filter_label=True, if_drop_duplicates=True)\n",
    "    get_user_cate_feature(df, 'buy')\n",
    "    get_user_store_feature(df, 'buy')\n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id' : ['nunique'],\n",
    "        'cate_id' : ['nunique'],\n",
    "        'second' : ['nunique', 'max', 'min', 'mean', 'std', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'],\n",
    "        'last_second_diff':['max', 'min', 'mean'],\n",
    "        'day':['nunique', 'max', 'min', np.ptp],\n",
    "        'item_price': ['max', 'min'],\n",
    "        'dense_rank': ['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    \n",
    "    df = filter_sample(df, 'store_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'store_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'store_id'], 'user_store_' + name + '_')\n",
    "    feature.to_hdf('../feature/rank/user_store_feature', name)\n",
    "    print('>>> user_store feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_feature(df):\n",
    "    \"\"\"\n",
    "    用户 * 商品基础特征：\n",
    "    \n",
    "    1. 行为数：计数\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 商品价格：求和\n",
    "    \n",
    "    备注：线下：0→0.88696  提升:\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id':['count'],\n",
    "        'day':['nunique', 'max', 'min', 'mean', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'], \n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'second':['nunique', 'max', 'min', 'mean', np.ptp],\n",
    "        'irank':['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    \n",
    "    df = filter_sample(df, 'item_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'item_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'item_id'], 'user_item_')\n",
    "    feature.to_hdf('../feature/rank/user_item_feature', 'all')\n",
    "    print('>>> user_item feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_dedup_feature(df):\n",
    "    \"\"\"\n",
    "    用户 * 商品基础特征：\n",
    "    \n",
    "    1. 行为数：计数\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 商品价格：求和\n",
    "    \n",
    "    备注：线下：0→0.88696  提升:\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id':['count'],\n",
    "        'day':['nunique', 'max', 'min', 'mean', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'], \n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'second':['nunique', 'max', 'min', 'mean', np.ptp],\n",
    "        'dense_rank': ['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    \n",
    "    df = filter_sample(df, 'item_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'item_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'item_id'], 'user_item_dedup_')\n",
    "    feature.to_hdf('../feature/rank/user_item_dedup_feature', 'all')\n",
    "    print('>>> user_item_dedup_feature feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_cate_dedup_feature(df):\n",
    "    \"\"\"\n",
    "    商品 * 品类基础特征：\n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 店铺数：去重\n",
    "    4. 商品数：去重\n",
    "    5. 商品价格：最大、最小、平均、求和、var、std\n",
    "    6. 用户数：去重\n",
    "    \n",
    "    备注：线下：0.8697→0.8764  提升：0.0067\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id' : ['nunique'],\n",
    "        'store_id' : ['nunique'],\n",
    "        'second' : ['nunique', 'max', 'min', 'mean', 'std', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'],\n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'day':['nunique', 'max', 'min', np.ptp],\n",
    "        'item_price': ['max', 'min'],\n",
    "        'dense_rank':['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    df = filter_sample(df, 'cate_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'cate_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'cate_id'], 'user_cate_dedup_')\n",
    "    feature.to_hdf('../feature/rank/user_cate_dedup_feature', 'all')\n",
    "    print('>>> user_cate_dedup_feature feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_store_dedup_feature(df):\n",
    "    \"\"\"\n",
    "    商品 * 品类基础特征：\n",
    "    1. 行为数： #TODO: 用户下单数 * 划窗\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 店铺数：去重\n",
    "    4. 商品数：去重\n",
    "    5. 商品价格：最大、最小、平均、求和、var、std\n",
    "    6. 用户数：去重\n",
    "    \n",
    "    备注：线下：0.8697→0.8764  提升：0.0067\n",
    "    ---------------------------------------------\n",
    "    \n",
    "    \"\"\"\n",
    "    feature_type = {\n",
    "        'item_id' : ['nunique'],\n",
    "        'cate_id' : ['nunique'],\n",
    "        'second' : ['nunique', 'max', 'min', 'mean', 'std', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'],\n",
    "        'last_second_diff':['max', 'min', 'mean'],\n",
    "        'day':['nunique', 'max', 'min', np.ptp],\n",
    "        'item_price': ['max', 'min'],\n",
    "        'irank_dedup':['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    df = filter_sample(df, 'store_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'store_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'store_id'], 'user_store_dedup_')\n",
    "    feature.to_hdf('../feature/rank/user_store_dedup_feature', 'all')\n",
    "    print('>>> user_store_dedup_feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_lastday_dedup_feature(df):\n",
    "    \"\"\"\n",
    "    在用户最后行为当天内，用户 * 商品基础特征：\n",
    "    \n",
    "    1. 行为数：计数\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 商品价格：求和\n",
    "    \n",
    "    备注：线下：0.8795→0.8783  提升:-0.0012\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    \n",
    "    feature_type = {\n",
    "        'item_id':['count'],\n",
    "        'day':['nunique', 'max', 'min', 'mean', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'], \n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'second':['nunique', 'max', 'min', 'mean', np.ptp],\n",
    "        'dense_rank': ['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    df = filter_sample(df, 'item_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'item_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'item_id'], 'user_item_lastday_dedup_')\n",
    "    feature.to_hdf('../feature/rank/user_item_lastday_dedup_feature', 'all')\n",
    "    print('>>> user_item_lastday_dedup_feature feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_cate_lastday_dedup_feature(df):\n",
    "    \"\"\"\n",
    "    在用户最后行为当天内，用户 * 商品基础特征：\n",
    "    \n",
    "    1. 行为数：计数\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 商品价格：求和\n",
    "    \n",
    "    备注：线下：0.8795→0.8783  提升:-0.0012\n",
    "    ---------------------------------------------\n",
    "    last_day = df.groupby(['buyer_admin_id'])['day'].max().to_frame('last_day').reset_index()\n",
    "    df = df.merge(last_day, on=['buyer_admin_id'], how='left')\n",
    "    df = df[df['day']==df['last_day']]\n",
    "    \"\"\"\n",
    "    \n",
    "    feature_type = {\n",
    "        'item_id' : ['nunique'],\n",
    "        'store_id' : ['nunique'],\n",
    "        'second' : ['nunique', 'max', 'min', 'mean', 'std', np.ptp],\n",
    "        'first_second_diff':['max', 'min', 'mean'],\n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'day':['nunique', 'max', 'min', np.ptp],\n",
    "        'item_price': ['max', 'min'],\n",
    "        'dense_rank':['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    df = filter_sample(df, 'cate_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'cate_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'cate_id'], 'user_cate_lastday_dedup_')\n",
    "    feature.to_hdf('../feature/rank/user_cate_lastday_dedup_feature', 'all')\n",
    "    print('>>> user_cate_lastday_dedup_feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_store_lastday_dedup_feature(df):\n",
    "    \"\"\"\n",
    "    在用户最后行为当天内，用户 * 商品基础特征：\n",
    "    \n",
    "    1. 行为数：计数\n",
    "    2. 行为时间：去重，首次，末次，首末差\n",
    "        i: 天\n",
    "        ii: 小时\n",
    "        iii: 秒\n",
    "    3. 商品价格：求和\n",
    "    \n",
    "    备注：线下：0.8795→0.8783  提升:-0.0012\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    \n",
    "    feature_type = {\n",
    "        'item_id' : ['nunique'],\n",
    "        'cate_id' : ['nunique'],\n",
    "        'first_second_diff':['max', 'min', 'mean'], \n",
    "        'last_second_diff':['max', 'min', 'mean'], \n",
    "        'second':['max', 'min', 'mean', np.ptp],\n",
    "        'item_price': ['max', 'min', 'mean', 'sum', 'std'],\n",
    "        'dense_rank':['max', 'min', 'mean', 'std'],\n",
    "    }\n",
    "    df = filter_sample(df, 'store_id')\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'store_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'store_id'], 'user_store_lastday_dedup_')\n",
    "    feature.to_hdf('../feature/rank/user_store_lastday_dedup_feature', 'all')\n",
    "    print('>>> user_store_lastday_dedup_feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_rank_feature(feature, feature_name, key, group_key=[], ascending=False):\n",
    "    \"\"\"\n",
    "    import pandas\n",
    "    name = 'user_item_dedup_feature'\n",
    "    feat = pandas.read_hdf('../feature/rank/' + name, 'all')\n",
    "    item = pandas.read_csv('../data/Antai_AE_round2_item_attr_20190813.zip')[['item_id', 'cate_id', 'store_id']]\n",
    "    feature = pandas.merge(feat, item, how='left', on=['item_id'])\n",
    "\n",
    "    get_user_rank_feature(feature, name, key=['item_id'], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=['item_id'], group_key=[], ascending=False)\n",
    "    get_user_rank_feature(feature, name, key=['item_id'], group_key=['cate_id'] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=['item_id'], group_key=['cate_id'], ascending=False)\n",
    "    get_user_rank_feature(feature, name, key=['item_id'], group_key=['store_id'] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=['item_id'], group_key=['store_id'], ascending=False)\n",
    "\n",
    "    name = 'user_cate_dedup_feature'\n",
    "    feature = pandas.read_hdf('../feature/rank/' + name, 'all')\n",
    "    get_user_rank_feature(feature, name, key=['cate_id'], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=['cate_id'], group_key=[], ascending=False)\n",
    "\n",
    "    name = 'user_store_dedup_feature'\n",
    "    feature = pandas.read_hdf('../feature/rank/' + name, 'all')\n",
    "    get_user_rank_feature(feature, name, key=['store_id'], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=['store_id'], group_key=[], ascending=False)\n",
    "    \n",
    "    name = 'user_second_diff_feature'\n",
    "    feature = pandas.read_hdf('../feature/rank/' + name, 'user')\n",
    "    get_user_rank_feature(feature, name, key=[], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=[], group_key=[], ascending=False)    \n",
    "\n",
    "    name = 'user_second_diff_feature'\n",
    "    feature = pandas.read_hdf('../feature/rank/' + name, 'item_id')\n",
    "    get_user_rank_feature(feature, name, key=[], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature, name, key=[], group_key=[], ascending=False)\n",
    "    \n",
    "    name = 'user_second_diff_feature'\n",
    "    feature = pd.read_hdf('../feature/rank/' + name, 'item_id')\n",
    "    item = pd.read_csv('../data/Antai_AE_round2_item_attr_20190813.zip')[['item_id', 'cate_id', 'store_id']]\n",
    "    feature = pd.merge(feature, item, how='left', on=['item_id'])\n",
    "\n",
    "    import modin.pandas\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=[], ascending=False)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['cate_id'] ,ascending=True)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['cate_id'], ascending=False)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['store_id'] ,ascending=True)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['store_id'], ascending=False)\n",
    "    \n",
    "    name = 'user_item_lastday_dedup_feature'\n",
    "    feature = pd.read_hdf('../feature/rank/' + name, 'all')\n",
    "    item = pd.read_csv('../data/Antai_AE_round2_item_attr_20190813.zip')[['item_id', 'cate_id', 'store_id']]\n",
    "    feature = pd.merge(feature, item, how='left', on=['item_id'])\n",
    "\n",
    "    import modin.pandas\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=[] ,ascending=True)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=[], ascending=False)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['cate_id'] ,ascending=True)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['cate_id'], ascending=False)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['store_id'] ,ascending=True)\n",
    "    get_user_rank_feature(feature.copy(), name, key=['item_id'], group_key=['store_id'], ascending=False)\n",
    "    ------------------------------------------------------------------------------------------\n",
    "    'user_item_dedup_feature', 'asc'\n",
    "    'user_item_dedup_feature', 'desc'\n",
    "    'user_item_dedup_feature', 'cate_id_asc'\n",
    "    'user_item_dedup_feature', 'cate_id_desc\n",
    "    'user_item_dedup_feature', 'store_id_asc'\n",
    "    'user_item_dedup_feature', 'store_id_desc\n",
    "    \n",
    "    'user_cate_dedup_feature', 'asc'\n",
    "    'user_cate_dedup_feature', 'desc'    \n",
    "\n",
    "    'user_store_dedup_feature', 'asc'\n",
    "    'user_store_dedup_feature', 'desc'\n",
    "    \n",
    "    'user_second_diff_feature', 'asc'\n",
    "    'user_second_diff_feature', 'desc'\n",
    "    \n",
    "    'user_second_diff_feature', 'cate_id_asc'\n",
    "    'user_second_diff_feature', 'cate_id_desc'\n",
    "    \n",
    "    'user_second_diff_feature', 'store_id_asc'\n",
    "    'user_second_diff_feature', 'store_id_desc'\n",
    "    \n",
    "    'user_item_lastday_dedup_feature', 'asc'\n",
    "    'user_item_lastday_dedup_feature', 'desc'\n",
    "    \n",
    "    'user_item_lastday_dedup_feature', 'cate_id_asc'\n",
    "    'user_item_lastday_dedup_feature', 'cate_id_desc'\n",
    "    \n",
    "    'user_item_lastday_dedup_feature', 'store_id_asc'\n",
    "    'user_item_lastday_dedup_feature', 'store_id_desc'\n",
    "    \n",
    "    用户 * 商品 * 排序 基础特征：\n",
    "    \n",
    "    备注：\n",
    "    1. desc降序：线下：0.8795→0.8795  提升:0\n",
    "    1. asc升序：线下：0.8795→0.8813  提升:-0.0018\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    if ascending:\n",
    "        name = 'asc'\n",
    "    else:\n",
    "        name = 'desc'\n",
    "    columns = []\n",
    "    for col in feature.columns:\n",
    "        if col not in ['buyer_admin_id', 'item_id', 'cate_id', 'store_id']:\n",
    "            column_name = col + '_rank_' + name\n",
    "            feature[column_name] = feature.groupby(['buyer_admin_id'] + group_key)[col].rank(ascending=ascending, method='dense')\n",
    "            columns.append(column_name)\n",
    "            \n",
    "    if len(group_key)>0:\n",
    "        feature = feature[['buyer_admin_id', 'item_id'] + group_key + columns]\n",
    "        name = group_key[0] + '_' + name\n",
    "    else:\n",
    "        feature = feature[['buyer_admin_id']+ key + columns]\n",
    "        \n",
    "    feature.to_hdf('../feature/rank/' + feature_name, name)\n",
    "    print('>>> user_rank_feature feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_second_diff_feature(df):\n",
    "    \"\"\"\n",
    "    Per-user time-gap (seconds) statistics features.\n",
    "\n",
    "    Usage:\n",
    "        df = get_hdf('all', if_filter_label=True, if_drop_duplicates=True)\n",
    "        get_user_second_diff_feature(df.copy())\n",
    "\n",
    "    Computed gaps (current 'second' minus the previous record's, in row order):\n",
    "    1. gap to the user's previous record of any kind\n",
    "    2. gap to the user's previous record of the same cate_id\n",
    "    3. gap to the user's previous record of the same store_id\n",
    "\n",
    "    Each gap is aggregated (max/min/mean/std/ptp) per user and per\n",
    "    (user, item_id/cate_id/store_id), then written to\n",
    "    '../feature/recall/user_second_diff_feature' under keys\n",
    "    'user'/'item_id'/'cate_id'/'store_id'.\n",
    "\n",
    "    Note (original experiment): offline 0.8843 -> 0.8852.\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    df = df[['buyer_admin_id', 'store_id', 'cate_id', 'item_id', 'second']].drop_duplicates()\n",
    "    df['second_diff'] = df['second'] - df.groupby(['buyer_admin_id'])['second'].shift(1)\n",
    "    df['cate_id_second_diff'] = df['second'] - df.groupby(['buyer_admin_id', 'cate_id'])['second'].shift(1)\n",
    "    df['store_id_second_diff'] = df['second'] - df.groupby(['buyer_admin_id', 'store_id'])['second'].shift(1)\n",
    "    \n",
    "    feature_type = {\n",
    "        'second_diff' : ['max', 'min', 'mean', 'std', np.ptp],\n",
    "        'cate_id_second_diff':['max', 'min', 'mean', 'std', np.ptp],\n",
    "        'store_id_second_diff':['max', 'min', 'mean', 'std', np.ptp],\n",
    "    }\n",
    "    \n",
    "    # Keep only users present in the evaluation sample (inner merge inside).\n",
    "    df = filter_sample(df)\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id'])\n",
    "    feature = add_prefix(feature, ['buyer_admin_id'], 'user_second_diff_')\n",
    "    feature.to_hdf('../feature/recall/user_second_diff_feature', 'user')\n",
    "    \n",
    "    # Same statistics, aggregated per (user, item/cate/store).\n",
    "    for level in ['item_id', 'cate_id', 'store_id']:\n",
    "        feature = group_func(df, feature_type, group_key=['buyer_admin_id', level])\n",
    "        feature = add_prefix(feature, ['buyer_admin_id', level], 'user_' + level + '_second_diff_')\n",
    "        feature.to_hdf('../feature/recall/user_second_diff_feature', level)\n",
    "    print('>>> user_second_diff_feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_prop_feature(df):\n",
    "    \"\"\"\n",
    "    User preference-proportion features: normalized value_counts of\n",
    "    item_id/cate_id/store_id per user, and per (user, cate_id) /\n",
    "    (user, store_id).\n",
    "\n",
    "    Each table is written to '../feature/rank/user_prop_feature' under a key\n",
    "    equal to its feature-column name; nothing is returned.\n",
    "\n",
    "    Notes (original experiments):\n",
    "    offline 0.8795 -> 0.8786\n",
    "    offline 0.8799 -> 0.8800\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    # One record per (user, item, second); keep the first occurrence.\n",
    "    df = df.drop_duplicates(subset=['buyer_admin_id', 'item_id', 'second'], keep='first')\n",
    "    \n",
    "    # (sample-filter key, value columns) per aggregation level. filter_sample\n",
    "    # is applied cumulatively, matching the original sequential calls\n",
    "    # (None -> 'cate_id' -> 'store_id').\n",
    "    levels = [\n",
    "        (None, ['item_id', 'cate_id', 'store_id']),\n",
    "        ('cate_id', ['item_id', 'store_id']),\n",
    "        ('store_id', ['item_id', 'cate_id']),\n",
    "    ]\n",
    "    for key, value_columns in levels:\n",
    "        df = filter_sample(df, key)\n",
    "        group_key = ['buyer_admin_id'] if key is None else ['buyer_admin_id', key]\n",
    "        for col in value_columns:\n",
    "            # e.g. 'item_id_prop', 'item_id_prop_cate_id', ...\n",
    "            name = col + '_prop' if key is None else col + '_prop_' + key\n",
    "            feature = df.groupby(group_key)[col].value_counts(normalize=True).to_frame(name).reset_index()\n",
    "            feature.to_hdf('../feature/rank/user_prop_feature', name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_irank_feature(df):\n",
    "    \"\"\"\n",
    "    User * item flags against the user's irank-2..5 records.\n",
    "\n",
    "    For each i in 2..5, and for each (user, item) pair:\n",
    "    1. whether the pair's item_id equals the irank-i record's item_id\n",
    "    2. whether its cate_id equals the irank-i record's cate_id\n",
    "    3. whether its store_id equals the irank-i record's store_id\n",
    "    \"\"\"\n",
    "    feature = df[['buyer_admin_id', 'item_id', 'cate_id', 'store_id']].drop_duplicates()\n",
    "    \n",
    "    # Attach each user's irank-i item/cate/store ids as reference columns.\n",
    "    for i in range(2, 6):\n",
    "        irank = df[df['irank']==i][['buyer_admin_id', 'item_id', 'cate_id', 'store_id']]\n",
    "        irank.columns = ['buyer_admin_id', f'irank{i}_item_id', f'irank{i}_cate_id', f'irank{i}_store_id']\n",
    "        feature = pd.merge(feature, irank, how='left', on='buyer_admin_id')\n",
    "\n",
    "    # Turn every irank{i}_<col> column into a 0/1 equality flag against the\n",
    "    # row's own <col> ('irank2_item_id'.split('_', 1)[1] -> 'item_id').\n",
    "    for col in feature.columns:\n",
    "        if 'irank' in col:\n",
    "            feature[col] = (feature[col]==feature[col.split('_', 1)[1]]).astype(int)\n",
    "    \n",
    "    # Use the axis keyword: positional axis is deprecated and removed in pandas 2.0.\n",
    "    feature = feature.drop(['cate_id', 'store_id'], axis=1)\n",
    "    feature.to_hdf('../feature/rank/user_item_irank_feature', 'all')\n",
    "    print('>>> user-item-irank feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_rank_diff_feature(df, groupby_key=[]):\n",
    "    \"\"\"\n",
    "    User * item rank-gap features.\n",
    "\n",
    "    Measures gaps between consecutive views of the same item, both in the\n",
    "    overall per-user dense_rank and in per-category / per-store running view\n",
    "    counters (cumcount); the gaps are aggregated per (user, item) and then\n",
    "    dense-ranked within each user.\n",
    "\n",
    "    TODO (from original author): add a per-user rank variant.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Gap in overall dense_rank between consecutive views of the same item.\n",
    "    df['item_rank_diff'] = df['dense_rank'] - df.groupby(['buyer_admin_id', 'item_id'])['dense_rank'].shift(1)\n",
    "    \n",
    "    # Running view counter within (user, cate/store) and its gap between\n",
    "    # consecutive views of the same item.\n",
    "    for col in ['cate_id', 'store_id']:\n",
    "        df[col + '_rank'] = df.groupby(['buyer_admin_id', col]).cumcount()\n",
    "        df[col + '_rank_diff'] = df[col + '_rank'] - df.groupby(['buyer_admin_id', 'item_id'])[col + '_rank'].shift(1)\n",
    "#         df.loc[df[col + '_rank_diff'].isnull(), col + '_rank_diff'] = df[col + '_rank']\n",
    "    \n",
    "#     df['cate_id_rank_diff'] = df['dense_rank'] - df.groupby(['buyer_admin_id', 'cate_id'])['dense_rank'].shift(1)\n",
    "#     df['store_id_rank_diff'] = df['dense_rank'] - df.groupby(['buyer_admin_id', 'store_id'])['dense_rank'].shift(1)\n",
    "    \n",
    "    feature_type = {\n",
    "        'store_id_rank' : ['first', 'last'],\n",
    "        'cate_id_rank' : ['first', 'last'], # rank 0.0002\n",
    "        'cate_id_rank_diff' : ['first', 'max', 'min', 'mean'],\n",
    "        'store_id_rank_diff' : ['first', 'max', 'min', 'mean'],\n",
    "        'item_rank_diff': ['first', 'max', 'min', 'mean'],\n",
    "    }\n",
    "    \n",
    "    df = filter_sample(df)\n",
    "    feature = group_func(df, feature_type, group_key=['buyer_admin_id', 'item_id'])\n",
    "    \n",
    "    # Ascending dense rank of every aggregated column within each user\n",
    "    # (optionally further grouped by groupby_key).\n",
    "    for col in feature.columns:\n",
    "        if col not in ['buyer_admin_id', 'item_id', 'cate_id', 'store_id']:\n",
    "            if len(groupby_key) > 0:\n",
    "                name = col + '_'+ groupby_key[0] + '_rank_asc'\n",
    "            else:\n",
    "                name = col + '_rank_asc'\n",
    "            feature[name] = feature.groupby(['buyer_admin_id'] + groupby_key)[col].rank(ascending=True, method='dense')  \n",
    "\n",
    "    # NOTE(review): unlike the other builders ('user_second_diff_'), this\n",
    "    # prefix has no trailing underscore -- confirm before renaming downstream.\n",
    "    feature = add_prefix(feature, ['buyer_admin_id', 'item_id', 'cate_id', 'store_id'], 'user_rank_diff')\n",
    "    feature.to_hdf('../feature/rank/user_rank_diff_feature', 'all')\n",
    "    print('>>> user_item_rank_diff_feature feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_user_item_rank_diff_rank_cate(feature, ascending=False):\n",
    "    \"\"\"\n",
    "    User * item * within-category ranking of the rank-diff features.\n",
    "\n",
    "    Dense-ranks every feature column within (buyer_admin_id, cate_id),\n",
    "    ascending or descending, and stores the result under key 'asc'/'desc'.\n",
    "\n",
    "    Notes (original experiments):\n",
    "    1. desc: offline 0.8880 -> 0.8884 (gain +0.0004)\n",
    "    2. asc:  offline 0.8880 -> 0.8885 (gain +0.0005)\n",
    "    ---------------------------------------------\n",
    "    \"\"\"\n",
    "    if ascending:\n",
    "        name = 'asc'\n",
    "    else:\n",
    "        name = 'desc'\n",
    "    columns = []\n",
    "    # Recover cate_id from the item attribute table when absent.\n",
    "    if 'cate_id' not in feature.columns:\n",
    "        item = pd.read_csv('../data/Antai_AE_round2_item_attr_20190813.zip')[['item_id', 'cate_id']]\n",
    "        feature = pd.merge(feature, item, how='left', on=['item_id'])\n",
    "    \n",
    "    for col in feature.drop(['buyer_admin_id', 'item_id', 'cate_id'], axis=1).columns:\n",
    "        # BUG FIX: was col + '_RANK_' + 'name' (literal string), which made asc\n",
    "        # and desc runs emit identical column names; use the name variable,\n",
    "        # matching the other rank-feature builders' '<col>_rank_<asc|desc>'.\n",
    "        column_name = col + '_rank_' + name\n",
    "        feature[column_name] = feature.groupby(['buyer_admin_id', 'cate_id'])[col].rank(ascending=ascending, method='dense')\n",
    "        columns.append(column_name)\n",
    "        \n",
    "    feature = feature[['buyer_admin_id', 'item_id', 'cate_id'] + columns]\n",
    "    feature.to_hdf('../feature/rank/user_item_rank_diff_rank_cate', name)\n",
    "    print('>>> user_item_rank_diff_rank_cate feature success')\n",
    "    return feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_item_conv_feature(df):\n",
    "    \"\"\"\n",
    "    Item conversion-rate features (pv, uv, buy uv, repeat-buy uv and ratios).\n",
    "\n",
    "    Usage:\n",
    "        df = get_hdf('all', if_filter_label=True, if_drop_duplicates=True)\n",
    "        get_item_conv_feature(df)\n",
    "    \"\"\"\n",
    "    # pv: record count per item (deduplicated per user/item/second);\n",
    "    # uv: distinct users per item; buy_uv: distinct buyers per item.\n",
    "    item_pv = df.drop_duplicates(subset=['buyer_admin_id', 'item_id', 'second']).groupby(['item_id']).size().to_frame('pv').reset_index()\n",
    "    item_uv = df.groupby(['item_id'])['buyer_admin_id'].nunique().to_frame('uv').reset_index()\n",
    "    item_buy_uv = df[df['buy_flag']==1].groupby(['item_id'])['buyer_admin_id'].nunique().to_frame('buy_uv').reset_index()\n",
    "\n",
    "    # Buyers with duplicated (user, item, second) buy rows. Combine both masks\n",
    "    # in one boolean index instead of chained df[m1][m2], which reindexes the\n",
    "    # second mask and raises a pandas UserWarning; the selected rows are the same.\n",
    "    dup = df[(df['buy_flag']==1) & df.duplicated(subset=['buyer_admin_id', 'item_id', 'second'], keep=False)]\n",
    "    multi_buy_uv = dup.groupby(['item_id'])['buyer_admin_id'].nunique().to_frame('multi_buy_uv').reset_index()\n",
    "\n",
    "    # Share of the item's viewers who viewed it exactly once.\n",
    "    view_time = df.groupby(['buyer_admin_id', 'item_id']).size().to_frame('user_view_time').reset_index()\n",
    "    view_one_time = view_time.groupby(['item_id'])['user_view_time'].value_counts(normalize=True).to_frame('view_onetime_prop').reset_index()\n",
    "    view_one_time = view_one_time[view_one_time['user_view_time']==1].drop(['user_view_time'], axis=1)\n",
    "    \n",
    "    # How often the item is a user's first record, overall and per day.\n",
    "    last = df.drop_duplicates(subset=['buyer_admin_id'], keep='first')\n",
    "    last_cnt = last.groupby(['item_id']).size().to_frame('last_buy').reset_index()\n",
    "    \n",
    "    last_via_day = df.drop_duplicates(subset=['buyer_admin_id', 'day'], keep='first')\\\n",
    "        .drop_duplicates(subset=['buyer_admin_id', 'item_id'], keep='first')\n",
    "    last_via_day_cnt = last_via_day.groupby(['item_id']).size().to_frame('last_buy_day').reset_index()\n",
    "    \n",
    "    \n",
    "    feature = item_pv.merge(item_uv, on=['item_id'], how='left')\\\n",
    "            .merge(item_buy_uv, on=['item_id'], how='left')\\\n",
    "            .merge(multi_buy_uv, on=['item_id'], how='left')\\\n",
    "            .merge(view_one_time, on=['item_id'], how='left')\\\n",
    "            .merge(last_cnt, on=['item_id'], how='left')\\\n",
    "            .merge(last_via_day_cnt, on=['item_id'], how='left').fillna(0)\n",
    "\n",
    "    feature['pv/uv'] = feature['pv'] / feature['uv']\n",
    "    # NOTE(review): the name says buy_uv/pv but the denominator is uv; kept\n",
    "    # as-is to preserve the trained feature values -- confirm the intended\n",
    "    # denominator before changing.\n",
    "    feature['buy_uv/pv'] = feature['buy_uv'] / feature['uv']\n",
    "    feature['multi_buy_uv/buy_uv'] = feature['multi_buy_uv'] / feature['buy_uv']\n",
    "    feature['multi_buy_uv/uv'] = feature['multi_buy_uv'] / feature['uv']\n",
    "    feature['last_buy/uv'] = feature['last_buy'] / feature['uv']\n",
    "    feature['last_buy/buy_uv'] = feature['last_buy'] / feature['buy_uv']\n",
    "    feature = feature.fillna(0)\n",
    "    \n",
    "    feature = add_prefix(feature, ['item_id'], 'item_conv_')\n",
    "    feature.to_hdf('../feature/rank/item_conv_feature', 'all')\n",
    "    print('>>> item_conv_feature success')\n",
    "    return feature"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python3 (PySpark)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
