{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.384798Z",
     "start_time": "2024-09-20T12:09:46.364880Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import pandas as p\n",
    "import numpy as n\n",
    "import xgboost as xgb\n",
    "from datetime import date"
   ],
   "id": "b803ba0723f6d1b4",
   "outputs": [],
   "execution_count": 33
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.400711Z",
     "start_time": "2024-09-20T12:09:46.388743Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def getWeekday(row):\n",
    "    if row == 'nan':\n",
    "        return n.nan\n",
    "    else:\n",
    "        return date(int(row[0:4]), int(row[4:6]), int(row[6:8])).weekday() + 1"
   ],
   "id": "e58c17bab77f815f",
   "outputs": [],
   "execution_count": 34
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.415671Z",
     "start_time": "2024-09-20T12:09:46.403705Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def prepr(raw):\n",
    "    \"\"\"Feature-engineer a raw O2O coupon dataframe.\n",
    "\n",
    "    Adds: discount decomposition (MJ flag, JIAN rebate, MI_COST threshold,\n",
    "    DISCOUNT rate), distance fill + missing flag, parsed dates, weekday\n",
    "    features, and -- when a consumption 'Date' column exists -- the binary\n",
    "    'label' marking redemption within 15 days of receipt.\n",
    "    Returns a new dataframe; `raw` is not modified.\n",
    "    \"\"\"\n",
    "    pre = raw.copy()\n",
    "    pre['num'] = 1\n",
    "    # Discount rate: 'x:y' means 'spend x, save y'; anything else is a plain rate.\n",
    "    pre['MJ'] = list(map(lambda x: 1 if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['JIAN'] = list(map(lambda x: int(str(x).split(':')[1]) if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['MI_COST'] = list(map(lambda x: int(str(x).split(':')[0]) if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['DISCOUNT'] = list(map(lambda x: (float(str(x).split(':')[0]) - float(str(x).split(':')[1])) / float(\n",
    "        str(x).split(':')[0]) if ':' in str(x) else float(x), pre['Discount_rate']))\n",
    "    # Distance: -1 marks 'unknown', flagged separately below.\n",
    "    pre['Distance'] = pre['Distance'].fillna(-1)\n",
    "    pre['NUII_DISTANCE'] = pre['Distance'].map(lambda x: 1 if x == -1 else 0)\n",
    "    # Dates\n",
    "    pre['DATE_RECEIVED'] = p.to_datetime(pre['Date_received'], format='%Y%m%d')\n",
    "    if 'Date' in pre.columns.tolist():\n",
    "        pre['DATE'] = p.to_datetime(pre['Date'], format='%Y%m%d')\n",
    "        # label = 1 iff the coupon was used within 15 days of being received.\n",
    "        # BUGFIX: elapsed time is Date - Date_received (the original computed the\n",
    "        # reverse, which is <= 15 for every redemption and ignored the window).\n",
    "        # NaT propagates to NaN seconds, so unused coupons fail the check -> 0.\n",
    "        pre['label'] = list(\n",
    "            map(lambda y, x: 1 if 0 <= (x - y).total_seconds() / (24 * 3600) <= 15 else 0,\n",
    "                pre['DATE_RECEIVED'], pre['DATE']))\n",
    "    pre['weekday'] = pre['Date_received'].astype(str).apply(getWeekday)\n",
    "    pre['weekday_type'] = pre['weekday'].apply(lambda x: 1 if x in [6, 7] else 0)\n",
    "    return pre"
   ],
   "id": "457eb1c3c1e449df",
   "outputs": [],
   "execution_count": 35
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.430636Z",
     "start_time": "2024-09-20T12:09:46.417665Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def construct_data(history, label):\n",
    "    \"\"\"Join label-window features with history-window features into one dataset.\"\"\"\n",
    "    label_f = get_label_f(label)\n",
    "    history_f = get_history_f(history, label)\n",
    "    # Drop the columns the two feature frames share before concatenating side by side.\n",
    "    shared_cols = list(set(label_f.columns.tolist()) & set(history_f.columns.tolist()))\n",
    "    data = p.concat([label_f, history_f.drop(shared_cols, axis=1)], axis=1)\n",
    "    # De-duplicate full rows, keeping the last occurrence, then rebuild a 0..n-1 index.\n",
    "    data = data.drop_duplicates(subset=None, keep='last')\n",
    "    data.index = range(len(data))\n",
    "    return data"
   ],
   "id": "3ca08a0f7f32403",
   "outputs": [],
   "execution_count": 36
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.603900Z",
     "start_time": "2024-09-20T12:09:46.434622Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_history_f(history, label):\n",
    "    data = history.copy()\n",
    "    data['Coupon_id'] = data['Coupon_id'].map(int)\n",
    "    data['Date_received'] = data['Date_received'].map(int)\n",
    "    h_f = label.copy()\n",
    "    ###########################      用户\n",
    "    keys = ['User_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    # 用户  领满减数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'MJ'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减率\n",
    "    h_f[prefixs + 'lu_MJ'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'MJ'], h_f[prefixs + 'received']))\n",
    "\n",
    "    # 用户15天内核销最大折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销最小折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销平均折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销中位折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_mean'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券减额中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_medain'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################         用户+商家\n",
    "    keys = ['User_id', 'Merchant_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    # 用户+商家15天内核销最大折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销最小折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销平均折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销中位折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_medain'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################           用户+优惠券\n",
    "    keys = ['User_id', 'Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################             用户+折扣率\n",
    "    keys = ['User_id', 'DISCOUNT']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+折扣率 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+折扣率 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+折扣率 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################            用户+日期\n",
    "    keys = ['User_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    ################################# todo            用户+周内/周末\n",
    "    keys = ['User_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+周内/周末核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+周内/周末核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    \n",
    "  \n",
    "    #################################                用户+商家+优惠券\n",
    "    keys = ['User_id', 'Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+优惠券 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+优惠券 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+优惠券 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################                   用户+商家+日期\n",
    "    keys = ['User_id', 'Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    #  用户+商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+日期 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+日期 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################  todo                 用户+商家+周内/周末\n",
    "    keys = ['User_id', 'Merchant_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    #  用户+商家+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+周内/周末 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+周内/周末 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    #################################                   用户+优惠券+日期\n",
    "    keys = ['User_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+日期 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+日期 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+日期 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "#################################todo                   用户+优惠券+周末/周内\n",
    "    keys = ['User_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+周末/周内 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+周末/周内 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+周末/周内 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################        商家\n",
    "    keys = ['Merchant_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    # 商家15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销最大折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销最小折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销平均折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销中位折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_medain'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################             商家+优惠券\n",
    "    keys = ['Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################             商家+折扣率\n",
    "    keys = ['Merchant_id', 'DISCOUNT']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+折扣率核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+折扣率核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################             商家+日期\n",
    "    keys = ['Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "     #################################  todo           商家+周末/周内\n",
    "    keys = ['Merchant_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+周末/周内领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+周末/周内核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+周末/周内核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    #################################             商家+优惠券+日期\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+日期 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    ################################# todo             商家+优惠券+周内/周末\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+周内/周末 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+周内/周末核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+周内/周末核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################               优惠券\n",
    "    keys = ['Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    # 优惠券15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 优惠券15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 优惠券15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_mean'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 优惠券15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    #################################               优惠券+日期\n",
    "    keys = ['Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    #################################       todo        优惠券+周内/周末\n",
    "    keys = ['Coupon_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+周内/周末核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+周内/周末核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################             折扣率\n",
    "    keys = ['DISCOUNT']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 折扣率核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 折扣率核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################日期\n",
    "    keys = ['DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 当日领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 当日核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 当日核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received'])) \n",
    "    \n",
    "    # 当日15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 当日15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 当日15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_mean'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 当日15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户距离正反排序\n",
    "    h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
    "    h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 用户折扣正反排序\n",
    "    h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
    "    h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 用户领券日期正反排序\n",
    "    h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    ####\n",
    "    # 商家距离正反排序\n",
    "    h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
    "    h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 商家折扣正反排序\n",
    "    h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
    "    h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 商家领券日期正反排序\n",
    "    h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    #####\n",
    "\n",
    "    ############################################~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
    "    # 优惠券距离正反排序\n",
    "    h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
    "    h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券折扣正反排序\n",
    "    h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
    "    h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券领券日期正反排序\n",
    "    h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    h_f.fillna(0, downcast='infer', inplace=True)\n",
    "    return h_f"
   ],
   "id": "e6f67e35a87e7778",
   "outputs": [],
   "execution_count": 37
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.741117Z",
     "start_time": "2024-09-20T12:09:46.617765Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_label_f(label):\n",
    "    data = label.copy()\n",
    "    data['Coupon_id'] = data['Coupon_id'].map(int)\n",
    "    data['Date_received'] = data['Date_received'].map(int)\n",
    "    l_f = label.copy()\n",
    "    ###################################用户\n",
    "    keys = ['User_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 每个用户领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    # 用户领券的最大距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领券的平均距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领券的距离中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率最大值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率最小值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率平均数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_mean'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费平均数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 用户第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 用户最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################用户+商家\n",
    "    keys = ['User_id', 'Merchant_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率最大值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率最小值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率平均数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_mean'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费平均数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 用户+商家第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 用户+商家最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################用户+优惠券\n",
    "    keys = ['User_id', 'Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################用户+折扣率\n",
    "    keys = ['User_id', 'DISCOUNT']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 用户+折扣率第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 用户+折扣率最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################用户+日期\n",
    "    keys = ['User_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #todo 用户+日类型领券数 周内/周末\n",
    "    keys = ['User_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################用户+商家+优惠券\n",
    "    keys = ['User_id', 'Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################用户+商家+日期\n",
    "    keys = ['User_id', 'Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    ################################# todo 用户+商家+周内/周末\n",
    "    keys = ['User_id', 'Merchant_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################用户+优惠券+日期\n",
    "    keys = ['User_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #todo 20240920 #################################用户+优惠券+周类型周内或周末\n",
    "    keys = ['User_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+周类型周内或周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################商家\n",
    "    keys = ['Merchant_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家被领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的最大距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的平均距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的距离中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率最大值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率最小值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率平均数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_mean'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费平均数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 商家被第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 商家被最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################商家+优惠券\n",
    "    keys = ['Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################商家+折扣率\n",
    "    keys = ['Merchant_id', 'DISCOUNT']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 商家+折扣率第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 商家+折扣率最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################商家+日期\n",
    "    keys = ['Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################todo 商家+周内/周末\n",
    "    keys = ['Merchant_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################商家+优惠券+日期\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################todo 商家+优惠券+周内/周末\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################优惠券\n",
    "    keys = ['Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    # 优惠券被领券的最大距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 优惠券被领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 优惠券被领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 优惠券被领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################优惠券+日期\n",
    "    keys = ['Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    ################################# todo 优惠券+周内/周末\n",
    "    keys = ['Coupon_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################折扣率\n",
    "    keys = ['DISCOUNT']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 折扣率 被领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 折扣率 被第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 折扣率 被最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################日期\n",
    "    keys = ['DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 当日领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    # 用户距离正反排序\n",
    "    l_f['label_User_distance_true_rank'] = l_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
    "    l_f['label_User_distance_False_rank'] = l_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 用户折扣正反排序\n",
    "    l_f['label_User_discount_rate_true_rank'] = l_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
    "    l_f['label_User_discount_rate_False_rank'] = l_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    ####\n",
    "    # 商家距离正反排序\n",
    "    l_f['label_Merchant_distance_true_rank'] = l_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
    "    l_f['label_Merchant_distance_False_rank'] = l_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 商家折扣正反排序\n",
    "    l_f['label_Merchant_discount_rate_true_rank'] = l_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
    "    l_f['label_Merchant_discount_rate_False_rank'] = l_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    #####\n",
    "\n",
    "    ############################################~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
    "    # 优惠券距离正反排序\n",
    "    l_f['label_Coupon_distance_true_rank'] = l_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
    "    l_f['label_Coupon_distance_False_rank'] = l_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券折扣正反排序\n",
    "    l_f['label_Coupon_discount_rate_true_rank'] = l_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
    "    l_f['label_Coupon_discount_rate_False_rank'] = l_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券领券日期正反排序\n",
    "    l_f['label_Coupon_date_received_true_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    l_f.fillna(0, downcast='infer', inplace=True)\n",
    "    return l_f"
   ],
   "id": "c2f56294b15afcc7",
   "outputs": [],
   "execution_count": 38
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:09:46.757095Z",
     "start_time": "2024-09-20T12:09:46.743120Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def model_xgb(train, test):\n",
    "    \"\"\"Train an XGBoost binary classifier and score the test set.\n",
    "\n",
    "    train: DataFrame with feature columns, id/date columns and a 'label' column.\n",
    "    test:  DataFrame with feature columns and id/date columns (no label).\n",
    "    Returns (result, feat_importance):\n",
    "      result          -- columns ['User_id', 'Coupon_id', 'Date_received', 'prob']\n",
    "      feat_importance -- columns ['feature_name', 'importance'], sorted descending\n",
    "    \"\"\"\n",
    "    params = {\n",
    "        'booster': 'gbtree',  # tree-based gradient boosting\n",
    "        'objective': 'binary:logistic',  # binary classification; outputs probabilities\n",
    "        'eval_metric': 'auc',  # Area Under the ROC Curve (stray second 'auc' token removed -- it made this dict a SyntaxError)\n",
    "        'silent': 0,  # 0 = print training info, 1 = silent mode\n",
    "        'eta': 0.01,  # learning rate / shrinkage (typical 0.01~0.2); smaller = more stable, needs more rounds\n",
    "        'max_depth': 5,  # max tree depth (typical 3~10); shallower trees resist overfitting\n",
    "        'min_child_weight': 1,  # min sum of instance weights required in a child node before splitting\n",
    "        'gamma': 0,  # min loss reduction required to split a node; larger = more conservative\n",
    "        'lambda': 1,  # L2 regularisation coefficient on leaf weights\n",
    "        'colsample_bylevel': 0.7,  # feature subsample ratio per tree level (0.5~1)\n",
    "        'colsample_bytree': 0.7,  # feature subsample ratio per tree (0.5~1)\n",
    "        'subsample': 0.9,  # row subsample ratio per tree (0.5~1)\n",
    "        'scale_pos_weight': 1,  # positive/negative sample weight ratio; 1 = treat classes equally\n",
    "    }\n",
    "    # Build DMatrix inputs; drop id/date/raw columns that are not model features.\n",
    "    dtrain = xgb.DMatrix(train.drop(\n",
    "        ['User_id', 'Coupon_id', 'Merchant_id', 'Discount_rate', 'Date', 'DATE_RECEIVED', 'Date_received', 'label',\n",
    "         'DATE'], axis=1), label=train['label'])\n",
    "    dtest = xgb.DMatrix(\n",
    "        test.drop(['User_id', 'Coupon_id', 'Merchant_id', 'Discount_rate', 'DATE_RECEIVED', 'Date_received'], axis=1))\n",
    "    # Train (3 boosting rounds, monitored on the training set itself)\n",
    "    watchlist = [(dtrain, 'train')]\n",
    "    model = xgb.train(params, dtrain, 3, watchlist)\n",
    "    # Predict probabilities for the test rows\n",
    "    predict = model.predict(dtest)\n",
    "    # Assemble the submission frame: ids + predicted probability\n",
    "    predict = p.DataFrame(predict, columns=['prob'])\n",
    "    result = p.concat([test[['User_id', 'Coupon_id', 'Date_received']], predict], axis=1)\n",
    "    # Feature importance: call get_score() once so keys and values stay aligned,\n",
    "    # and build the frame in a single constructor instead of column-by-column assignment.\n",
    "    scores = model.get_score()\n",
    "    feat_importance = p.DataFrame({'feature_name': list(scores.keys()),\n",
    "                                   'importance': list(scores.values())})\n",
    "    feat_importance.sort_values(['importance'], ascending=False, inplace=True)\n",
    "    return result, feat_importance"
   ],
   "id": "5f011fe790c84237",
   "outputs": [],
   "execution_count": 39
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-20T12:25:01.575980Z",
     "start_time": "2024-09-20T12:14:25.389518Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Raw data\n",
    "raw_train = p.read_csv(\"data/ccf_offline_stage1_train.csv\")\n",
    "raw_test = p.read_csv(\"data/ccf_offline_stage1_test_revised.csv\")\n",
    "# Preprocessing\n",
    "prepr_train = prepr(raw_train)\n",
    "prepr_test = prepr(raw_test)\n",
    "# Split into time windows (sliding-window scheme: 60-day history feeding a 31-day label interval)\n",
    "# Training set: history and label intervals\n",
    "train_history = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/3/2', periods=60))]\n",
    "train_label = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/5/16', periods=31))]\n",
    "# Validation set: history and label intervals\n",
    "verification_history = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/1/16', periods=60))]\n",
    "verification_label = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/3/31', periods=31))]\n",
    "# Test set: history interval; the label interval is the whole revised test file\n",
    "test_history = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/4/17', periods=60))]\n",
    "test_label = prepr_test.copy()\n",
    "# Build feature datasets (construct_data is defined in an earlier cell)\n",
    "complete_train = construct_data(train_history, train_label)\n",
    "complete_verification = construct_data(verification_history, verification_label)\n",
    "complete_test = construct_data(test_history, test_label)\n",
    "good_train = p.concat([complete_train, complete_verification], axis=0)\n",
    "\n",
    "result, feat_importance = model_xgb(good_train, complete_test)\n",
    "\n",
    "# header=False is the documented way to suppress the header row (header=None only worked by accident)\n",
    "result.to_csv(\"data/result-0920-02-add-features.csv\", index=False, header=False)\n",
    "feat_importance.to_csv(\"data/result_feat_importance-0920-02-add-features.csv\", index=False, header=False)"
   ],
   "id": "deead005de45dc28",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:82: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:86: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:146: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:150: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:170: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:174: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:303: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:307: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:328: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:332: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:415: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:419: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:459: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:456: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:471: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:486: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:518: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:534: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:550: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:566: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:586: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:587: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:590: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:591: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:594: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:595: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:599: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:600: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:603: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:604: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:607: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:608: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:614: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:615: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:618: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:619: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:622: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:623: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:82: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:86: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:146: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:150: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:170: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:174: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:303: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:307: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:328: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:332: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:415: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:419: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:459: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:456: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:471: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:486: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:518: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:534: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:550: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:566: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:586: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:587: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:590: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:591: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:594: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:595: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:599: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:600: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:603: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:604: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:607: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:608: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:614: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:615: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:618: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:619: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:622: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:623: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:82: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:86: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:146: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:150: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:170: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:174: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:303: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:307: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:328: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:332: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:415: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:419: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\2782501759.py:459: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:456: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:471: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:486: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:518: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:534: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:550: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:566: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:586: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:587: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:590: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:591: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:594: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:595: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:599: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:600: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:603: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:604: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:607: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:608: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:614: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:615: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:618: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:619: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:622: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_1960\\1253616806.py:623: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "E:\\anaconda\\lib\\site-packages\\xgboost\\core.py:723: FutureWarning: Pass `evals` as keyword args.\n",
      "  warnings.warn(msg, FutureWarning)\n",
      "E:\\anaconda\\lib\\site-packages\\xgboost\\core.py:158: UserWarning: [20:24:58] WARNING: C:\\b\\abs_90_bwj_86a\\croot\\xgboost-split_1724073762025\\work\\src\\learner.cc:740: \n",
      "Parameters: { \"aucsilent\" } are not used.\n",
      "\n",
      "  warnings.warn(smsg, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0]\ttrain-auc:0.84720\n",
      "[1]\ttrain-auc:0.85947\n",
      "[2]\ttrain-auc:0.86001\n"
     ]
    }
   ],
   "execution_count": 41
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "a2de98a40b231d6a"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
