{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:06.198098Z",
     "start_time": "2024-09-24T01:58:06.174843Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import pandas as p\n",
    "import numpy as n\n",
    "import xgboost as xgb\n",
    "from datetime import date\n",
    "import lightgbm as lgb"
   ],
   "id": "b803ba0723f6d1b4",
   "outputs": [],
   "execution_count": 32
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:06.334711Z",
     "start_time": "2024-09-24T01:58:06.304792Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def getWeekday(row):\n",
    "    if row == 'nan':\n",
    "        return n.nan\n",
    "    else:\n",
    "        return date(int(row[0:4]), int(row[4:6]), int(row[6:8])).weekday() + 1"
   ],
   "id": "e58c17bab77f815f",
   "outputs": [],
   "execution_count": 33
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:06.689610Z",
     "start_time": "2024-09-24T01:58:06.648720Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def prepr(raw):\n",
    "    \"\"\"Add discount / distance / date features (and the 15-day label) to offline coupon records.\n",
    "\n",
    "    Adds: num, MJ, JIAN, MI_COST, DISCOUNT, NUII_DISTANCE, DATE_RECEIVED,\n",
    "    weekday, weekday_type and, when a 'Date' column exists, label.\n",
    "    \"\"\"\n",
    "    pre = raw.copy()\n",
    "    pre['num'] = 1\n",
    "    # discount: 'a:b' coupons mean spend a, save b; anything else is a plain rate\n",
    "    pre['MJ'] = list(map(lambda x: 1 if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['JIAN'] = list(map(lambda x: int(str(x).split(':')[1]) if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['MI_COST'] = list(map(lambda x: int(str(x).split(':')[0]) if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['DISCOUNT'] = list(map(lambda x: (float(str(x).split(':')[0]) - float(str(x).split(':')[1])) / float(str(x).split(':')[0]) if ':' in str(x) else float(x), pre['Discount_rate']))\n",
    "    # distance: -1 marks a missing value, flagged separately below\n",
    "    pre['Distance'] = pre['Distance'].fillna(-1)  # assignment instead of deprecated inplace=True\n",
    "    pre['NUII_DISTANCE'] = pre['Distance'].map(lambda x: 1 if x == -1 else 0)\n",
    "    # dates\n",
    "    pre['DATE_RECEIVED'] = p.to_datetime(pre['Date_received'], format='%Y%m%d')\n",
    "    if 'Date' in pre.columns.tolist():\n",
    "        pre['DATE'] = p.to_datetime(pre['Date'], format='%Y%m%d')\n",
    "        # label = 1 iff the coupon was consumed within 15 days of receipt.\n",
    "        # BUGFIX: elapsed days must be DATE - DATE_RECEIVED; the original reversed\n",
    "        # the subtraction, which labelled every consumed coupon 1 regardless of\n",
    "        # delay. A missing Date gives NaT -> nan -> comparison False -> 0, as before.\n",
    "        pre['label'] = list(\n",
    "            map(lambda received, used: 1 if (used - received).total_seconds() / (24 * 3600) <= 15 else 0,\n",
    "                pre['DATE_RECEIVED'], pre['DATE']))\n",
    "        pre['label'] = pre['label'].map(int)\n",
    "    pre['weekday'] = pre['Date_received'].astype(str).apply(getWeekday)\n",
    "    # weekend flag: weekday 6/7 = Saturday/Sunday\n",
    "    pre['weekday_type'] = pre['weekday'].apply(lambda x: 1 if x in [6, 7] else 0)\n",
    "    return pre"
   ],
   "id": "457eb1c3c1e449df",
   "outputs": [],
   "execution_count": 34
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:06.986815Z",
     "start_time": "2024-09-24T01:58:06.961884Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def prepr_online(raw):\n",
    "    \"\"\"Add discount / date features (and the 15-day label) to online action records.\n",
    "\n",
    "    Same as prepr() but without the Distance features; non-numeric discount\n",
    "    values (e.g. the string 'fixed') map to DISCOUNT 0 instead of raising.\n",
    "    \"\"\"\n",
    "    pre = raw.copy()\n",
    "    pre['num'] = 1\n",
    "    # discount: 'a:b' coupons mean spend a, save b\n",
    "    pre['MJ'] = list(map(lambda x: 1 if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['JIAN'] = list(map(lambda x: int(str(x).split(':')[1]) if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['MI_COST'] = list(map(lambda x: int(str(x).split(':')[0]) if ':' in str(x) else 0, pre['Discount_rate']))\n",
    "    pre['DISCOUNT'] = list(map(lambda x: (float(str(x).split(':')[0]) - float(str(x).split(':')[1])) / float(str(x).split(':')[0]) if ':' in str(x) else float(x) if str(x).replace('.', '', 1).isdigit() else 0, pre['Discount_rate']))\n",
    "    # dates\n",
    "    pre['DATE_RECEIVED'] = p.to_datetime(pre['Date_received'], format='%Y%m%d')\n",
    "    if 'Date' in pre.columns.tolist():\n",
    "        pre['DATE'] = p.to_datetime(pre['Date'], format='%Y%m%d')\n",
    "        # label = 1 iff the coupon was consumed within 15 days of receipt.\n",
    "        # BUGFIX: elapsed days must be DATE - DATE_RECEIVED (the original reversed\n",
    "        # the subtraction). Missing Date -> NaT -> nan -> label 0, as before.\n",
    "        pre['label'] = list(\n",
    "            map(lambda received, used: 1 if (used - received).total_seconds() / (24 * 3600) <= 15 else 0,\n",
    "                pre['DATE_RECEIVED'], pre['DATE']))\n",
    "        pre['label'] = pre['label'].map(int)\n",
    "    pre['weekday'] = pre['Date_received'].astype(str).apply(getWeekday)\n",
    "    # weekend flag: weekday 6/7 = Saturday/Sunday\n",
    "    pre['weekday_type'] = pre['weekday'].apply(lambda x: 1 if x in [6, 7] else 0)\n",
    "    return pre"
   ],
   "id": "b7d0d98d19059d9b",
   "outputs": [],
   "execution_count": 35
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:07.031695Z",
     "start_time": "2024-09-24T01:58:07.019728Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def construct_data(history,train_online, label):\n",
    "    \"\"\"Assemble the training frame: label-window features joined with offline + online history features.\"\"\"\n",
    "    label_f = get_label_f(label)\n",
    "    history_f = get_history_f(history, label)\n",
    "    # online-history features merged onto the offline-history frame\n",
    "    online_f = get_online_f(train_online, history_f)\n",
    "    # drop columns shared with label_f, then join side by side.\n",
    "    # BUGFIX: concatenate the online-augmented frame; the original concatenated\n",
    "    # history_f, silently discarding every online feature computed above.\n",
    "    common = list(set(label_f.columns.tolist()) & set(online_f.columns.tolist()))\n",
    "    data = p.concat([label_f, online_f.drop(common, axis=1)], axis=1)\n",
    "    # de-duplicate and rebuild a clean RangeIndex\n",
    "    data.drop_duplicates(subset=None, keep='last', inplace=True)\n",
    "    data.index = range(len(data))\n",
    "    return data"
   ],
   "id": "3ca08a0f7f32403",
   "outputs": [],
   "execution_count": 36
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:07.138410Z",
     "start_time": "2024-09-24T01:58:07.096523Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_online_f(online_history,offline_history):\n",
    "    data = online_history.copy()\n",
    "    data = data.drop(data[data['Coupon_id'] == 'fixed'].index)\n",
    "    data['Coupon_id'] = data['Coupon_id'].map(int)\n",
    "    data['Date_received'] = data['Date_received'].map(int)\n",
    "    x = offline_history.copy()\n",
    "    keys=['User_id']\n",
    "    # 用户线上操作次数\n",
    "    temp = data.groupby('User_id').size().reset_index(name='online_operation_cnt')\n",
    "    x = p.merge(x,temp,how='left',on=keys)\n",
    "    #用户线上点击次数\n",
    "    temp = data[data.Action==0].groupby('User_id').size().reset_index(name='online_click_cnt')\n",
    "    x = p.merge(x,temp,how='left',on=keys)\n",
    "    #用户线上点击率\n",
    "    x['online_click_rate']=x.online_click_cnt/x.online_operation_cnt\n",
    "    #用户线上购买次数\n",
    "    temp = data[data.Action==1].groupby(keys).size().reset_index(name='online_shop_cnt')\n",
    "    x=p.merge(x,temp,how='left',on=keys)\n",
    "    #用户线上购买率\n",
    "    x['online_shop_rate'] = x.online_shop_cnt/x.online_operation_cnt\n",
    "    #用户线上领取次数\n",
    "    temp = data[data.Action !=0].groupby('User_id').size().reset_index(name='online_receive_cnt')\n",
    "    x=p.merge(x,temp,how='left',on=keys)\n",
    "    #用户线上领取率\n",
    "    x['online_receive_rate'] = x.online_receive_cnt/x.online_operation_cnt\n",
    "    #用户线上不消费次数\n",
    "    temp = data[(p.isnull(data.Date)) &(data.Coupon_id!=0)].groupby('User_id').size().reset_index(name='online_none_consume_cnt')\n",
    "    x=p.merge(x,temp,how='left',on=keys)\n",
    "    #用户线上核销次数\n",
    "    temp = data[(p.notnull(data['Date'])) & (data['Coupon_id']!=0)].groupby('User_id').size().reset_index(name='online_use_coupon_cnt')\n",
    "    x=p.merge(x,temp,how='left',on=keys)\n",
    "    #用户线上优惠券核销率\n",
    "    x['online_coupon_use_rate'] = x.online_use_coupon_cnt/x.online_receive_cnt\n",
    "    keys=['User_id','weekday_type']\n",
    "    #用户周内/周末线上领取次数\n",
    "    temp = data[data.Action !=0].groupby(keys).size().reset_index(name='online_receive_weekdays_cnt')\n",
    "    x=p.merge(x,temp,how='left',on=keys)\n",
    "    \n",
    "    # 用户周内/周末线上核销次数\n",
    "    temp = data[(p.isnull(data.Date)) &(data.Coupon_id!=0)].groupby(keys).size().reset_index(name='online_use_coupon_weekday_type_cnt')\n",
    "    x=p.merge(x,temp,how='left',on=keys)\n",
    "    \n",
    "    # 用户周内/周末线上核销率\n",
    "    x['online_use_coupon_weekday_type_rate'] = x.online_use_coupon_weekday_type_cnt/x.online_receive_weekdays_cnt\n",
    "    \n",
    "    return x"
   ],
   "id": "a54c17c5cfc1cc0f",
   "outputs": [],
   "execution_count": 37
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:07.697895Z",
     "start_time": "2024-09-24T01:58:07.281030Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_history_f(history, label):\n",
    "    data = history.copy()\n",
    "    data['Coupon_id'] = data['Coupon_id'].map(int)\n",
    "    data['Date_received'] = data['Date_received'].map(int)\n",
    "    h_f = label.copy()\n",
    "    ###########################      用户\n",
    "    keys = ['User_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    # 用户  领满减数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'MJ'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减率\n",
    "    h_f[prefixs + 'lu_MJ'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'MJ'], h_f[prefixs + 'received']))\n",
    "\n",
    "    # 用户15天内核销最大折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销最小折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销平均折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销中位折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_mean'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户15天内核销满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券减额中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户15天内核销满减券最低消费中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_medain'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################         用户+商家\n",
    "    keys = ['User_id', 'Merchant_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    # 用户+商家15天内核销最大折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销最小折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销平均折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销中位折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券减额中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家15天内核销满减券最低消费中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_medain'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################           用户+优惠券\n",
    "    keys = ['User_id', 'Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################             用户+折扣率\n",
    "    keys = ['User_id', 'DISCOUNT']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+折扣率 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+折扣率 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+折扣率 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################            用户+日期\n",
    "    keys = ['User_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    ################################# todo            用户+周内/周末\n",
    "    keys = ['User_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+周内/周末核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+周内/周末核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    \n",
    "  \n",
    "    #################################                用户+商家+优惠券\n",
    "    keys = ['User_id', 'Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+优惠券 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+优惠券 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+优惠券 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################                   用户+商家+日期\n",
    "    keys = ['User_id', 'Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    #  用户+商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+日期 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+日期 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################  todo                 用户+商家+周内/周末\n",
    "    keys = ['User_id', 'Merchant_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    #  用户+商家+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+周内/周末 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家+周内/周末 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    #################################                   用户+优惠券+日期\n",
    "    keys = ['User_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+日期 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+日期 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+日期 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "#################################todo                   用户+优惠券+周末/周内\n",
    "    keys = ['User_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+周末/周内 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+周末/周内 核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 用户+优惠券+周末/周内 核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################        商家\n",
    "    keys = ['Merchant_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    # 商家15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 商家15天内核销最大折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销最小折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销平均折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销中位折扣率\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券减额中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费平均值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家15天内核销满减券最低消费中位值\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_medain'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################             商家+优惠券\n",
    "    keys = ['Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################             商家+折扣率\n",
    "    keys = ['Merchant_id', 'DISCOUNT']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+折扣率核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+折扣率核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################             商家+日期\n",
    "    keys = ['Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "     #################################  todo           商家+周末/周内\n",
    "    keys = ['Merchant_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+周末/周内领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+周末/周内核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+周末/周内核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    #################################             商家+优惠券+日期\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+日期 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    ################################# todo             商家+优惠券+周内/周末\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+周内/周末 领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+周内/周末核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 商家+优惠券+周内/周末核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    #################################               优惠券\n",
    "    keys = ['Coupon_id']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    # 优惠券15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 优惠券15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 优惠券15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_mean'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 优惠券15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    #################################               优惠券+日期\n",
    "    keys = ['Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+日期核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+日期核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "    \n",
    "    #################################       todo        优惠券+周内/周末\n",
    "    keys = ['Coupon_id', 'weekday_type']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+周内/周末核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 优惠券+周内/周末核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################             折扣率\n",
    "    keys = ['DISCOUNT']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 折扣率核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 折扣率核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received']))\n",
    "\n",
    "    #################################日期\n",
    "    keys = ['DATE_RECEIVED']\n",
    "    prefixs = 'history_field_' + '_'.join(keys) + '_'\n",
    "    # 当日领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 当日核销数\n",
    "    pivot = p.DataFrame(\n",
    "        data[data['Date'].map(lambda x: str(x) != 'nan')].pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received_use'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, on=keys, how='left')\n",
    "    # 当日核销率\n",
    "    h_f[prefixs + 'lu_use'] = list(\n",
    "        map(lambda x, y: x / y if y != 0 else 0, h_f[prefixs + 'received_use'], h_f[prefixs + 'received'])) \n",
    "    \n",
    "    # 当日15天内核销的最大距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_max'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 当日15天内核销的最小距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_min'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 当日15天内核销的平均距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_mean'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 当日15天内核销的中位距离\n",
    "    pivot = p.DataFrame(data[data['label'] == 1].pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + 'Distance_15_median'}).reset_index()\n",
    "    h_f = p.merge(h_f, pivot, how='left', on=keys)\n",
    "    # 用户距离正反排序\n",
    "    h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
    "    h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 用户折扣正反排序\n",
    "    h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
    "    h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 用户领券日期正反排序\n",
    "    h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    ####\n",
    "    # 商家距离正反排序\n",
    "    h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
    "    h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 商家折扣正反排序\n",
    "    h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
    "    h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 商家领券日期正反排序\n",
    "    h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    #####\n",
    "\n",
    "    ############################################~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
    "    # 优惠券距离正反排序\n",
    "    h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
    "    h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券折扣正反排序\n",
    "    h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
    "    h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券领券日期正反排序\n",
    "    h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    h_f.fillna(0, downcast='infer', inplace=True)\n",
    "    return h_f"
   ],
   "id": "e6f67e35a87e7778",
   "outputs": [],
   "execution_count": 38
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:08.287830Z",
     "start_time": "2024-09-24T01:58:07.707870Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_label_f(label):\n",
    "    data = label.copy()\n",
    "    data['Coupon_id'] = data['Coupon_id'].map(int)\n",
    "    data['Date_received'] = data['Date_received'].map(int)\n",
    "    l_f = label.copy()\n",
    "    ###################################用户\n",
    "    keys = ['User_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 每个用户领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    # 用户领券的最大距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领券的平均距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领券的距离中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率最大值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率最小值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率平均数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_mean'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户优惠券折扣率中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费平均数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券最低消费中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户领满减券减额中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 用户第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 用户最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################用户+商家\n",
    "    keys = ['User_id', 'Merchant_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率最大值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率最小值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率平均数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_mean'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家优惠券折扣率中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费平均数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券最低消费中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 用户+商家领满减券减额中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 用户+商家第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 用户+商家最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################用户+优惠券\n",
    "    keys = ['User_id', 'Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################用户+折扣率\n",
    "    keys = ['User_id', 'DISCOUNT']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 用户+折扣率第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 用户+折扣率最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################用户+日期\n",
    "    keys = ['User_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #todo 用户+日类型领券数 周内/周末\n",
    "    keys = ['User_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################用户+商家+优惠券\n",
    "    keys = ['User_id', 'Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################用户+商家+日期\n",
    "    keys = ['User_id', 'Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    ################################# todo 用户+商家+周内/周末\n",
    "    keys = ['User_id', 'Merchant_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+商家+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################用户+优惠券+日期\n",
    "    keys = ['User_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #todo 20240920 #################################用户+优惠券+周类型周内或周末\n",
    "    keys = ['User_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 用户+优惠券+周类型周内或周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################商家\n",
    "    keys = ['Merchant_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家被领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的最大距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的平均距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领券的距离中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率最大值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=max)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率最小值\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=min)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率平均数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.mean)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_mean'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家优惠券折扣率中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='DISCOUNT', aggfunc=n.median)).rename(\n",
    "        columns={'DISCOUNT': prefixs + 'DISCOUNT_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=max)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_max'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=min)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_min'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费平均数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.mean)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_aver'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券最低消费中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='MI_COST', aggfunc=n.median)).rename(\n",
    "        columns={'MI_COST': prefixs + 'MI_COST_median'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额最大值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=max)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额最小值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=min)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额平均值\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.mean)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 商家被领满减券减额中位数\n",
    "    pivot = p.DataFrame(data[data['MJ'] == 1].pivot_table(index=keys, values='JIAN', aggfunc=n.median)).rename(\n",
    "        columns={'JIAN': prefixs + \"JIAN_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 商家被第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 商家被最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################商家+优惠券\n",
    "    keys = ['Merchant_id', 'Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    #################################商家+折扣率\n",
    "    keys = ['Merchant_id', 'DISCOUNT']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+折扣率领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 商家+折扣率第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 商家+折扣率最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################商家+日期\n",
    "    keys = ['Merchant_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################todo 商家+周内/周末\n",
    "    keys = ['Merchant_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################商家+优惠券+日期\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################todo 商家+优惠券+周内/周末\n",
    "    keys = ['Merchant_id', 'Coupon_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 商家+优惠券+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################优惠券\n",
    "    keys = ['Coupon_id']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    # 优惠券被领券的最大距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=max)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_max\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 优惠券被领券的最小距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=min)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_min\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 优惠券被领券的平均距离\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.mean)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_aver\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    # 优惠券被领券的距离中位数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='Distance', aggfunc=n.median)).rename(\n",
    "        columns={'Distance': prefixs + \"Distance_median\"}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    #################################优惠券+日期\n",
    "    keys = ['Coupon_id', 'DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+日期领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    ################################# todo 优惠券+周内/周末\n",
    "    keys = ['Coupon_id', 'weekday_type']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 优惠券+周内/周末领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    \n",
    "    #################################折扣率\n",
    "    keys = ['DISCOUNT']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 折扣率 被领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "    tmp = data[keys + ['DATE_RECEIVED']].sort_values(['DATE_RECEIVED'], ascending=True)\n",
    "    # 折扣率 被第一次领券\n",
    "    first = tmp.drop_duplicates(keys, keep=\"first\")\n",
    "    first[prefixs + \"is_first_received\"] = 1\n",
    "    l_f = p.merge(l_f, first, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    # 折扣率 被最后一次领券\n",
    "    last = tmp.drop_duplicates(keys, keep=\"last\")\n",
    "    last[prefixs + \"is_last_received\"] = 1\n",
    "    l_f = p.merge(l_f, last, on=keys + ['DATE_RECEIVED'], how=\"left\")\n",
    "    #################################日期\n",
    "    keys = ['DATE_RECEIVED']\n",
    "    prefixs = 'label_field_' + '_'.join(keys) + '_'\n",
    "    # 当日领券数\n",
    "    pivot = p.DataFrame(data.pivot_table(index=keys, values='num', aggfunc=len)).rename(\n",
    "        columns={'num': prefixs + 'received'}).reset_index()\n",
    "    l_f = p.merge(l_f, pivot, on=keys, how='left')\n",
    "\n",
    "    # 用户距离正反排序\n",
    "    l_f['label_User_distance_true_rank'] = l_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
    "    l_f['label_User_distance_False_rank'] = l_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 用户折扣正反排序\n",
    "    l_f['label_User_discount_rate_true_rank'] = l_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
    "    l_f['label_User_discount_rate_False_rank'] = l_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    ####\n",
    "    # 商家距离正反排序\n",
    "    l_f['label_Merchant_distance_true_rank'] = l_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
    "    l_f['label_Merchant_distance_False_rank'] = l_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 商家折扣正反排序\n",
    "    l_f['label_Merchant_discount_rate_true_rank'] = l_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
    "    l_f['label_Merchant_discount_rate_False_rank'] = l_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    #####\n",
    "\n",
    "    ############################################~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
    "    # 优惠券距离正反排序\n",
    "    l_f['label_Coupon_distance_true_rank'] = l_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
    "    l_f['label_Coupon_distance_False_rank'] = l_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券折扣正反排序\n",
    "    l_f['label_Coupon_discount_rate_true_rank'] = l_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
    "    l_f['label_Coupon_discount_rate_False_rank'] = l_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
    "\n",
    "    # 优惠券领券日期正反排序\n",
    "    l_f['label_Coupon_date_received_true_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
    "    l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
    "\n",
    "    l_f.fillna(0, downcast='infer', inplace=True)\n",
    "    return l_f"
   ],
   "id": "c2f56294b15afcc7",
   "outputs": [],
   "execution_count": 39
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T01:58:08.422471Z",
     "start_time": "2024-09-24T01:58:08.301793Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def lightgbm_model(train, test):\n",
    "    \"\"\"Train a LightGBM binary classifier and predict coupon-usage probability.\n",
    "\n",
    "    train: feature frame containing a 'label' column plus id/date columns.\n",
    "    test:  feature frame without a label column.\n",
    "    Returns (results, feat_importance): per-row probabilities keyed by\n",
    "    User_id/Coupon_id/Date_received, and split-count feature importances\n",
    "    sorted descending.\n",
    "    \"\"\"\n",
    "    params = {\n",
    "        'boosting_type': 'gbdt',  # boosting type\n",
    "        'objective': 'binary',  # objective function\n",
    "        'metric': 'auc',  # evaluation metric\n",
    "        'learning_rate': 0.01,  # learning rate\n",
    "        'max_depth': 5,  # maximum tree depth\n",
    "        'num_leaves': 31,  # number of leaves per tree\n",
    "        'feature_fraction': 0.7,  # fraction of features sampled per tree\n",
    "        'bagging_fraction': 0.9,  # fraction of rows sampled per tree\n",
    "        'bagging_freq': 5,  # perform bagging every k iterations\n",
    "        'lambda_l1': 1,  # L1 regularization\n",
    "        'lambda_l2': 1,  # L2 regularization\n",
    "        'min_child_weight': 1,  # minimal child node weight\n",
    "    }\n",
    "\n",
    "    # Identifier / leakage columns that must not be fed to the model.\n",
    "    train_drop = ['User_id', 'Coupon_id', 'Merchant_id', 'Discount_rate', 'Date', 'DATE_RECEIVED', 'Date_received', 'label', 'DATE']\n",
    "    test_drop = ['User_id', 'Coupon_id', 'Merchant_id', 'Discount_rate', 'DATE_RECEIVED', 'Date_received']\n",
    "\n",
    "    # Train. (The original also built an lgb.Dataset for `test` that was\n",
    "    # never used -- removed as dead code.)\n",
    "    lgb_train = lgb.Dataset(train.drop(train_drop, axis=1), label=train['label'])\n",
    "    gbm = lgb.train(params, lgb_train, num_boost_round=800, valid_sets=[lgb_train])\n",
    "\n",
    "    # Predict\n",
    "    predict = p.DataFrame(gbm.predict(test.drop(test_drop, axis=1)), columns=['prob'])\n",
    "    # concat(axis=1) aligns on index; reset test's index so rows cannot be\n",
    "    # scrambled when `test` carries a non-default index after upstream\n",
    "    # filtering/merging (predict has a fresh RangeIndex).\n",
    "    results = p.concat([test[['User_id', 'Coupon_id', 'Date_received']].reset_index(drop=True), predict], axis=1)\n",
    "\n",
    "    # Feature importance (split counts), most important first.\n",
    "    feat_importance = p.DataFrame(columns=['feature_name', 'importance'])\n",
    "    feat_importance['feature_name'] = gbm.feature_name()\n",
    "    feat_importance['importance'] = gbm.feature_importance(importance_type='split')\n",
    "    feat_importance.sort_values(['importance'], ascending=False, inplace=True)\n",
    "    return results, feat_importance"
   ],
   "id": "5f011fe790c84237",
   "outputs": [],
   "execution_count": 40
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-09-24T02:14:26.338642Z",
     "start_time": "2024-09-24T01:58:08.428454Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Raw data: Tianchi O2O offline/online train sets and the revised offline test set.\n",
    "raw_train = p.read_csv(\"data/ccf_offline_stage1_train.csv\")\n",
    "raw_train_online = p.read_csv(\"data/ccf_online_stage1_train.csv\")\n",
    "raw_test = p.read_csv(\"data/ccf_offline_stage1_test_revised.csv\")\n",
    "# Preprocessing: derive discount / distance / date feature columns.\n",
    "prepr_train = prepr(raw_train)\n",
    "prepr_train_online = prepr_online(raw_train_online)\n",
    "prepr_test = prepr(raw_test)\n",
    "\n",
    "# Split into sliding time windows: a 60-day history window used for feature\n",
    "# extraction and a 31-day label window used for targets.\n",
    "# Training set: offline history and label windows.\n",
    "train_history = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/3/2', periods=60))]\n",
    "train_label = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/5/16', periods=31))]\n",
    "# Training set: online history and label windows (same date ranges as offline).\n",
    "train_history_online = prepr_train_online[prepr_train_online['DATE_RECEIVED'].isin(p.date_range('2016/3/2', periods=60))]\n",
    "train_label_online = prepr_train_online[prepr_train_online['DATE_RECEIVED'].isin(p.date_range('2016/5/16', periods=31))]\n",
    "\n",
    "# Validation set: offline history and label windows.\n",
    "verification_history = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/1/16', periods=60))]\n",
    "verification_label = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/3/31', periods=31))]\n",
    "\n",
    "# Validation set: online history and label windows.\n",
    "# NOTE(review): train_label_online / verification_label_online are built here\n",
    "# but never passed to construct_data below -- confirm whether they are needed.\n",
    "verification_history_online = prepr_train_online[prepr_train_online['DATE_RECEIVED'].isin(p.date_range('2016/1/16', periods=60))]\n",
    "verification_label_online = prepr_train_online[prepr_train_online['DATE_RECEIVED'].isin(p.date_range('2016/3/31', periods=31))]\n",
    "# Test set: history windows; the label field is the revised test file itself.\n",
    "test_history = prepr_train[prepr_train['DATE_RECEIVED'].isin(p.date_range('2016/4/17', periods=60))]\n",
    "test_history_online = prepr_train_online[prepr_train_online['DATE_RECEIVED'].isin(p.date_range('2016/4/17', periods=60))]\n",
    "test_label = prepr_test.copy()\n",
    "# Build the model-ready datasets (construct_data joins history-window features\n",
    "# onto the label field), then train on train+validation combined.\n",
    "complete_train = construct_data(train_history,train_history_online, train_label)\n",
    "complete_verification = construct_data(verification_history,verification_history_online, verification_label)\n",
    "complete_test = construct_data(test_history,test_history_online, test_label)\n",
    "good_train = p.concat([complete_train, complete_verification], axis=0)\n",
    "result, feat_importance = lightgbm_model(good_train, complete_test)\n",
    "# Write submission and feature-importance CSVs.\n",
    "# NOTE(review): header=None relies on None being falsy; pandas documents the\n",
    "# `header` parameter as bool/list -- header=False would be clearer.\n",
    "result.to_csv(\"data/result_0924_01_add_online_features.csv\", index=False, header=None)\n",
    "feat_importance.to_csv(\"data/result_feat_importance_0924_01_add_online_features.csv\", index=False, header=None)"
   ],
   "id": "deead005de45dc28",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:82: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:86: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:146: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:150: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:170: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:174: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:303: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:307: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:328: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:332: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:415: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:419: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:459: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:456: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:471: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:486: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:518: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:534: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:550: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:566: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:586: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:587: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:590: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:591: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:594: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:595: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:599: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:600: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:603: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:604: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:607: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:608: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:614: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:615: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:618: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:619: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:622: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:623: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:82: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:86: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:146: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:150: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:170: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:174: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:303: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:307: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:328: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:332: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:415: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:419: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:459: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:456: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:471: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:486: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:518: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:534: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:550: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:566: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:586: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:587: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:590: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:591: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:594: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:595: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:599: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:600: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:603: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:604: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:607: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:608: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:614: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:615: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:618: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:619: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:622: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:623: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:82: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:86: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:146: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:150: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:170: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:174: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:303: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:307: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:328: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:332: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:415: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  first[prefixs + \"is_first_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:419: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  last[prefixs + \"is_last_received\"] = 1\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\2782501759.py:459: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  l_f['label_Coupon_date_received_False_rank'] = l_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:456: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:471: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:486: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:518: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:534: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:550: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:566: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f[prefixs + 'lu_use'] = list(\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:586: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_true_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:587: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_distance_False_rank'] = h_f.groupby('User_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:590: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_true_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:591: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_discount_rate_False_rank'] = h_f.groupby('User_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:594: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_true_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:595: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_User_date_received_False_rank'] = h_f.groupby('User_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:599: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_true_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:600: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_distance_False_rank'] = h_f.groupby('Merchant_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:603: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_true_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:604: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_discount_rate_False_rank'] = h_f.groupby('Merchant_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:607: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_true_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:608: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Merchant_date_received_False_rank'] = h_f.groupby('Merchant_id')['DATE_RECEIVED'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:614: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_true_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:615: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_distance_False_rank'] = h_f.groupby('Coupon_id')['Distance'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:618: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_true_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:619: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_discount_rate_False_rank'] = h_f.groupby('Coupon_id')['DISCOUNT'].rank(ascending=False)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:622: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_true_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=True)\n",
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_34024\\1253616806.py:623: PerformanceWarning: DataFrame is highly fragmented.  This is usually the result of calling `frame.insert` many times, which has poor performance.  Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`\n",
      "  h_f['label_Coupon_date_received_False_rank'] = h_f.groupby('Coupon_id')['DATE_RECEIVED'].rank(ascending=False)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LightGBM] [Info] Number of positive: 32923, number of negative: 358758\n",
      "[LightGBM] [Info] Auto-choosing col-wise multi-threading, the overhead of testing was 0.513944 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 13953\n",
      "[LightGBM] [Info] Number of data points in the train set: 391681, number of used features: 207\n",
      "[LightGBM] [Info] [binary:BoostFromScore]: pavg=0.084056 -> initscore=-2.388477\n",
      "[LightGBM] [Info] Start training from score -2.388477\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "[LightGBM] [Warning] No further splits with positive gain, best gain: -inf\n",
      "... [output truncated: repeated identical LightGBM warnings removed] ...\n"
     ]
    }
   ],
   "execution_count": 41
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
