{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:25:22.311070Z",
     "iopub.status.busy": "2023-11-13T13:25:22.310785Z",
     "iopub.status.idle": "2023-11-13T13:25:34.971432Z",
     "shell.execute_reply": "2023-11-13T13:25:34.970783Z",
     "shell.execute_reply.started": "2023-11-13T13:25:22.311041Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# --- Imports and configuration ---\n",
    "import pandas as pd\n",
    "from datetime import datetime\n",
    "import os\n",
    "\n",
    "# Contest stage identifier; test files are suffixed with it (e.g. ..._B.csv).\n",
    "stage = 'B'\n",
    "# Root directory of the raw contest data, relative to this notebook.\n",
    "root = '../../../contest'\n",
    "\n",
    "# Raw APS transaction tables: full training set plus the stage-specific test set.\n",
    "df_train = pd.read_csv(os.path.join(root, 'train/GSLD_TR_APS.csv'))\n",
    "df_test = pd.read_csv(os.path.join(root, '{}/GSLD_TR_APS_{}.csv'.format(stage, stage)))\n",
    "\n",
    "# Directory where the engineered feature tables are written.\n",
    "save_path = '../data'\n",
    "\n",
    "# Observation cut-off dates used for the recency feature\n",
    "# (days since a customer's last transaction) in prepro().\n",
    "end_date_train = datetime(1996,7,5)\n",
    "end_date_test = datetime(1996,9,4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:25:34.972755Z",
     "iopub.status.busy": "2023-11-13T13:25:34.972548Z",
     "iopub.status.idle": "2023-11-13T13:25:34.975578Z",
     "shell.execute_reply": "2023-11-13T13:25:34.974978Z",
     "shell.execute_reply.started": "2023-11-13T13:25:34.972730Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# NOTE(review): disabled experiment — inserts AGET payment records missing\n",
    "# from the stage-B APS table. Kept for provenance; not part of the final\n",
    "# feature pipeline.\n",
    "# # Insert AGET data into the stage-B APS table\n",
    "# df_test_aget = pd.read_csv(os.path.join(root, '{}/GSLD_AGET_PAY_{}.csv'.format(stage, stage)))\n",
    "# df_test_aget = df_test_aget[['CUST_NO','DATE','TR_AMT']].drop_duplicates()\n",
    "# # AGET records already present in APS (matched on customer/date/amount)\n",
    "# tmp_df = df_test_aget.merge(df_test, how='left', on=['CUST_NO','DATE'])\n",
    "# tmp_df = tmp_df[tmp_df.TR_AMT.round(0)==tmp_df.APSDTRAMT.round(0)]\n",
    "# # Records that still need to be added\n",
    "# add_df = df_test_aget.merge(tmp_df, on=['CUST_NO','DATE'], how='left')\n",
    "# add_df = add_df[add_df.APSDTRCOD.isnull()]\n",
    "# add_df = add_df[['CUST_NO','DATE','TR_AMT_x']]\n",
    "# add_df = add_df.rename(columns={'TR_AMT_x':'APSDTRAMT'})\n",
    "# # Most frequent code/summary/channel values among AGET entries\n",
    "# add_df['APSDTRCOD'] = '4e351b2ba2d8456a6440a01d7ef370e0'\n",
    "# add_df['APSDABS'] = '900594324039aeb5f9197a859c64b26b'\n",
    "# add_df['APSDTRCHL'] = '6d9345571f7032b7281da7f93d1ffb22'\n",
    "# # Append add_df to the APS table\n",
    "# print(df_test.shape)\n",
    "# df_test = pd.concat([df_test, add_df], axis=0)\n",
    "# print(df_test.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:25:34.976703Z",
     "iopub.status.busy": "2023-11-13T13:25:34.976528Z",
     "iopub.status.idle": "2023-11-13T13:25:37.690489Z",
     "shell.execute_reply": "2023-11-13T13:25:37.689859Z",
     "shell.execute_reply.started": "2023-11-13T13:25:34.976681Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Category vocabularies derived from the TRAINING data only, so train and\n",
    "# test produce the same feature columns in prepro().\n",
    "abs_head3_list = df_train.APSDABS.value_counts()[:3].index\n",
    "abs_head9_list = df_train.APSDABS.value_counts()[:9].index # each appears more than 100,000 times\n",
    "aps_cd_list = df_train.APSDTRCOD.value_counts().index\n",
    "chl_head17_list = df_train.APSDTRCHL.value_counts()[:17].index # each appears more than 1,000 times"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:25:37.691806Z",
     "iopub.status.busy": "2023-11-13T13:25:37.691600Z",
     "iopub.status.idle": "2023-11-13T13:25:37.707651Z",
     "shell.execute_reply": "2023-11-13T13:25:37.707010Z",
     "shell.execute_reply.started": "2023-11-13T13:25:37.691782Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "def prepro(df, end_date):\n",
    "    \"\"\"Aggregate raw APS transactions into one feature row per customer.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    df : pandas.DataFrame\n",
    "        Raw APS table with columns CUST_NO, DATE (yyyymmdd), APSDTRAMT,\n",
    "        APSDTRCOD, APSDTRCHL, APSDABS.\n",
    "        NOTE: mutated in place (DATE parsed to datetime, APSDTRAMT rescaled,\n",
    "        abs_amt column added) — pass a copy if the raw frame is needed later.\n",
    "    end_date : datetime.datetime\n",
    "        Observation cut-off used for the recency feature aps_last_diff.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    pandas.DataFrame\n",
    "        One feature row per customer: global amount aggregates, recency,\n",
    "        category cardinalities, per-category amount/count features and\n",
    "        0/1 code/channel indicators.\n",
    "    \"\"\"\n",
    "    # Snapshot taken BEFORE the amount transform: the per-category features\n",
    "    # below are computed on the RAW amounts, while the global aggregates use\n",
    "    # the rescaled amounts. NOTE(review): presumably intentional (tree models\n",
    "    # are scale-insensitive) — confirm before relying on feature scale.\n",
    "    abs_df = df.copy()\n",
    "    \n",
    "    df['DATE'] = pd.to_datetime(df['DATE'], format='%Y%m%d')\n",
    "    # Non-linear rescaling x -> round((x/3.12)^3, 2); contest-specific\n",
    "    # constants, presumably undoing the data anonymisation — TODO confirm.\n",
    "    df['APSDTRAMT'] = df.APSDTRAMT.apply(lambda x: round(pow(x/3.12,3),2))\n",
    "    df['abs_amt'] = df['APSDTRAMT'].abs()  # vectorised; was .apply(lambda x: abs(x))\n",
    "    df = df.groupby('CUST_NO').agg(\n",
    "        aps_sum_amt = ('APSDTRAMT', 'sum'),\n",
    "        aps_sum_abs_amt = ('abs_amt', 'sum'),\n",
    "        aps_sum_avg_amt = ('APSDTRAMT', 'mean'),\n",
    "        aps_max_amt = ('APSDTRAMT', 'max'),\n",
    "        aps_min_amt = ('APSDTRAMT', 'min'),\n",
    "        aps_last_diff = ('DATE', 'max'),\n",
    "        aps_code_cnt = ('APSDTRCOD', 'nunique'),\n",
    "        aps_chl_cnt = ('APSDTRCHL', 'nunique'),\n",
    "        aps_abs_cnt = ('APSDABS', 'nunique'),\n",
    "    )\n",
    "    # Days between the customer's latest transaction and the cut-off date.\n",
    "    df['aps_last_diff'] = (end_date - df['aps_last_diff']).dt.days\n",
    "    \n",
    "    # Maximum (raw) amount per top-3 transaction summary (APSDABS)\n",
    "    for i, abs3 in enumerate(abs_head3_list):\n",
    "        tmp_df = abs_df[abs_df['APSDABS']==abs3].groupby('CUST_NO').agg({'APSDTRAMT':'max'}).rename(columns={'APSDTRAMT':'aps_abs'+str(i+1)+'_maxamt'})\n",
    "        df = df.merge(tmp_df, how='left', on='CUST_NO')\n",
    "    \n",
    "    # Minimum (raw) amount per top-3 transaction summary\n",
    "    for i, abs3 in enumerate(abs_head3_list):\n",
    "        tmp_df = abs_df[abs_df['APSDABS']==abs3].groupby('CUST_NO').agg({'APSDTRAMT':'min'}).rename(columns={'APSDTRAMT':'aps_abs'+str(i+1)+'_minamt'})\n",
    "        df = df.merge(tmp_df, how='left', on='CUST_NO')\n",
    "\n",
    "    # Transaction count per top-9 transaction summary\n",
    "    for i, abs9 in enumerate(abs_head9_list):\n",
    "        tmp_df = abs_df[abs_df['APSDABS']==abs9].groupby('CUST_NO').agg({'APSDTRAMT':'count'}).rename(columns={'APSDTRAMT':'aps_abs'+str(i+1)+'_cnt'})\n",
    "        df = df.merge(tmp_df, how='left', on='CUST_NO')\n",
    "\n",
    "    # 0/1 indicator: has the customer ever used this transaction code?\n",
    "    for i, cd in enumerate(aps_cd_list):\n",
    "        col_name = 'aps_code'+str(i+1)+'_ind'\n",
    "        tmp_df = abs_df[abs_df['APSDTRCOD']==cd].groupby('CUST_NO').head(1)\n",
    "        tmp_df[col_name] = 1\n",
    "        df = df.merge(tmp_df[['CUST_NO', col_name]], how='left', on='CUST_NO')\n",
    "        df[col_name] = df[col_name].fillna(0)\n",
    "\n",
    "    # 0/1 indicator: has the customer ever used this transaction channel?\n",
    "    for i, chl in enumerate(chl_head17_list):\n",
    "        col_name = 'aps_chl'+str(i+1)+'_ind'\n",
    "        tmp_df = abs_df[abs_df['APSDTRCHL']==chl].groupby('CUST_NO').head(1)\n",
    "        tmp_df[col_name] = 1\n",
    "        df = df.merge(tmp_df[['CUST_NO', col_name]], how='left', on='CUST_NO')\n",
    "        df[col_name] = df[col_name].fillna(0)\n",
    "\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "tags": []
   },
   "source": [
    "# Full dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:25:39.881050Z",
     "iopub.status.busy": "2023-11-13T13:25:39.880781Z",
     "iopub.status.idle": "2023-11-13T13:26:47.890435Z",
     "shell.execute_reply": "2023-11-13T13:26:47.889788Z",
     "shell.execute_reply.started": "2023-11-13T13:25:39.881016Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Build per-customer features over the FULL transaction history.\n",
    "# NOTE(review): prepro mutates its argument, so df_train/df_test carry\n",
    "# transformed DATE/APSDTRAMT columns after this cell — confirm before reuse.\n",
    "df_train_all = prepro(df_train, end_date_train)\n",
    "df_test_all = prepro(df_test, end_date_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:26:47.891816Z",
     "iopub.status.busy": "2023-11-13T13:26:47.891617Z",
     "iopub.status.idle": "2023-11-13T13:26:47.895632Z",
     "shell.execute_reply": "2023-11-13T13:26:47.895020Z",
     "shell.execute_reply.started": "2023-11-13T13:26:47.891793Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# # Last 1 week (disabled experiment)\n",
    "\n",
    "# df_train_1w = df_train[df_train.DATE>='19960629'].copy()\n",
    "# df_test_1w = df_test[df_test.DATE>='19960829'].copy()\n",
    "# df_train_1w = prepro(df_train_1w, end_date_train)\n",
    "# df_test_1w = prepro(df_test_1w, end_date_test)\n",
    "# df_1w  = pd.concat([df_train_1w, df_test_1w], axis=0, ignore_index=True)\n",
    "# del df_1w['aps_last_diff']\n",
    "\n",
    "# columns = df_1w.columns\n",
    "# new_column_names =[]\n",
    "# for i, column in enumerate(columns):\n",
    "#     new_column_names.append(column.replace('aps_','aps1w_'))\n",
    "\n",
    "# df_1w.columns = new_column_names\n",
    "\n",
    "# df_train_all = df_train_all.merge(df_1w, how='left', on='CUST_NO')\n",
    "# df_test_all = df_test_all.merge(df_1w, how='left', on='CUST_NO')\n",
    "\n",
    "# # Last 2 weeks (disabled experiment)\n",
    "\n",
    "# df_train_2w = df_train[df_train.DATE>='19960622'].copy()\n",
    "# df_test_2w = df_test[df_test.DATE>='19960822'].copy()\n",
    "# df_train_2w = prepro(df_train_2w, end_date_train)\n",
    "# df_test_2w = prepro(df_test_2w, end_date_test)\n",
    "# df_2w  = pd.concat([df_train_2w, df_test_2w], axis=0, ignore_index=True)\n",
    "# del df_2w['aps_last_diff']\n",
    "\n",
    "# columns = df_2w.columns\n",
    "# new_column_names =[]\n",
    "# for i, column in enumerate(columns):\n",
    "#     new_column_names.append(column.replace('aps_','aps2w_'))\n",
    "\n",
    "# df_2w.columns = new_column_names\n",
    "\n",
    "# df_train_all = df_train_all.merge(df_2w, how='left', on='CUST_NO')\n",
    "# df_test_all = df_test_all.merge(df_2w, how='left', on='CUST_NO')\n",
    "\n",
    "# # Last 1 month (disabled experiment)\n",
    "\n",
    "# df_train_1m = df_train[df_train.DATE>='19960605'].copy()\n",
    "# df_test_1m = df_test[df_test.DATE>='19960804'].copy()\n",
    "# df_train_1m = prepro(df_train_1m, end_date_train)\n",
    "# df_test_1m = prepro(df_test_1m, end_date_test)\n",
    "# df_1m  = pd.concat([df_train_1m, df_test_1m], axis=0, ignore_index=True)\n",
    "# del df_1m['aps_last_diff']\n",
    "\n",
    "# columns = df_1m.columns\n",
    "# new_column_names =[]\n",
    "# for i, column in enumerate(columns):\n",
    "#     new_column_names.append(column.replace('aps_','aps1m_'))\n",
    "\n",
    "# df_1m.columns = new_column_names\n",
    "\n",
    "# df_train_all = df_train_all.merge(df_1m, how='left', on='CUST_NO')\n",
    "# df_test_all = df_test_all.merge(df_1m, how='left', on='CUST_NO')\n",
    "\n",
    "# # Last 2 months (disabled experiment)\n",
    "\n",
    "# df_train_2m = df_train[df_train.DATE>='19960505'].copy()\n",
    "# df_test_2m = df_test[df_test.DATE>='19960704'].copy()\n",
    "# df_train_2m = prepro(df_train_2m, end_date_train)\n",
    "# df_test_2m = prepro(df_test_2m, end_date_test)\n",
    "# df_2m  = pd.concat([df_train_2m, df_test_2m], axis=0, ignore_index=True)\n",
    "# del df_2m['aps_last_diff']\n",
    "\n",
    "# columns = df_2m.columns\n",
    "# new_column_names =[]\n",
    "# for i, column in enumerate(columns):\n",
    "#     new_column_names.append(column.replace('aps_','aps2m_'))\n",
    "\n",
    "# df_2m.columns = new_column_names\n",
    "\n",
    "# df_train_all = df_train_all.merge(df_2m, how='left', on='CUST_NO')\n",
    "# df_test_all = df_test_all.merge(df_2m, how='left', on='CUST_NO')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-13T13:26:47.896796Z",
     "iopub.status.busy": "2023-11-13T13:26:47.896621Z",
     "iopub.status.idle": "2023-11-13T13:26:51.163908Z",
     "shell.execute_reply": "2023-11-13T13:26:51.163284Z",
     "shell.execute_reply.started": "2023-11-13T13:26:47.896774Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Persist the engineered feature tables for downstream modelling notebooks.\n",
    "# os.path.join used for consistency with the loading cell (same resulting paths).\n",
    "df_train_all.to_csv(os.path.join(save_path, 'GSLD_TR_APS.csv'), index=False)\n",
    "df_test_all.to_csv(os.path.join(save_path, 'GSLD_TR_APS_{}.csv'.format(stage)), index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
