{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "########### Imports\n",
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "# NOTE(review): wildcard import — no datetime names are visibly used in this\n",
    "# notebook; consider narrowing to an explicit import.\n",
    "from datetime import *\n",
    "import time\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "############ Data file paths\n",
    "# Raw contest data: training set and B-round test set.\n",
    "train_dir = '../../contest/train/'\n",
    "B_dir = '../../contest/B榜/'\n",
    "# Output directories for pickled intermediate / feature tables.\n",
    "train_pickle_dir = './pickle/train/'\n",
    "B_pickle_dir = './pickle/B/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def count_notzero(series_x):\n",
    "    \"\"\"Return the number of strictly positive (non-null) values in a Series.\"\"\"\n",
    "    positives = series_x[series_x > 0]\n",
    "    return positives.count()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def 加工企业流水1():\n",
    "    \"\"\"Build a per-customer daily transaction summary and cache it.\n",
    "\n",
    "    For both the train set and the B test set: read the raw transaction\n",
    "    detail CSV, undo the amount obfuscation, aggregate inflow/outflow\n",
    "    amount and count per customer per day, compute days-to-cutoff, and\n",
    "    dump the result to '<pickle_dir>对公流水_临时表.p'.\n",
    "\n",
    "    Returns:\n",
    "        (train_df, b_df): the two daily-summary DataFrames.  (The original\n",
    "        built this list but returned None; siblings return the pair.)\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for data_dir,pickle_dir in [(train_dir,train_pickle_dir),(B_dir,B_pickle_dir)]:\n",
    "        # Train and B share the same schema; only the file name differs.\n",
    "        fname = 'XW_CUST_TR_DTAL.csv' if data_dir==train_dir else 'XW_CUST_TR_DTAL_B.csv'\n",
    "        对公流水_T0 = pd.read_csv(os.path.join(data_dir,fname))\n",
    "        对公流水_T0.columns = ['客户ID','交易日期','交易金额','交易对手客户编号']\n",
    "\n",
    "        # Undo the contest's amount obfuscation: (x/3.12)**3, rounded.\n",
    "        对公流水_T0['交易金额'] = pow(对公流水_T0['交易金额']/3.12,3).round(2)\n",
    "\n",
    "        # Parse dates once and shift by the contest's fixed 11886-day offset\n",
    "        # (the original converted str -> datetime64 -> to_datetime redundantly).\n",
    "        对公流水_T0['交易日期'] = pd.to_datetime(对公流水_T0['交易日期'].astype('str'))+pd.DateOffset(days=11886)\n",
    "        对公流水_T0.sort_values(['客户ID','交易日期'],inplace=True,ascending=True)\n",
    "        对公流水_T0['交易日期'] = 对公流水_T0['交易日期'].astype('str')\n",
    "\n",
    "        对公流水_T0['交易对手客户编号'].fillna('无',inplace=True)\n",
    "        # Split signed amounts into non-negative inflow / outflow columns.\n",
    "        对公流水_T0['流入金额'] = np.where(对公流水_T0['交易金额']>=0, 对公流水_T0['交易金额'], 0)\n",
    "        对公流水_T0['流出金额'] = np.where(对公流水_T0['交易金额']<=0, 对公流水_T0['交易金额']*-1, 0)\n",
    "\n",
    "        对公流水_单日汇总_T1 = 对公流水_T0.groupby(['客户ID','交易日期']).agg(\n",
    "            {'流入金额':['sum',count_notzero],'流出金额':['sum',count_notzero]})\n",
    "        对公流水_单日汇总_T1.reset_index(inplace=True)\n",
    "        对公流水_单日汇总_T1.columns = ['客户ID','交易日期','流入金额','流入笔数','流出金额','流出笔数']\n",
    "\n",
    "        # Days between each trading day and the observation cutoff 2021-03-31,\n",
    "        # vectorized (the original used a row-wise apply).\n",
    "        对公流水_单日汇总_T1['交易日期'] = 对公流水_单日汇总_T1['交易日期'].astype('datetime64[ns]')\n",
    "        对公流水_单日汇总_T1['交易离最大日期天'] = (pd.Timestamp('2021-03-31') - 对公流水_单日汇总_T1['交易日期']).dt.days\n",
    "\n",
    "        pickle.dump(对公流水_单日汇总_T1, open(pickle_dir+'对公流水_临时表.p', 'wb'))\n",
    "        res.append(对公流水_单日汇总_T1)\n",
    "    return res[0],res[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def 加工企业流水2():\n",
    "    \"\"\"Monthly / overall / March flow features per target customer.\n",
    "\n",
    "    Loads the daily summary cached by 加工企业流水1(), aggregates per month\n",
    "    and overall, extracts March-only totals, left-joins everything onto the\n",
    "    target customer list, and dumps '<pickle_dir>对公流水特征.p'.\n",
    "\n",
    "    Returns:\n",
    "        (train_features, b_features)\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for data_dir,pickle_dir in [(train_dir,train_pickle_dir),(B_dir,B_pickle_dir)]:\n",
    "        # Both branches of the original if/else loaded the same per-directory file.\n",
    "        对公流水_T0 = pickle.load(open(pickle_dir+'对公流水_临时表.p', 'rb'))\n",
    "\n",
    "        对公流水_T0['交易月'] = pd.DatetimeIndex(对公流水_T0['交易日期']).month\n",
    "\n",
    "        对公流水_按月汇总_T1 = 对公流水_T0.groupby(['客户ID','交易月']).agg(\n",
    "            {'流入金额':['sum'],'流入笔数':['sum'],'流出金额':['sum'],'流出笔数':['sum'],'交易日期':['count']})\n",
    "        对公流水_按月汇总_T1.reset_index(inplace=True)\n",
    "        对公流水_按月汇总_T1.columns = ['客户ID','交易月','月流入金额','月流入笔数','月流出金额','月流出笔数','月交易天数']\n",
    "\n",
    "        对公流水_汇总_T1 = 对公流水_按月汇总_T1.groupby(['客户ID']).agg(\n",
    "            {'月流入金额':['sum'],'月流入笔数':['sum'],'月流出金额':['sum'],'月流出笔数':['sum'],'月交易天数':['sum']})\n",
    "        对公流水_汇总_T1.reset_index(inplace=True)\n",
    "        对公流水_汇总_T1.columns = ['客户ID','总流入金额','总流入笔数','总流出金额','总流出笔数','总交易天数']\n",
    "\n",
    "        对公流水_汇总_T1['总净流'] = 对公流水_汇总_T1['总流入金额'] - 对公流水_汇总_T1['总流出金额']\n",
    "        对公流水_汇总_T1['总金额'] = 对公流水_汇总_T1['总流入金额'] + 对公流水_汇总_T1['总流出金额']\n",
    "\n",
    "        # .copy() so the March slice owns its data (fixes SettingWithCopyWarning).\n",
    "        三月汇总_T0 = 对公流水_按月汇总_T1.loc[对公流水_按月汇总_T1['交易月'] == 3].copy()\n",
    "        三月汇总_T0.drop(['交易月'],axis=1,inplace=True)\n",
    "        三月汇总_T0.columns = ['客户ID','3月流入金额','3月流入笔数','3月流出金额','3月流出笔数','3月交易天数']\n",
    "        三月汇总_T0['3月净流'] = 三月汇总_T0['3月流入金额'] - 三月汇总_T0['3月流出金额']\n",
    "        三月汇总_T0['3月总金额'] = 三月汇总_T0['3月流入金额'] + 三月汇总_T0['3月流出金额']\n",
    "\n",
    "        if data_dir==train_dir:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID','违约标记']\n",
    "            目标客户列表.drop(['违约标记'],axis=1,inplace=True)\n",
    "        else:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET_B.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID']\n",
    "\n",
    "        对公流水特征 = 目标客户列表.merge(对公流水_汇总_T1,on=['客户ID'],how='left')\n",
    "        对公流水特征 = 对公流水特征.merge(三月汇总_T0,on=['客户ID'],how='left')\n",
    "        # Drop identifier columns that are not features.\n",
    "        对公流水特征.drop(['借款合同编号','纳税人识别号','法定代表人客户ID'],axis=1,inplace=True)\n",
    "\n",
    "        pickle.dump(对公流水特征, open(pickle_dir+'对公流水特征.p', 'wb'))\n",
    "        res.append(对公流水特征)\n",
    "    return res[0],res[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def 加工企业流水_补充():\n",
    "    \"\"\"Supplementary features: counterparty count + last-trading-day stats.\n",
    "\n",
    "    Reads the raw detail CSV for the distinct-counterparty count, loads the\n",
    "    cached daily summary for the last-trading-day snapshot, joins both onto\n",
    "    the target customer list and dumps '<pickle_dir>对公流水补充.p'.\n",
    "\n",
    "    Returns:\n",
    "        (train_features, b_features)\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for data_dir,pickle_dir in [(train_dir,train_pickle_dir),(B_dir,B_pickle_dir)]:\n",
    "        fname = 'XW_CUST_TR_DTAL.csv' if data_dir==train_dir else 'XW_CUST_TR_DTAL_B.csv'\n",
    "        对公流水_T0 = pd.read_csv(os.path.join(data_dir,fname))\n",
    "        对公流水_T0.columns = ['客户ID','交易日期','交易金额','交易对手客户编号']\n",
    "\n",
    "        对公流水_T0['交易金额'] = pow(对公流水_T0['交易金额']/3.12,3).round(2)\n",
    "        # (An unused '交易金额1' bucket column from the original was removed.)\n",
    "        对公流水_T0['交易对手客户编号'].fillna('无',inplace=True)\n",
    "\n",
    "        # Number of distinct counterparties per customer.\n",
    "        相关客户_T0 = 对公流水_T0.groupby(['客户ID']).agg({'交易对手客户编号':['nunique']})\n",
    "        相关客户_T0.reset_index(inplace=True)\n",
    "        相关客户_T0.columns = ['客户ID','相关客户数']\n",
    "\n",
    "        # Both branches of the original if/else loaded the same per-directory file.\n",
    "        对公流水_T1 = pickle.load(open(pickle_dir+'对公流水_临时表.p', 'rb'))\n",
    "\n",
    "        # Keep only each customer's last trading day.\n",
    "        对公流水_T2 = 对公流水_T1.groupby(['客户ID']).agg({'交易日期':['max']})\n",
    "        对公流水_T2.reset_index(inplace=True)\n",
    "        对公流水_T2.columns = ['客户ID','交易日期']\n",
    "\n",
    "        对公流水_T3 = 对公流水_T1.merge(对公流水_T2,on=['客户ID','交易日期'],how='inner')\n",
    "        对公流水_T3['最后一天总净流'] = 对公流水_T3['流入金额'] - 对公流水_T3['流出金额']\n",
    "        对公流水_T3['最后一天总金额'] = 对公流水_T3['流入金额'] + 对公流水_T3['流出金额']\n",
    "\n",
    "        对公流水_T3.drop(['交易日期','流入金额','流出金额'],axis=1,inplace=True)\n",
    "        对公流水_T3.columns = ['客户ID','最后一天流入笔数','最后一天流出笔数','最后一天日期差','最后一天总净流','最后一天总金额']\n",
    "\n",
    "        if data_dir==train_dir:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID','违约标记']\n",
    "            目标客户列表.drop(['违约标记'],axis=1,inplace=True)\n",
    "        else:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET_B.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID']\n",
    "\n",
    "        对公流水补充 = 目标客户列表.merge(对公流水_T3,on=['客户ID'],how='left')\n",
    "        对公流水补充 = 对公流水补充.merge(相关客户_T0,on=['客户ID'],how='left')\n",
    "        对公流水补充.drop(['借款合同编号','纳税人识别号','法定代表人客户ID'],axis=1,inplace=True)\n",
    "\n",
    "        pickle.dump(对公流水补充, open(pickle_dir+'对公流水补充.p', 'wb'))\n",
    "        res.append(对公流水补充)\n",
    "    return res[0],res[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def 加工企业流水_补充2():\n",
    "    \"\"\"More flow features: per-day amount/interval stats + month-end snapshot.\n",
    "\n",
    "    NOTE(review): a later cell redefines 加工企业流水_补充2 with a different\n",
    "    implementation, which shadows this one once that cell runs.\n",
    "\n",
    "    Returns:\n",
    "        (train_features, b_features)\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for data_dir,pickle_dir in [(train_dir,train_pickle_dir),(B_dir,B_pickle_dir)]:\n",
    "        # Both branches of the original if/else loaded the same per-directory file.\n",
    "        对公流水_T0 = pickle.load(open(pickle_dir+'对公流水_临时表.p', 'rb'))\n",
    "\n",
    "        对公流水_T0['交易总金额'] = 对公流水_T0['流入金额'] + 对公流水_T0['流出金额']\n",
    "        # .copy() so the filtered frame owns its data (fixes SettingWithCopyWarning).\n",
    "        对公流水_T1 = 对公流水_T0.loc[对公流水_T0['交易总金额'] > 100].copy()\n",
    "        # Index-aligned, so summing T1's own columns equals the original T0 sum.\n",
    "        对公流水_T1['交易笔数'] = 对公流水_T1['流入笔数'] + 对公流水_T1['流出笔数']\n",
    "        # Gap in days between consecutive trading days of the same customer.\n",
    "        对公流水_T1['交易间隔'] = 对公流水_T1.groupby(['客户ID'])['交易离最大日期天'].diff(1)\n",
    "\n",
    "        对公流水_T2 = 对公流水_T1.groupby(['客户ID']).agg(\n",
    "            {'交易总金额':['min','max'],'交易间隔':['max','min','std'],'交易笔数':['max','mean']})\n",
    "        对公流水_T2.reset_index(inplace=True)\n",
    "        对公流水_T2.columns = ['客户ID','单日交易金额_min','单日交易金额_max','交易间隔_max','交易间隔_min',\n",
    "                            '交易间隔_std','交易笔数_max','交易笔数_mean']\n",
    "\n",
    "        # Month-end snapshot: rows on the cutoff day itself; .copy() as above.\n",
    "        对公流水_T4 = 对公流水_T0.loc[对公流水_T0['交易离最大日期天'] == 0].copy()\n",
    "        对公流水_T4.drop(['交易日期','交易离最大日期天'],axis=1,inplace=True)\n",
    "        对公流水_T4.columns = ['客户ID','月底流入金额','月底流入笔数','月底流出金额','月底流出笔数','月底总金额']\n",
    "\n",
    "        if data_dir==train_dir:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID','违约标记']\n",
    "            目标客户列表.drop(['违约标记'],axis=1,inplace=True)\n",
    "        else:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET_B.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID']\n",
    "\n",
    "        对公流水补充 = 目标客户列表.merge(对公流水_T2,on=['客户ID'],how='left')\n",
    "        对公流水补充 = 对公流水补充.merge(对公流水_T4,on=['客户ID'],how='left')\n",
    "        对公流水补充.drop(['借款合同编号','纳税人识别号','法定代表人客户ID'],axis=1,inplace=True)\n",
    "\n",
    "        pickle.dump(对公流水补充, open(pickle_dir+'对公流水补充2.p', 'wb'))\n",
    "        res.append(对公流水补充)\n",
    "    return res[0],res[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build and cache the per-customer daily summary tables (train + B).\n",
    "加工企业流水1()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-5-aadbd3413db8>:25: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  三月汇总_T0.drop(['交易月'],axis=1,inplace=True)\n",
      "<ipython-input-5-aadbd3413db8>:27: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  三月汇总_T0['3月净流']=三月汇总_T0['3月流入金额']-三月汇总_T0['3月流出金额']\n",
      "<ipython-input-5-aadbd3413db8>:28: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  三月汇总_T0['3月总金额']=三月汇总_T0['3月流入金额']+三月汇总_T0['3月流出金额']\n",
      "<ipython-input-5-aadbd3413db8>:25: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  三月汇总_T0.drop(['交易月'],axis=1,inplace=True)\n",
      "<ipython-input-5-aadbd3413db8>:27: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  三月汇总_T0['3月净流']=三月汇总_T0['3月流入金额']-三月汇总_T0['3月流出金额']\n",
      "<ipython-input-5-aadbd3413db8>:28: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  三月汇总_T0['3月总金额']=三月汇总_T0['3月流入金额']+三月汇总_T0['3月流出金额']\n"
     ]
    }
   ],
   "source": [
    "# Monthly / overall / March flow features for train and B-test customers.\n",
    "企业流水训练集,企业流水测试集=加工企业流水2()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 15)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check: one row per train target customer (left-joined onto XW_TARGET).\n",
    "企业流水训练集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5939, 15)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the B-test feature table.\n",
    "企业流水测试集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Counterparty-count and last-trading-day supplementary features.\n",
    "企业流水补充训练集,企业流水补充测试集=加工企业流水_补充()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 7)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the train supplementary features.\n",
    "企业流水补充训练集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5939, 7)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the B-test supplementary features.\n",
    "企业流水补充测试集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-7-a625e803f0ee>:11: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  对公流水_T1['交易笔数'] =对公流水_T0['流入笔数'] + 对公流水_T0['流出笔数']\n",
      "<ipython-input-7-a625e803f0ee>:12: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  对公流水_T1['交易间隔'] =对公流水_T1.groupby(['客户ID'])['交易离最大日期天'].diff(1)\n",
      "<ipython-input-7-a625e803f0ee>:28: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  对公流水_T4.drop(['交易日期','交易离最大日期天'],axis=1,inplace=True)\n",
      "<ipython-input-7-a625e803f0ee>:11: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  对公流水_T1['交易笔数'] =对公流水_T0['流入笔数'] + 对公流水_T0['流出笔数']\n",
      "<ipython-input-7-a625e803f0ee>:12: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  对公流水_T1['交易间隔'] =对公流水_T1.groupby(['客户ID'])['交易离最大日期天'].diff(1)\n",
      "<ipython-input-7-a625e803f0ee>:28: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  对公流水_T4.drop(['交易日期','交易离最大日期天'],axis=1,inplace=True)\n"
     ]
    }
   ],
   "source": [
    "# Interval/amount stats + month-end snapshot features.\n",
    "# NOTE(review): this calls the FIRST 加工企业流水_补充2 definition; a later\n",
    "# cell redefines the same name and overwrites these result variables.\n",
    "企业流水补充2训练集,企业流水补充2测试集=加工企业流水_补充2()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 13)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the train supplementary-2 features.\n",
    "企业流水补充2训练集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5939, 13)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the B-test supplementary-2 features.\n",
    "企业流水补充2测试集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def 加工企业流水_补充2():\n",
    "    \"\"\"Customer-level aggregates computed directly from the raw detail CSV.\n",
    "\n",
    "    NOTE(review): this redefines 加工企业流水_补充2 from an earlier cell and\n",
    "    shadows it from here on; consider giving the two versions distinct names.\n",
    "\n",
    "    Returns:\n",
    "        (train_features, b_features)\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for data_dir,pickle_dir in [(train_dir,train_pickle_dir),(B_dir,B_pickle_dir)]:\n",
    "        fname = 'XW_CUST_TR_DTAL.csv' if data_dir==train_dir else 'XW_CUST_TR_DTAL_B.csv'\n",
    "        企业交易明细表 = pd.read_csv(os.path.join(data_dir,fname))\n",
    "        企业交易明细表.columns = ['客户ID','交易日期','交易金额','交易对手客户编号']\n",
    "\n",
    "        企业交易明细表['客户ID'] = 企业交易明细表['客户ID'].astype('str')\n",
    "        # NOTE(review): unlike the other flow functions, the de-obfuscated\n",
    "        # amount is not .round(2)-ed here — presumably intentional; confirm.\n",
    "        企业交易明细表['交易金额'] = pow(企业交易明细表['交易金额']/3.12,3)\n",
    "        企业交易明细表['交易日期'] = pd.to_datetime(企业交易明细表['交易日期'].astype('str'))+pd.DateOffset(days=11886)\n",
    "\n",
    "        # Absolute-amount stats.\n",
    "        # TODO(review): 企业交易T0 is built but never merged into 企业交易T8\n",
    "        # below, so these features never reach the output — forgotten merge?\n",
    "        企业交易T0 = 企业交易明细表.copy()\n",
    "        企业交易T0['企业金额绝对值'] = 企业交易T0['交易金额'].abs()\n",
    "        企业交易T0 = 企业交易T0.groupby(['客户ID']).agg({'企业金额绝对值':['max','min','mean','std']})\n",
    "        企业交易T0.reset_index(inplace=True)\n",
    "        企业交易T0.columns = ['客户ID','企业交易绝对值最高金额','企业交易绝对值最低金额','企业交易绝对值_mean','企业交易绝对值_std']\n",
    "\n",
    "        企业交易T1 = 企业交易明细表.groupby(['客户ID']).agg({'交易金额':['sum','max','min','count','mean','std']})\n",
    "        企业交易T1.reset_index(inplace=True)\n",
    "        企业交易T1.columns = ['客户ID','企业交易金额汇总','企业交易最高金额','企业交易最低金额','企业交易笔数','企业交易_mean','企业交易_std']\n",
    "\n",
    "        # Distinct trading days and the span between first and last of them.\n",
    "        企业交易T2 = 企业交易明细表.drop(labels=['交易对手客户编号','交易金额'],axis=1).drop_duplicates()\n",
    "        企业交易T3 = 企业交易T2.groupby(['客户ID']).agg({'交易日期':['count','max','min']})\n",
    "        企业交易T3.reset_index(inplace=True)\n",
    "        企业交易T3.columns = ['客户ID','企业交易天数','企业交易日期max','企业交易日期min']\n",
    "        企业交易T3['企业时间跨度'] = (企业交易T3['企业交易日期max'] - 企业交易T3['企业交易日期min']).dt.days\n",
    "\n",
    "        # Inflow (>= 0) stats.\n",
    "        企业交易T4 = 企业交易明细表.loc[企业交易明细表['交易金额'] >= 0]\n",
    "        企业交易T4 = 企业交易T4.groupby(['客户ID']).agg({'交易金额':['sum','count','max','min','mean','std']})\n",
    "        企业交易T4.reset_index(inplace=True)\n",
    "        企业交易T4.columns = ['客户ID','企业流入总额','企业流入笔数','企业流入max','企业流入min','企业流入mean','企业流入std']\n",
    "\n",
    "        # Outflow (< 0) stats.\n",
    "        企业交易T5 = 企业交易明细表.loc[企业交易明细表['交易金额'] < 0]\n",
    "        企业交易T5 = 企业交易T5.groupby(['客户ID']).agg({'交易金额':['sum','count','max','min','mean','std']})\n",
    "        企业交易T5.reset_index(inplace=True)\n",
    "        企业交易T5.columns = ['客户ID','企业流出总额','企业流出笔数','企业流出max','企业流出min','企业流出mean','企业流出std']\n",
    "\n",
    "        企业交易T8 = (企业交易T1\n",
    "                   .merge(企业交易T3,on='客户ID',how='left')\n",
    "                   .merge(企业交易T4,on='客户ID',how='left')\n",
    "                   .merge(企业交易T5,on='客户ID',how='left'))\n",
    "\n",
    "        if data_dir==train_dir:\n",
    "            目标客户表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET.csv'))\n",
    "            目标客户表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID','违约标记']\n",
    "            目标客户表.drop(['违约标记'],axis=1,inplace=True)\n",
    "        else:\n",
    "            目标客户表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET_B.csv'))\n",
    "            目标客户表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID']\n",
    "\n",
    "        企业交易宽表 = 目标客户表.merge(企业交易T8,on='客户ID',how='left')\n",
    "        企业交易宽表 = 企业交易宽表.drop(labels=['纳税人识别号','法定代表人客户ID','借款合同编号','企业交易日期max','企业交易日期min'],axis=1)\n",
    "        企业交易宽表['客户ID'] = 企业交易宽表['客户ID'].astype('str')\n",
    "\n",
    "        pickle.dump(企业交易宽表, open(pickle_dir+'企业交易宽表DJY.p', 'wb'))\n",
    "\n",
    "        res.append(企业交易宽表)\n",
    "    return res[0],res[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Calls the redefined 加工企业流水_补充2 (raw-detail aggregate version);\n",
    "# the earlier result variables of the same names are overwritten.\n",
    "企业流水补充2训练集,企业流水补充2测试集 = 加工企业流水_补充2()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 21)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the redefined supplementary-2 train features.\n",
    "企业流水补充2训练集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(5939, 21)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shape check for the redefined supplementary-2 B-test features.\n",
    "企业流水补充2测试集.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build train/test feature tables from the enterprise transaction detail.\n",
    "def 加工企业交易明细特征补充():\n",
    "    \"\"\"Detail-level features: totals, top-counterparty flag, daily-inflow stats.\n",
    "\n",
    "    Returns:\n",
    "        (train_features, b_features)\n",
    "    \"\"\"\n",
    "    res = []\n",
    "    for data_dir,pickle_dir in [(train_dir,train_pickle_dir),(B_dir,B_pickle_dir)]:\n",
    "        fname = 'XW_CUST_TR_DTAL.csv' if data_dir==train_dir else 'XW_CUST_TR_DTAL_B.csv'\n",
    "        流水_T0 = pd.read_csv(os.path.join(data_dir,fname))\n",
    "        流水_T0.columns = ['客户ID','数据日期','交易金额','交易对手客户编号']\n",
    "\n",
    "        # BUG FIX: sort_values() returns a new frame; the original discarded\n",
    "        # the result, leaving 流水_T0 unsorted.\n",
    "        流水_T0 = 流水_T0.sort_values(['客户ID','数据日期'])\n",
    "\n",
    "        流水_T0['数据日期'] = pd.to_datetime(流水_T0['数据日期'].astype('str'))+pd.DateOffset(days=11886)\n",
    "\n",
    "        流水_T0['交易金额'] = pow(流水_T0['交易金额']/3.12,3).round(2)\n",
    "\n",
    "        流水_T0['交易金额绝对值'] = 流水_T0['交易金额'].abs()\n",
    "\n",
    "        # Flag transactions with the single most frequent counterparty hash.\n",
    "        流水_T0['是否最多对手客户'] = 流水_T0['交易对手客户编号'].astype('str').apply(lambda x: 1 if x=='223cbbe577189d1f5c32e9045320716a' else 0)\n",
    "\n",
    "        # 1 = inflow, 2 = outflow (zero / missing amounts fall into 2, as before).\n",
    "        流水_T0['交易标识'] = 流水_T0['交易金额'].apply(lambda x: 1 if x>0 else 2)\n",
    "        流入_T0 = 流水_T0[流水_T0['交易标识']==1]\n",
    "        # (An unused outflow slice from the original was removed.)\n",
    "\n",
    "        流入_T1 = 流入_T0.groupby(['客户ID']).agg({'交易金额':['sum','count'],'是否最多对手客户':['sum']})\n",
    "        流入_T1.reset_index(inplace=True)\n",
    "        流入_T1.columns = ['客户ID','企业总转入金额','企业总转入笔数','是否最多对手客户sum']\n",
    "\n",
    "        流水_T1 = 流水_T0.groupby(['客户ID']).agg({'交易金额':['sum','count'],'是否最多对手客户':['sum'],'交易金额绝对值':['sum']})\n",
    "        流水_T1.reset_index(inplace=True)\n",
    "        流水_T1.columns = ['客户ID','企业总交易金额','企业总交易笔数','是否最多对手客户sum总额','交易金额绝对值sum']\n",
    "\n",
    "        # Daily inflow totals, then per-customer min/mean of those.\n",
    "        流入_T2 = 流入_T0.groupby(['客户ID','数据日期']).agg({'交易金额':['sum']})\n",
    "        流入_T2.reset_index(inplace=True)\n",
    "        流入_T2.columns = ['客户ID','数据日期','日流入交易金额']\n",
    "        流入_T3 = 流入_T2.groupby(['客户ID']).agg({'日流入交易金额':['min','mean']})\n",
    "        流入_T3.reset_index(inplace=True)\n",
    "        流入_T3.columns = ['客户ID','最小日流入交易金额','日流入交易金额_mean']\n",
    "\n",
    "        if data_dir==train_dir:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID','违约标记']\n",
    "            目标客户列表.drop(['违约标记'],axis=1,inplace=True)\n",
    "        else:\n",
    "            目标客户列表 = pd.read_csv(os.path.join(data_dir,'XW_TARGET_B.csv'))\n",
    "            目标客户列表.columns = ['借款合同编号','客户ID','纳税人识别号','法定代表人客户ID']\n",
    "\n",
    "        企业交易明细特征 = 目标客户列表.merge(流入_T3,on=['客户ID'],how='left')\n",
    "        企业交易明细特征 = 企业交易明细特征.merge(流水_T1,on=['客户ID'],how='left')\n",
    "        企业交易明细特征 = 企业交易明细特征.merge(流入_T1,on=['客户ID'],how='left')\n",
    "        企业交易明细特征.drop(['借款合同编号','纳税人识别号','法定代表人客户ID'],axis=1,inplace=True)\n",
    "        pickle.dump(企业交易明细特征, open(pickle_dir+'Z企业交易明细特征.p', 'wb'))\n",
    "        res.append(企业交易明细特征)\n",
    "    return res[0],res[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Detail-level supplementary features for train and B-test customers.\n",
    "企业交易_训练集,企业交易_测试集= 加工企业交易明细特征补充()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
