{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:40:20.780316Z",
     "iopub.status.busy": "2023-11-07T03:40:20.780013Z",
     "iopub.status.idle": "2023-11-07T03:40:23.815101Z",
     "shell.execute_reply": "2023-11-07T03:40:23.814486Z",
     "shell.execute_reply.started": "2023-11-07T03:40:20.780282Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "import os\n",
    "import pickle\n",
    "\n",
    "import warnings\n",
    "import random\n",
    "from tqdm import tqdm\n",
    "\n",
    "import datetime\n",
    "import tqdm\n",
    "\n",
    "import imp\n",
    "import utils\n",
    "imp.reload(utils)\n",
    "\n",
    "\n",
    "## 模型\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "import joblib\n",
    "import gc\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "\n",
    "#from category_encoders.target_encoder import TargetEncoder\n",
    "from catboost import CatBoostClassifier\n",
    "from catboost import Pool\n",
    "\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "import xgboost as xgb\n",
    "from sklearn.metrics import f1_score\n",
    "from sklearn.model_selection import StratifiedKFold,train_test_split\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "import pickle\n",
    "\n",
    "\n",
    "from sklearn.metrics import classification_report,precision_score,recall_score,f1_score,precision_recall_curve\n",
    "from sklearn.metrics import roc_curve, auc,roc_auc_score\n",
    "\n",
    "pd.options.display.max_columns = 500\n",
    "pd.options.display.max_rows = 500"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 读取数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:40:23.816444Z",
     "iopub.status.busy": "2023-11-07T03:40:23.816233Z",
     "iopub.status.idle": "2023-11-07T03:41:00.917427Z",
     "shell.execute_reply": "2023-11-07T03:41:00.916753Z",
     "shell.execute_reply.started": "2023-11-07T03:40:23.816419Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "MBANK_TRNFLW_QZ.shape: (1277701, 6)\n",
      "EBANK_CSTLOGQUERY_QZ.shape: (3175488, 3)\n",
      "DP_CUST_SUM_QZ.shape: (42061, 10)\n",
      "TARGET_QZ.shape: (42139, 4)\n",
      "MBANK_QRYTRNFLW_QZ.shape: (7459576, 3)\n",
      "EBANK_CSTLOGQUERY_QZ.shape: (3175488, 3)\n",
      "TAGS_PROD_HOLD_QZ.shape: (42139, 17)\n",
      "APS_QZ.shape: (6019921, 6)\n",
      "CUST_FA_SUM_QZ.shape: (42139, 14)\n"
     ]
    }
   ],
   "source": [
    "# APS_QZ_A = pd.read_csv('../contest/A/APS_QZ_A.csv')\n",
    "# CUST_FA_SUM_QZ_A = pd.read_csv('../contest/A/CUST_FA_SUM_QZ_A.csv')\n",
    "# DP_CUST_SUM_QZ_A = pd.read_csv('../contest/A/DP_CUST_SUM_QZ_A.csv')\n",
    "# EBANK_CSTLOG_QZ_A = pd.read_csv('../contest/A/EBANK_CSTLOG_QZ_A.csv')\n",
    "# EBANK_CSTLOGQUERY_QZ_A = pd.read_csv('../contest/A/EBANK_CSTLOGQUERY_QZ_A.csv')\n",
    "# MBANK_QRYTRNFLW_QZ_A = pd.read_csv('../contest/A/MBANK_QRYTRNFLW_QZ_A.csv')\n",
    "# MBANK_TRNFLW_QZ_A = pd.read_csv('../contest/A/MBANK_TRNFLW_QZ_A.csv')\n",
    "# NATURE_CUST_QZ_A = pd.read_csv('../contest/A/NATURE_CUST_QZ_A.csv')\n",
    "# TAGS_PROD_HOLD_QZ_A = pd.read_csv('../contest/A/TAGS_PROD_HOLD_QZ_A.csv')\n",
    "# TARGET_QZ_A = pd.read_csv('../contest/A/TARGET_QZ_A.csv')\n",
    "\n",
    "#读取b榜数据\n",
    "APS_QZ_B = pd.read_csv('../contest/B/APS_QZ_B.csv')\n",
    "CUST_FA_SUM_QZ_B = pd.read_csv('../contest/B/CUST_FA_SUM_QZ_B.csv')\n",
    "DP_CUST_SUM_QZ_B = pd.read_csv('../contest/B/DP_CUST_SUM_QZ_B.csv')\n",
    "EBANK_CSTLOG_QZ_B = pd.read_csv('../contest/B/EBANK_CSTLOG_QZ_B.csv')\n",
    "EBANK_CSTLOGQUERY_QZ_B = pd.read_csv('../contest/B/EBANK_CSTLOGQUERY_QZ_B.csv')\n",
    "MBANK_QRYTRNFLW_QZ_B = pd.read_csv('../contest/B/MBANK_QRYTRNFLW_QZ_B.csv')\n",
    "MBANK_TRNFLW_QZ_B = pd.read_csv('../contest/B/MBANK_TRNFLW_QZ_B.csv')\n",
    "NATURE_CUST_QZ_B = pd.read_csv('../contest/B/NATURE_CUST_QZ_B.csv')\n",
    "TAGS_PROD_HOLD_QZ_B = pd.read_csv('../contest/B/TAGS_PROD_HOLD_QZ_B.csv')\n",
    "TARGET_QZ_B = pd.read_csv('../contest/B/TARGET_QZ_B.csv')\n",
    "\n",
    "#读取训练集数据\n",
    "APS_QZ_train = pd.read_csv('../contest/train/APS_QZ.csv')\n",
    "CUST_FA_SUM_QZ_train = pd.read_csv('../contest/train/CUST_FA_SUM_QZ.csv')\n",
    "DP_CUST_SUM_QZ_train = pd.read_csv('../contest/train/DP_CUST_SUM_QZ.csv')\n",
    "EBANK_CSTLOG_QZ_train = pd.read_csv('../contest/train/EBANK_CSTLOG_QZ.csv')\n",
    "EBANK_CSTLOGQUERY_QZ_train = pd.read_csv('../contest/train/EBANK_CSTLOGQUERY_QZ.csv')\n",
    "MBANK_QRYTRNFLW_QZ_train = pd.read_csv('../contest/train/MBANK_QRYTRNFLW_QZ.csv')\n",
    "MBANK_TRNFLW_QZ_train = pd.read_csv('../contest/train/MBANK_TRNFLW_QZ.csv')\n",
    "NATURE_CUST_QZ_train = pd.read_csv('../contest/train/NATURE_CUST_QZ.csv')\n",
    "TAGS_PROD_HOLD_QZ_train = pd.read_csv('../contest/train/TAGS_PROD_HOLD_QZ.csv')\n",
    "TARGET_QZ_train = pd.read_csv('../contest/train/TARGET_QZ.csv')\n",
    "\n",
    "# 合并训练集及b榜数据\n",
    "MBANK_TRNFLW_QZ = pd.concat([MBANK_TRNFLW_QZ_train,MBANK_TRNFLW_QZ_B],axis=0)\n",
    "EBANK_CSTLOGQUERY_QZ = pd.concat([EBANK_CSTLOGQUERY_QZ_train,EBANK_CSTLOGQUERY_QZ_B],axis=0)\n",
    "DP_CUST_SUM_QZ = pd.concat([DP_CUST_SUM_QZ_train,DP_CUST_SUM_QZ_B],axis=0)\n",
    "TARGET_QZ = pd.concat([TARGET_QZ_train,TARGET_QZ_B],axis=0)\n",
    "MBANK_QRYTRNFLW_QZ = pd.concat([MBANK_QRYTRNFLW_QZ_train,MBANK_QRYTRNFLW_QZ_B],axis=0)\n",
    "EBANK_CSTLOGQUERY_QZ = pd.concat([EBANK_CSTLOGQUERY_QZ_train,EBANK_CSTLOGQUERY_QZ_B],axis=0)\n",
    "TAGS_PROD_HOLD_QZ = pd.concat([TAGS_PROD_HOLD_QZ_train,TAGS_PROD_HOLD_QZ_B],axis=0)\n",
    "APS_QZ = pd.concat([APS_QZ_train,APS_QZ_B],axis=0)\n",
    "CUST_FA_SUM_QZ = pd.concat([CUST_FA_SUM_QZ_train,CUST_FA_SUM_QZ_B],axis=0)\n",
    "\n",
    "# 查看数据量\n",
    "print('MBANK_TRNFLW_QZ.shape:',MBANK_TRNFLW_QZ.shape)\n",
    "print('EBANK_CSTLOGQUERY_QZ.shape:',EBANK_CSTLOGQUERY_QZ.shape)\n",
    "print('DP_CUST_SUM_QZ.shape:',DP_CUST_SUM_QZ.shape)\n",
    "print('TARGET_QZ.shape:',TARGET_QZ.shape)\n",
    "print('MBANK_QRYTRNFLW_QZ.shape:',MBANK_QRYTRNFLW_QZ.shape)\n",
    "print('EBANK_CSTLOGQUERY_QZ.shape:',EBANK_CSTLOGQUERY_QZ.shape)\n",
    "print('TAGS_PROD_HOLD_QZ.shape:',TAGS_PROD_HOLD_QZ.shape)\n",
    "print('APS_QZ.shape:',APS_QZ.shape)\n",
    "print('CUST_FA_SUM_QZ.shape:',CUST_FA_SUM_QZ.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 单表处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:41:00.918896Z",
     "iopub.status.busy": "2023-11-07T03:41:00.918696Z",
     "iopub.status.idle": "2023-11-07T03:41:00.921690Z",
     "shell.execute_reply": "2023-11-07T03:41:00.921079Z",
     "shell.execute_reply.started": "2023-11-07T03:41:00.918871Z"
    }
   },
   "outputs": [],
   "source": [
    "feature_path = './feature'\n",
    "tmp_path = './tmp'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### MBANK_TRNFLW_QZ  掌银金融性流水表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:41:00.922843Z",
     "iopub.status.busy": "2023-11-07T03:41:00.922665Z",
     "iopub.status.idle": "2023-11-07T03:41:00.925428Z",
     "shell.execute_reply": "2023-11-07T03:41:00.924971Z",
     "shell.execute_reply.started": "2023-11-07T03:41:00.922821Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# # 处理时间类型数据\n",
    "# MBANK_TRNFLW_QZ['TFT_DTE_TIME'] = pd.to_datetime(MBANK_TRNFLW_QZ['TFT_DTE_TIME'], format = '%Y%m%d%H%M%S')\n",
    "# MBANK_TRNFLW_QZ['year'] = MBANK_TRNFLW_QZ['TFT_DTE_TIME'].dt.year\n",
    "# MBANK_TRNFLW_QZ['month'] = MBANK_TRNFLW_QZ['TFT_DTE_TIME'].dt.month\n",
    "# MBANK_TRNFLW_QZ['day'] = MBANK_TRNFLW_QZ['TFT_DTE_TIME'].dt.day\n",
    "# MBANK_TRNFLW_QZ['hour'] = MBANK_TRNFLW_QZ['TFT_DTE_TIME'].dt.hour\n",
    "# MBANK_TRNFLW_QZ['minute'] = MBANK_TRNFLW_QZ['TFT_DTE_TIME'].dt.minute\n",
    "# MBANK_TRNFLW_QZ['second'] = MBANK_TRNFLW_QZ['TFT_DTE_TIME'].dt.second\n",
    "\n",
    "\n",
    "# # 函数定义区\n",
    "# def gen_dense_features(df,col,stat,col_name):\n",
    "#     group_df = df.groupby(['TFT_CSTNO'])[col].agg(stat).reset_index()\n",
    "#     group_df.columns = ['TFT_CSTNO','TFT_CSTNO'+col_name+'{}_'.format(col) + stat]\n",
    "#     return group_df\n",
    "\n",
    "# def get_statistics_feature(df, cols, statistics,col_name):\n",
    "#     tr_feature = pd.DataFrame(df['TFT_CSTNO'].drop_duplicates().reset_index(drop=True) )\n",
    "#     for col in cols:\n",
    "#         for stat in statistics:\n",
    "#             tr_feature = tr_feature.merge(gen_dense_features(df,col,stat,col_name), on = 'TFT_CSTNO', how = 'left')\n",
    "#     return tr_feature\n",
    "\n",
    "\n",
    "# tr_feature_1 = get_statistics_feature(MBANK_TRNFLW_QZ, ['TFT_TRNAMT','month','day','hour','minute','second'], ['min','max','mean','std','median','count','sum','skew','nunique'],'TRNFLW')\n",
    "\n",
    "# print('tr_feature_1.shape:',tr_feature_1.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### EBANK_CSTLOGQUERY_QZ 网银非金融性流水表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:41:00.926345Z",
     "iopub.status.busy": "2023-11-07T03:41:00.926166Z",
     "iopub.status.idle": "2023-11-07T03:42:04.680494Z",
     "shell.execute_reply": "2023-11-07T03:42:04.679760Z",
     "shell.execute_reply.started": "2023-11-07T03:41:00.926323Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CSTLOGQUERY_feature.shape: (1082, 66)\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# 处理时间类型数据\n",
    "EBANK_CSTLOGQUERY_QZ = pd.concat([EBANK_CSTLOGQUERY_QZ_train,EBANK_CSTLOGQUERY_QZ_B],axis=0)\n",
    "EBANK_CSTLOGQUERY_QZ = EBANK_CSTLOGQUERY_QZ.merge(TARGET_QZ[['CUST_NO','DATA_DAT']].rename(columns = {'CUST_NO': 'CLQ_CSTNO'}))\n",
    "\n",
    "EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'] = pd.to_datetime(EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'], format = '%Y%m%d%H%M%S')\n",
    "EBANK_CSTLOGQUERY_QZ['DATA_DAT'] = pd.to_datetime(EBANK_CSTLOGQUERY_QZ['DATA_DAT'], format = '%Y%m%d')\n",
    "\n",
    "EBANK_CSTLOGQUERY_QZ['year'] = EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'].dt.year\n",
    "EBANK_CSTLOGQUERY_QZ['month'] = 4 -(EBANK_CSTLOGQUERY_QZ['DATA_DAT'].dt.month - EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'].dt.month)\n",
    "EBANK_CSTLOGQUERY_QZ['day'] = EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'].dt.day\n",
    "EBANK_CSTLOGQUERY_QZ['hour'] = EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'].dt.hour\n",
    "EBANK_CSTLOGQUERY_QZ['minute'] = EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'].dt.minute\n",
    "EBANK_CSTLOGQUERY_QZ['second'] = EBANK_CSTLOGQUERY_QZ['CLQ_DTE_TIME'].dt.second\n",
    "\n",
    "\n",
    "# 函数定义区\n",
    "def gen_dense_features(df,col,stat,col_name):\n",
    "    group_df = df.groupby(['CLQ_CSTNO'])[col].agg(stat).reset_index()\n",
    "    group_df.columns = ['CLQ_CSTNO','CLQ_CSTNO'+col_name+'{}_'.format(col) + stat]\n",
    "    return group_df\n",
    "\n",
    "def get_statistics_feature(df, cols, statistics,col_name):\n",
    "    tr_feature = pd.DataFrame(df['CLQ_CSTNO'].drop_duplicates().reset_index(drop=True) )\n",
    "    for col in cols:\n",
    "        for stat in statistics:\n",
    "            tr_feature = tr_feature.merge(gen_dense_features(df,col,stat,col_name), on = 'CLQ_CSTNO', how = 'left')\n",
    "    return tr_feature\n",
    "\n",
    "# 时间暴力特征\n",
    "CSTLOGQUERY_feature_1 = get_statistics_feature(EBANK_CSTLOGQUERY_QZ, ['month','day','hour','minute','second'], ['min','max','mean','std','median','count','sum','skew','nunique'],'CSTLOGQUERY')\n",
    "\n",
     "# Bag-of-words / term-frequency features.\n",
     "# NOTE(review): this local text_feats is never called -- the call below uses\n",
     "# utils.text_feats instead -- and it references tfidf/count2vec, which are not\n",
     "# defined anywhere in this notebook; calling it as-is would raise NameError.\n",
     "def text_feats(df, group_id, col, num):\n",
     "    df[col] = df[col].astype(str)\n",
     "    temp = df.groupby(group_id)[col].agg(list).reset_index()\n",
     "    temp[col] = temp[col].apply(lambda x: ' '.join(x))\n",
     "    # join the per-customer list of codes into one space-separated string\n",
     "    tfidf_temp = tfidf(temp, col, num)\n",
     "    count_temp = count2vec(temp, col, num)\n",
     "    return pd.concat([temp[group_id], tfidf_temp, count_temp], axis=1) # concatenate horizontally\n",
    "\n",
    "\n",
    "# 词频和词袋\n",
    "CSTLOGQUERY_feature_2 = utils.text_feats(EBANK_CSTLOGQUERY_QZ, 'CLQ_CSTNO', 'CLQ_BSNCOD', 10)\n",
    "\n",
    "# tr_feature_2 = get_statistics_feature(MBANK_TRNFLW_QZ, ['transcode'], ['count','nunique'],'trnflw')\n",
    "\n",
    "CSTLOGQUERY_feature = CSTLOGQUERY_feature_1.merge(CSTLOGQUERY_feature_2,  on = 'CLQ_CSTNO')\n",
    "    \n",
    "print('CSTLOGQUERY_feature.shape:', CSTLOGQUERY_feature.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### MBANK_QRYTRNFLW_QZ 掌银非金融性流水表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:42:04.681754Z",
     "iopub.status.busy": "2023-11-07T03:42:04.681547Z",
     "iopub.status.idle": "2023-11-07T03:45:46.996739Z",
     "shell.execute_reply": "2023-11-07T03:45:46.995967Z",
     "shell.execute_reply.started": "2023-11-07T03:42:04.681729Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "QRYTRNFLW_feature.shape: (41945, 66)\n"
     ]
    }
   ],
   "source": [
    "# 处理MBANK_QRYTRNFLW_QZ时间类型数据\n",
    "MBANK_QRYTRNFLW_QZ = pd.concat([MBANK_QRYTRNFLW_QZ_train,MBANK_QRYTRNFLW_QZ_B],axis=0)\n",
    "MBANK_QRYTRNFLW_QZ = MBANK_QRYTRNFLW_QZ.merge(TARGET_QZ[['CUST_NO','DATA_DAT']].rename(columns = {'CUST_NO': 'TFT_CSTNO'}))\n",
    "\n",
    "MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'] = pd.to_datetime(MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'], format = '%Y%m%d%H%M%S')\n",
    "MBANK_QRYTRNFLW_QZ['DATA_DAT'] = pd.to_datetime(MBANK_QRYTRNFLW_QZ['DATA_DAT'], format = '%Y%m%d')\n",
    "MBANK_QRYTRNFLW_QZ['year'] = MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'].dt.year\n",
    "MBANK_QRYTRNFLW_QZ['month'] = 4 - (MBANK_QRYTRNFLW_QZ['DATA_DAT'].dt.month - MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'].dt.month)\n",
    "MBANK_QRYTRNFLW_QZ['day'] = MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'].dt.day\n",
    "MBANK_QRYTRNFLW_QZ['hour'] = MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'].dt.hour\n",
    "MBANK_QRYTRNFLW_QZ['minute'] = MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'].dt.minute\n",
    "MBANK_QRYTRNFLW_QZ['second'] = MBANK_QRYTRNFLW_QZ['TFT_DTE_TIME'].dt.second\n",
    "\n",
    "\n",
    "\n",
    "# 函数定义区\n",
    "def gen_dense_features(df,col,stat,col_name):\n",
    "    group_df = df.groupby(['TFT_CSTNO'])[col].agg(stat).reset_index()\n",
    "    group_df.columns = ['TFT_CSTNO','TFT_CSTNO'+col_name+'{}_'.format(col) + stat]\n",
    "    return group_df\n",
    "\n",
    "def get_statistics_feature(df, cols, statistics,col_name):\n",
    "    tr_feature = pd.DataFrame(df['TFT_CSTNO'].drop_duplicates().reset_index(drop=True) )\n",
    "    for col in cols:\n",
    "        for stat in statistics:\n",
    "            tr_feature = tr_feature.merge(gen_dense_features(df,col,stat,col_name), on = 'TFT_CSTNO', how = 'left')\n",
    "    return tr_feature\n",
    "\n",
    "# 时间暴力特征\n",
    "QRYTRNFLW_feature_1 = get_statistics_feature(MBANK_QRYTRNFLW_QZ, ['month','day','hour','minute','second'], ['min','max','mean','std','median','count','sum','skew','nunique'],'QRYTRNFLW')\n",
    "\n",
     "# NOTE(review): duplicate of the local text_feats defined in the EBANK cell;\n",
     "# it is never called (the call below uses utils.text_feats) and references\n",
     "# tfidf/count2vec, which are undefined in this notebook.\n",
     "def text_feats(df, group_id, col, num):\n",
     "    df[col] = df[col].astype(str)\n",
     "    temp = df.groupby(group_id)[col].agg(list).reset_index()\n",
     "    temp[col] = temp[col].apply(lambda x: ' '.join(x))\n",
     "    # join the per-customer list of codes into one space-separated string\n",
     "    tfidf_temp = tfidf(temp, col, num)\n",
     "    count_temp = count2vec(temp, col, num)\n",
     "    return pd.concat([temp[group_id], tfidf_temp, count_temp], axis=1) # concatenate horizontally\n",
    "\n",
    "\n",
    "# 词频和词袋\n",
    "QRYTRNFLW_feature_2 = utils.text_feats(MBANK_QRYTRNFLW_QZ, 'TFT_CSTNO', 'TFT_STDBSNCOD', 10)\n",
    "\n",
    "QRYTRNFLW_feature = QRYTRNFLW_feature_1.merge(QRYTRNFLW_feature_2,  on = 'TFT_CSTNO')\n",
    "\n",
    "print('QRYTRNFLW_feature.shape:',QRYTRNFLW_feature.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### TAGS_PROD_HOLD_QZ 客户产品持有表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:45:46.997817Z",
     "iopub.status.busy": "2023-11-07T03:45:46.997620Z",
     "iopub.status.idle": "2023-11-07T03:45:47.014359Z",
     "shell.execute_reply": "2023-11-07T03:45:47.013716Z",
     "shell.execute_reply.started": "2023-11-07T03:45:46.997792Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "PROD_feature = TAGS_PROD_HOLD_QZ.drop('DATA_DAT',axis = 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 保存特征矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:45:47.016364Z",
     "iopub.status.busy": "2023-11-07T03:45:47.016180Z",
     "iopub.status.idle": "2023-11-07T03:45:47.227582Z",
     "shell.execute_reply": "2023-11-07T03:45:47.226894Z",
     "shell.execute_reply.started": "2023-11-07T03:45:47.016342Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "feature 文件夹已经存在\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# 判断feature文件夹是否存在\n",
    "if not os.path.exists(feature_path):\n",
    "    os.mkdir(feature_path)\n",
    "    print('feature 文件夹已经创建')\n",
    "else:\n",
    "    print('feature 文件夹已经存在')\n",
    "\n",
    "    \n",
    "    \n",
    "# 指定要保存的文件名\n",
    "PROD_feature_file_name = \"./feature/B_LYH_8_feature_PROD_feature_v1.pkl\"\n",
    "QRYTRNFLW_feature_file_name = \"./feature/B_LYH_2_QRYTRNFLW_feature_v1.pkl\"\n",
    "CSTLOGQUERY_feature_file_name = \"./feature/B_LYH_4_CSTLOGQUERY_feature_v1.pkl\"\n",
    "\n",
    "\n",
    "\n",
    "# 使用pickle.dump()将特征矩阵保存为二进制文件\n",
    "with open(PROD_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(PROD_feature, file)\n",
    "    \n",
    "with open(QRYTRNFLW_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(QRYTRNFLW_feature.rename(columns = {'TFT_CSTNO':'CUST_NO'}), file)\n",
    "    \n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(CSTLOGQUERY_feature.rename(columns = {'CLQ_CSTNO':'CUST_NO'}), file)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 业务特征处理 APS_QZ 借记卡流水表  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:45:47.229180Z",
     "iopub.status.busy": "2023-11-07T03:45:47.228963Z",
     "iopub.status.idle": "2023-11-07T03:45:47.233618Z",
     "shell.execute_reply": "2023-11-07T03:45:47.232981Z",
     "shell.execute_reply.started": "2023-11-07T03:45:47.229155Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tmp 文件夹已经存在\n"
     ]
    }
   ],
   "source": [
    "tmp_path = './tmp'\n",
    "# 判断feature文件夹是否存在\n",
    "if not os.path.exists(feature_path):\n",
    "    os.mkdir(feature_path)\n",
    "    print('tmp 文件夹已经创建')\n",
    "else:\n",
    "    print('tmp 文件夹已经存在')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:45:47.234571Z",
     "iopub.status.busy": "2023-11-07T03:45:47.234402Z",
     "iopub.status.idle": "2023-11-07T03:46:11.405709Z",
     "shell.execute_reply": "2023-11-07T03:46:11.405065Z",
     "shell.execute_reply.started": "2023-11-07T03:45:47.234550Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 处理流水表日期格式\n",
    "APS_QZ['APSDTRDAT_TM'] = pd.to_datetime(APS_QZ['APSDTRDAT_TM'], format = '%Y%m%d%H%M%S')\n",
    "# APS_QZ['APSDTRDAT_TM_day'] = APS_QZ['APSDTRDAT_TM'].dt.strftime('%Y%m%d')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 等额高频交易"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:46:11.406784Z",
     "iopub.status.busy": "2023-11-07T03:46:11.406594Z",
     "iopub.status.idle": "2023-11-07T03:49:28.752666Z",
     "shell.execute_reply": "2023-11-07T03:49:28.752021Z",
     "shell.execute_reply.started": "2023-11-07T03:46:11.406761Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mean\n",
      "max\n",
      "sum\n",
      "count\n"
     ]
    }
   ],
   "source": [
     "def same_amt_times(group,stat):\n",
     "    \"\"\"Aggregate the repeat-counts of equal transaction amounts for one card.\n",
     "\n",
     "    Counts how often each APSDTRAMT value occurs in the group, keeps only\n",
     "    amounts repeated more than 5 times, then returns the requested statistic\n",
     "    over those counts ('mean', 'max', 'sum' or 'count'). Any other `stat`\n",
     "    value falls through every if and returns None.\n",
     "    \"\"\"\n",
     "    amt_col = 'APSDTRAMT'\n",
     "    valuecounts = group[amt_col].value_counts()\n",
     "    # keep only amounts seen more than 5 times (equal-amount high-frequency)\n",
     "    valuecounts = valuecounts[valuecounts>5]\n",
     "    \n",
     "    if stat == 'mean':\n",
     "        return valuecounts.mean()\n",
     "    if stat == 'max':\n",
     "        return valuecounts.max()\n",
     "    if stat == 'sum':\n",
     "        return valuecounts.sum()\n",
     "    if stat == 'count':\n",
     "        return valuecounts.count()\n",
     "\n",
     "def same_amt_days(group):\n",
     "    \"\"\"Binary flag: 1 if any amount repeats more than 5 times in the group.\n",
     "\n",
     "    NOTE(review): defined but never called in this notebook chunk.\n",
     "    \"\"\"\n",
     "    amt_col = 'APSDTRAMT'\n",
     "    valuecounts = group[amt_col].value_counts()\n",
     "    return 1 if valuecounts[valuecounts>5].size>=1 else 0\n",
    "\n",
    "\n",
    "same_amt_times_res = TARGET_QZ[['CUST_NO','CARD_NO']]\n",
    "APS_QZ_group = pd.DataFrame(APS_QZ.groupby('APSDPRDNO'))\n",
    "list_1 = ['mean','max','sum','count']\n",
    "\n",
    "# same_amt_tmp = APS_QZ.groupby('APSDPRDNO').apply(lambda x:same_amt_times(x, stat = 'mean'))\n",
    "\n",
    "for i in list_1:\n",
    "    print(i)     \n",
    "    same_amt_tmp = APS_QZ.groupby('APSDPRDNO').apply(lambda x:same_amt_times(x, stat = i)).reset_index()\n",
    "    same_amt_tmp.columns = ['CARD_NO', 'aps_6_same_amt_times'+i]\n",
    "    same_amt_times_res = same_amt_times_res.merge(same_amt_tmp, on='CARD_NO', how = 'left')\n",
    "\n",
    "\n",
    "\n",
    "## 保存文件\n",
    "import pickle\n",
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_same_amt_times_res.pkl\"\n",
    "\n",
    "# 使用pickle.dump()将特征矩阵保存为二进制文件\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(same_amt_times_res, file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 快进快出\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T03:49:28.753850Z",
     "iopub.status.busy": "2023-11-07T03:49:28.753651Z",
     "iopub.status.idle": "2023-11-07T05:13:42.538536Z",
     "shell.execute_reply": "2023-11-07T05:13:42.537958Z",
     "shell.execute_reply.started": "2023-11-07T03:49:28.753825Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/woody/anaconda3/lib/python3.8/site-packages/tqdm/std.py:668: FutureWarning: The Panel class is removed from pandas. Accessing it from the top-level namespace will also be removed in the next version\n",
      "  from pandas import Panel\n",
      "  0%|          | 32/42016 [00:05<1:10:49,  9.88it/s]<ipython-input-12-5b87dd07214b>:17: RuntimeWarning: divide by zero encountered in double_scalars\n",
      "  rate = group_bet[group_bet['APSDTRAMT']<0]['APSDTRAMT'].sum()/group_bet[group_bet['APSDTRAMT']>0]['APSDTRAMT'].sum()\n",
      "100%|██████████| 42016/42016 [1:23:44<00:00,  8.36it/s]  \n"
     ]
    }
   ],
   "source": [
    "APS_QZ = pd.concat([APS_QZ_train,APS_QZ_B],axis=0)\n",
    "APS_QZ['APSDTRDAT_TM'] = pd.to_datetime(APS_QZ['APSDTRDAT_TM'], format = '%Y%m%d%H%M%S')\n",
    "\n",
     "def fast_in_out(group):\n",
     "    \"\"\"Detect a 'fast in, fast out' pattern in one card's transaction flow.\n",
     "\n",
     "    Scans a 60-minute window starting at each transaction (in time order).\n",
     "    If a window holds at least 10 transactions and the ratio of the outflow\n",
     "    sum to the inflow sum is within (0.9, 1.1) in absolute value, returns a\n",
     "    3-tuple (len(fast_in_amt)-1, inflow_sum, outflow_sum); the first element\n",
     "    is always 0 because fast_in_amt is never appended to. Returns None\n",
     "    implicitly when no window qualifies.\n",
     "\n",
     "    NOTE(review): a window with zero inflow makes `rate` a divide-by-zero\n",
     "    (see the RuntimeWarning recorded in this cell's output); the resulting\n",
     "    inf fails the 0.9-1.1 band, so such rows are skipped rather than crashing.\n",
     "    \"\"\"\n",
     "    group = group.sort_values(by='APSDTRDAT_TM')\n",
     "    fast_in_amt = [0]\n",
     "    fast_out_amt = [0]\n",
     "\n",
     "    for index,row in group.iterrows():\n",
     "        # 60-minute window starting at this transaction\n",
     "        begin_time = row['APSDTRDAT_TM']\n",
     "        end_time = row['APSDTRDAT_TM']+pd.Timedelta(minutes = 60)\n",
     "        group_bet = group[(group['APSDTRDAT_TM']>=begin_time) & (group['APSDTRDAT_TM']<=end_time) ]\n",
     "        \n",
     "        # require at least 10 transactions inside the window\n",
     "        if group_bet.shape[0] < 10: continue\n",
     "\n",
     "            \n",
     "        rate = group_bet[group_bet['APSDTRAMT']<0]['APSDTRAMT'].sum()/group_bet[group_bet['APSDTRAMT']>0]['APSDTRAMT'].sum()\n",
     "        \n",
     "        if abs(rate)>0.9 and abs(rate)<1.1 :\n",
     "                \n",
     "            \n",
     "#             fast_in_amt.append(group_bet[group_bet['APSDTRAMT']>0]['APSDTRAMT'].sum())\n",
     "#             fast_out_amt.append(group_bet[group_bet['APSDTRAMT']<0]['APSDTRAMT'].sum())\n",
     "            \n",
     "            return len(fast_in_amt)-1,group_bet[group_bet['APSDTRAMT']>0]['APSDTRAMT'].sum(),group_bet[group_bet['APSDTRAMT']<0]['APSDTRAMT'].sum()\n",
    "#     print(1)\n",
    "    \n",
    "    \n",
    "# return len(fast_in_amt),sum(fast_in_amt),sum(fast_out_amt),max(fast_in_amt),max(fast_out_amt)\n",
    "tqdm.tqdm.pandas()\n",
    "\n",
    "same_amt_tmp = APS_QZ.groupby('APSDPRDNO').progress_apply(lambda x:fast_in_out(x)).reset_index()  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:13:42.539647Z",
     "iopub.status.busy": "2023-11-07T05:13:42.539454Z",
     "iopub.status.idle": "2023-11-07T05:13:42.766108Z",
     "shell.execute_reply": "2023-11-07T05:13:42.765591Z",
     "shell.execute_reply.started": "2023-11-07T05:13:42.539621Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-13-b256c04cf546>:18: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  aps_fast_in_out['aps_fast_in_out'] = aps_fast_in_out_flag\n",
      "<ipython-input-13-b256c04cf546>:19: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  aps_fast_in_out['aps_fast_in_amt_flag'] = aps_fast_in_amt_flag\n",
      "<ipython-input-13-b256c04cf546>:20: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  aps_fast_in_out['aps_fast_out_amt_flag'] = aps_fast_out_amt_flag\n"
     ]
    }
   ],
   "source": [
    "aps_fast_in_out = same_amt_tmp[~same_amt_tmp[0].isnull()]\n",
    "aps_fast_in_out.columns = ['APSDPRDNO', 'data']\n",
    "\n",
    "aps_fast_in_out_flag = []\n",
    "aps_fast_in_amt_flag = []\n",
    "aps_fast_out_amt_flag = []\n",
    "\n",
    "for index,row in aps_fast_in_out.iterrows():\n",
    "#     print(row['data'][1])\n",
    "    \n",
    "    a = row['data'][1]\n",
    "    b = row['data'][2]\n",
    "\n",
    "    aps_fast_in_out_flag.append(1)\n",
    "    aps_fast_in_amt_flag.append(a)\n",
    "    aps_fast_out_amt_flag.append(b)\n",
    "    \n",
    "aps_fast_in_out['aps_fast_in_out'] = aps_fast_in_out_flag\n",
    "aps_fast_in_out['aps_fast_in_amt_flag'] = aps_fast_in_amt_flag\n",
    "aps_fast_in_out['aps_fast_out_amt_flag'] = aps_fast_out_amt_flag\n",
    "\n",
    "aps_fast_inout_feature = aps_fast_in_out.drop('data',axis = 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:13:42.767082Z",
     "iopub.status.busy": "2023-11-07T05:13:42.766899Z",
     "iopub.status.idle": "2023-11-07T05:13:42.772659Z",
     "shell.execute_reply": "2023-11-07T05:13:42.772189Z",
     "shell.execute_reply.started": "2023-11-07T05:13:42.767059Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_aps_fast_inout_feature.pkl\"\n",
    "\n",
    "# 使用pickle.dump()将特征矩阵保存为二进制文件\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(aps_fast_inout_feature, file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 近N天陌生交易对手个数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:13:42.773565Z",
     "iopub.status.busy": "2023-11-07T05:13:42.773391Z",
     "iopub.status.idle": "2023-11-07T05:20:31.921387Z",
     "shell.execute_reply": "2023-11-07T05:20:31.920816Z",
     "shell.execute_reply.started": "2023-11-07T05:13:42.773544Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 42016/42016 [01:13<00:00, 570.45it/s]\n",
      "  0%|          | 0/42016 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 42016/42016 [01:17<00:00, 542.11it/s]\n",
      "  0%|          | 0/42016 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 42016/42016 [01:20<00:00, 523.01it/s]\n",
      "  0%|          | 0/42016 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 42016/42016 [01:21<00:00, 513.08it/s]\n",
      "  0%|          | 0/42016 [00:00<?, ?it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 42016/42016 [01:23<00:00, 505.71it/s]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "APS_QZ_flag = APS_QZ.rename(columns = {'APSDPRDNO':'CARD_NO'}).merge(TARGET_QZ, on = 'CARD_NO', how = 'left')\n",
    "APS_QZ_flag['DATA_DAT'] = pd.to_datetime(APS_QZ_flag['DATA_DAT'], format = '%Y%m%d')\n",
    "\n",
    "# days between the observation date and each transaction\n",
    "APS_QZ_flag['days'] = (APS_QZ_flag['DATA_DAT'] - APS_QZ_flag['APSDTRDAT_TM']).dt.days\n",
    "\n",
    "\n",
    "def stranger_num(group, day):\n",
    "    \"\"\"Count counterparties seen within `day` days that never appeared earlier.\n",
    "\n",
    "    group: one card's transactions (needs 'days' and 'APSDCPTPRDNO' columns)\n",
    "    day:   window size in days\n",
    "    \"\"\"\n",
    "    recent = group[group['days'] <= day]['APSDCPTPRDNO'].unique()\n",
    "    # set lookup instead of `in ndarray` -- O(1) membership per element\n",
    "    older = set(group[group['days'] > day]['APSDCPTPRDNO'].unique())\n",
    "    return sum(1 for element in recent if element not in older)\n",
    "\n",
    "\n",
    "stranger_num_res = TARGET_QZ[['CUST_NO','CARD_NO']]\n",
    "APS_QZ_group = APS_QZ_flag.groupby('CARD_NO')\n",
    "tqdm.tqdm.pandas()\n",
    "for day in range(1,15,3):\n",
    "    print(day)\n",
    "    # the window is offset by 30 days, as in the original notebook\n",
    "    # result kept in a separate name so the function is not shadowed\n",
    "    stranger_num_df = APS_QZ_group.progress_apply(lambda x: stranger_num(x, day+30)).reset_index()\n",
    "    stranger_num_df.columns = ['CARD_NO','stranger_num'+str(day)]\n",
    "    stranger_num_res = stranger_num_res.merge(stranger_num_df, how = 'left', on = 'CARD_NO')\n",
    "\n",
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_stranger_num_res.pkl\"\n",
    "\n",
    "# save the feature matrix as a pickle binary file\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(stranger_num_res, file)\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 夜间交易对手"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:20:31.922536Z",
     "iopub.status.busy": "2023-11-07T05:20:31.922335Z",
     "iopub.status.idle": "2023-11-07T05:20:34.258137Z",
     "shell.execute_reply": "2023-11-07T05:20:34.257532Z",
     "shell.execute_reply.started": "2023-11-07T05:20:31.922511Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "APS_QZ_flag['hour'] = APS_QZ_flag['APSDTRDAT_TM'].dt.hour\n",
    "\n",
    "# night-time (hour 0..6 inclusive) transaction features, per card\n",
    "night_mask = (APS_QZ_flag['hour'] >= 0) & (APS_QZ_flag['hour'] <= 6)\n",
    "APS_QZ_night = APS_QZ_flag[night_mask]\n",
    "\n",
    "# .reset_index() on every aggregate so all merges join on the CARD_NO column\n",
    "APS_QZ_night_cpt = APS_QZ_night.groupby('CARD_NO')['APSDCPTPRDNO'].agg('count').reset_index()\n",
    "APS_QZ_night_amt = APS_QZ_night.groupby('CARD_NO')['APSDTRAMT'].agg('sum').reset_index()\n",
    "# NOTE(review): here '_in' sums negative amounts and '_out' positive ones, the\n",
    "# opposite of the day-level cell below -- kept as-is to preserve feature values;\n",
    "# confirm the intended sign convention\n",
    "APS_QZ_night_amt_in = APS_QZ_night[APS_QZ_night['APSDTRAMT'] < 0].groupby('CARD_NO')['APSDTRAMT'].agg('sum').reset_index()\n",
    "APS_QZ_night_amt_out = APS_QZ_night[APS_QZ_night['APSDTRAMT'] > 0].groupby('CARD_NO')['APSDTRAMT'].agg('sum').reset_index()\n",
    "\n",
    "# merge the four aggregates into one frame\n",
    "APS_QZ_night_cpt = APS_QZ_night_cpt.merge(APS_QZ_night_amt, on = 'CARD_NO', how='left')\n",
    "APS_QZ_night_cpt = APS_QZ_night_cpt.merge(APS_QZ_night_amt_in, on = 'CARD_NO', how='left')\n",
    "APS_QZ_night_cpt = APS_QZ_night_cpt.merge(APS_QZ_night_amt_out, on = 'CARD_NO', how='left')\n",
    "\n",
    "APS_QZ_night_cpt.columns = ['CARD_NO','APSDCPTPRDNO_night_cpt','APSDCPTPRDNO_night_amt','APSDCPTPRDNO_night_amt_in','APSDCPTPRDNO_night_amt_out']\n",
    "\n",
    "# save features\n",
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_APS_QZ_night_cpt.pkl\"\n",
    "\n",
    "# save the feature matrix as a pickle binary file\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(APS_QZ_night_cpt, file)\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-24T13:02:02.458379Z",
     "iopub.status.busy": "2023-10-24T13:02:02.458100Z",
     "iopub.status.idle": "2023-10-24T13:02:02.992164Z",
     "shell.execute_reply": "2023-10-24T13:02:02.991568Z",
     "shell.execute_reply.started": "2023-10-24T13:02:02.458350Z"
    },
    "tags": []
   },
   "source": [
    "### 日借贷方特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:20:34.259307Z",
     "iopub.status.busy": "2023-11-07T05:20:34.259106Z",
     "iopub.status.idle": "2023-11-07T05:21:33.653201Z",
     "shell.execute_reply": "2023-11-07T05:21:33.652587Z",
     "shell.execute_reply.started": "2023-11-07T05:20:34.259282Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "## Daily debit/credit features from the transaction table:\n",
    "#  - number of distinct trading hours inside each window\n",
    "#  - credit (positive) / debit (negative) transaction counts and sums\n",
    "#  - credit-to-debit count and amount ratios\n",
    "APS_QZ = pd.concat([APS_QZ_train,APS_QZ_B],axis=0)\n",
    "APS_QZ = APS_QZ.rename(columns = {'APSDPRDNO':'CARD_NO'}).merge(TARGET_QZ, how='left', on = 'CARD_NO')\n",
    "\n",
    "APS_QZ['APSDTRDAT_TM'] = pd.to_datetime(APS_QZ['APSDTRDAT_TM'] , format = '%Y%m%d%H%M%S')\n",
    "APS_QZ['APSDTRDAT_TM_H'] = APS_QZ['APSDTRDAT_TM'].dt.hour\n",
    "APS_QZ['DATA_DAT'] = pd.to_datetime(APS_QZ['DATA_DAT'] , format = '%Y%m%d')\n",
    "APS_QZ['DATA_DAT_diff'] = ( APS_QZ['DATA_DAT'] - APS_QZ['APSDTRDAT_TM'] ).dt.days\n",
    "\n",
    "APS_QZ_feature = TARGET_QZ\n",
    "for day in [7,14,21,28,35]:\n",
    "    # transactions within the (day+30)-day lookback window\n",
    "    window = APS_QZ[APS_QZ['DATA_DAT_diff'] < (day+30)]\n",
    "\n",
    "    # distinct trading hours per card inside the window\n",
    "    hours_cnt = window.groupby('CARD_NO')['APSDTRDAT_TM_H'].agg('nunique').reset_index()\n",
    "    hours_cnt.columns = ['CARD_NO','APSDTRDAT_TM_H'+str(day)]\n",
    "    APS_QZ_feature = APS_QZ_feature.merge(hours_cnt, on = 'CARD_NO', how = 'left')\n",
    "\n",
    "    # positive-amount ('in') and negative-amount ('out') counts and sums\n",
    "    credit = window[window['APSDTRAMT'] > 0].groupby('CARD_NO')['APSDTRAMT'].agg(['count','sum']).reset_index()\n",
    "    debit = window[window['APSDTRAMT'] < 0].groupby('CARD_NO')['APSDTRAMT'].agg(['count','sum']).reset_index()\n",
    "\n",
    "    credit.columns = ['CARD_NO','APSDTRAMT_in_count_'+str(day),'APSDTRAMT_in_sum_'+str(day)]\n",
    "    debit.columns = ['CARD_NO','APSDTRAMT_out_count_'+str(day),'APSDTRAMT_out_sum_'+str(day)]\n",
    "\n",
    "    APS_QZ_feature = APS_QZ_feature.merge(credit, on = 'CARD_NO', how = 'left')\n",
    "    APS_QZ_feature = APS_QZ_feature.merge(debit, on = 'CARD_NO', how = 'left')\n",
    "\n",
    "    # in/out ratios for counts and amounts\n",
    "    APS_QZ_feature['APSDTRAMT_count_rate_'+str(day)] = APS_QZ_feature['APSDTRAMT_in_count_'+str(day)]/APS_QZ_feature['APSDTRAMT_out_count_'+str(day)] \n",
    "    APS_QZ_feature['APSDTRAMT_sum_rate_'+str(day)] = APS_QZ_feature['APSDTRAMT_in_sum_'+str(day)]/APS_QZ_feature['APSDTRAMT_out_sum_'+str(day)] \n",
    "\n",
    "APS_QZ_inoutday_feature = APS_QZ_feature.drop(['DATA_DAT', 'CARD_NO', 'FLAG'],axis = 1)\n",
    "\n",
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_APS_QZ_inoutday_feature.pkl\"\n",
    "\n",
    "# save the feature matrix as a pickle binary file\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(APS_QZ_inoutday_feature, file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "### 小额测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:21:33.654309Z",
     "iopub.status.busy": "2023-11-07T05:21:33.654113Z",
     "iopub.status.idle": "2023-11-07T05:57:53.653768Z",
     "shell.execute_reply": "2023-11-07T05:57:53.653121Z",
     "shell.execute_reply.started": "2023-11-07T05:21:33.654285Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 47/42016 [00:06<1:31:28,  7.65it/s] <ipython-input-18-2a21bcf7d6f1>:5: RuntimeWarning: divide by zero encountered in double_scalars\n",
      "  if abs(group.iloc[i+1]['APSDTRAMT']/group.iloc[i]['APSDTRAMT']) > 3 :  times +=1\n",
      "100%|██████████| 42016/42016 [36:18<00:00, 19.29it/s]  \n"
     ]
    }
   ],
   "source": [
    "\n",
    "def small_test(group):\n",
    "    \"\"\"Count consecutive (time-ordered) transaction pairs whose amount jumps by more than 3x.\n",
    "\n",
    "    Fix: the original sorted into group_tmp but then iterated the UNSORTED\n",
    "    group, so the chronological order was ignored; now the sorted frame is used.\n",
    "    Vectorized to replace the O(n) row-by-row .iloc loop.\n",
    "    \"\"\"\n",
    "    group_tmp = group.sort_values(by = 'APSDTRDAT_TM')\n",
    "    amts = group_tmp['APSDTRAMT'].to_numpy()\n",
    "    if len(amts) < 2:\n",
    "        return 0\n",
    "    # inf from a zero denominator still counts as a jump (matches the original\n",
    "    # comparison semantics); errstate silences the divide-by-zero RuntimeWarning\n",
    "    with np.errstate(divide='ignore', invalid='ignore'):\n",
    "        ratios = np.abs(amts[1:] / amts[:-1])\n",
    "    return int((ratios > 3).sum())\n",
    "\n",
    "tqdm.tqdm.pandas()\n",
    "APS_QZ_small_test = APS_QZ.groupby('CARD_NO').progress_apply(lambda x:small_test(x)).reset_index() \n",
    "APS_QZ_small_test.columns = ['CARD_NO','APS_QZ_small_test']\n",
    "\n",
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_APS_QZ_small_test.pkl\"\n",
    "\n",
    "# save the feature matrix as a pickle binary file\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(APS_QZ_small_test, file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-27T08:11:22.494126Z",
     "iopub.status.busy": "2023-10-27T08:11:22.493715Z",
     "iopub.status.idle": "2023-10-27T08:11:22.497656Z",
     "shell.execute_reply": "2023-10-27T08:11:22.496930Z",
     "shell.execute_reply.started": "2023-10-27T08:11:22.494084Z"
    }
   },
   "source": [
    "### 单日n笔交易以上天数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 单日n笔交易以上天数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:57:53.654970Z",
     "iopub.status.busy": "2023-11-07T05:57:53.654773Z",
     "iopub.status.idle": "2023-11-07T05:58:04.815513Z",
     "shell.execute_reply": "2023-11-07T05:58:04.814857Z",
     "shell.execute_reply.started": "2023-11-07T05:57:53.654945Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "5\n",
      "10\n",
      "15\n",
      "20\n",
      "25\n",
      "30\n",
      "35\n",
      "40\n",
      "45\n"
     ]
    }
   ],
   "source": [
    "# Per card: number of days with more than n transactions, for n in 5..45\n",
    "APS_QZ_date = APS_QZ.copy()\n",
    "APS_QZ_date['APSDTRDAT_TM_date'] = APS_QZ_date['APSDTRDAT_TM'].dt.date\n",
    "# transactions per (card, calendar day)\n",
    "APS_QZ_count = APS_QZ_date.groupby(['CARD_NO','APSDTRDAT_TM_date'])['CARD_NO'].agg(['count']).reset_index()\n",
    "\n",
    "aps_trans_days = TARGET_QZ[['CUST_NO','CARD_NO']]\n",
    "for counts in range(5,50,5):\n",
    "    print(counts)\n",
    "    # days on which this card made more than `counts` transactions\n",
    "    busy_days = APS_QZ_count[APS_QZ_count['count'] > counts].groupby('CARD_NO')['APSDTRDAT_TM_date'].agg('count').reset_index()\n",
    "    busy_days.columns = ['CARD_NO','APSDTRDAT_TM_date_count'+str(counts)]\n",
    "    aps_trans_days = aps_trans_days.merge(busy_days, on = 'CARD_NO', how = 'left')\n",
    "aps_trans_days = aps_trans_days.fillna(0).drop('CARD_NO',axis =1)\n",
    "\n",
    "CSTLOGQUERY_feature_file_name = tmp_path+\"/B_aps_trans_days_feature.pkl\"\n",
    "\n",
    "# save the feature matrix as a pickle binary file\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(aps_trans_days, file)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 保存特征矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:04.816756Z",
     "iopub.status.busy": "2023-11-07T05:58:04.816546Z",
     "iopub.status.idle": "2023-11-07T05:58:05.307186Z",
     "shell.execute_reply": "2023-11-07T05:58:05.306525Z",
     "shell.execute_reply.started": "2023-11-07T05:58:04.816731Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "\n",
    "def _load_pkl(path):\n",
    "    \"\"\"Load one pickled feature table from `path`.\"\"\"\n",
    "    with open(path, 'rb') as f:\n",
    "        return pickle.load(f)\n",
    "\n",
    "APS_QZ_inoutday_feature = _load_pkl(tmp_path+'/B_APS_QZ_inoutday_feature.pkl')\n",
    "APS_QZ_night_cpt = _load_pkl(tmp_path+'/B_APS_QZ_night_cpt.pkl')\n",
    "same_amt_times_res = _load_pkl(tmp_path+'/B_same_amt_times_res.pkl').drop('CARD_NO',axis = 1)\n",
    "stranger_num_res = _load_pkl(tmp_path+'/B_stranger_num_res.pkl').drop('CARD_NO',axis = 1)\n",
    "APS_QZ_small_test = _load_pkl(tmp_path+'/B_APS_QZ_small_test.pkl')\n",
    "aps_fast_inout_feature = _load_pkl(tmp_path+'/B_aps_fast_inout_feature.pkl')\n",
    "aps_trans_days_feature = _load_pkl(tmp_path+'/B_aps_trans_days_feature.pkl')\n",
    "\n",
    "# assemble the final feature matrix, joining on CUST_NO or CARD_NO per table\n",
    "feature_matrix = TARGET_QZ[['CUST_NO','CARD_NO']]\n",
    "feature_matrix = feature_matrix.merge(APS_QZ_inoutday_feature, on = 'CUST_NO', how = 'left')\n",
    "feature_matrix = feature_matrix.merge(APS_QZ_night_cpt, on = 'CARD_NO', how = 'left')\n",
    "feature_matrix = feature_matrix.merge(same_amt_times_res, on = 'CUST_NO', how = 'left')\n",
    "feature_matrix = feature_matrix.merge(stranger_num_res, on = 'CUST_NO', how = 'left')\n",
    "feature_matrix = feature_matrix.merge(APS_QZ_small_test, on = 'CARD_NO', how = 'left')\n",
    "feature_matrix = feature_matrix.merge(aps_fast_inout_feature.rename(columns = {'APSDPRDNO':'CARD_NO'}), on = 'CARD_NO', how = 'left')\n",
    "feature_matrix = feature_matrix.merge(aps_trans_days_feature, on = 'CUST_NO', how = 'left')\n",
    "\n",
    "feature_matrix = feature_matrix.drop('CARD_NO' ,axis = 1)\n",
    "\n",
    "feature_matrix_path = feature_path+\"/B_LYH_4_work_feature_v2.pkl\"\n",
    "\n",
    "# save the feature matrix as a pickle binary file\n",
    "with open(feature_matrix_path, 'wb') as file:\n",
    "    pickle.dump(feature_matrix, file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-26T01:47:33.128647Z",
     "iopub.status.busy": "2023-10-26T01:47:33.128360Z",
     "iopub.status.idle": "2023-10-26T01:47:33.131469Z",
     "shell.execute_reply": "2023-10-26T01:47:33.130964Z",
     "shell.execute_reply.started": "2023-10-26T01:47:33.128618Z"
    }
   },
   "source": [
    "## LR模型融合特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-26T06:46:18.124532Z",
     "iopub.status.busy": "2023-10-26T06:46:18.124252Z",
     "iopub.status.idle": "2023-10-26T06:46:18.127478Z",
     "shell.execute_reply": "2023-10-26T06:46:18.126870Z",
     "shell.execute_reply.started": "2023-10-26T06:46:18.124503Z"
    }
   },
   "source": [
    "### 存款表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:05.308428Z",
     "iopub.status.busy": "2023-11-07T05:58:05.308228Z",
     "iopub.status.idle": "2023-11-07T05:58:06.099725Z",
     "shell.execute_reply": "2023-11-07T05:58:06.098865Z",
     "shell.execute_reply.started": "2023-11-07T05:58:05.308403Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['DPSA_BAL', 'MAVER_DPSA_BAL', 'SAVER_DPSA_BAL', 'YAVER_DPSA_BAL', 'TD_BAL', 'MAVER_TD_BAL', 'SAVER_TD_BAL', 'YAVER_TD_BAL']\n",
      "最佳阈值:  0.08993777197517526\n",
      "打印分类报告\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "         0.0       0.98      0.80      0.88      7006\n",
      "         1.0       0.15      0.67      0.25       391\n",
      "\n",
      "    accuracy                           0.79      7397\n",
      "   macro avg       0.57      0.73      0.56      7397\n",
      "weighted avg       0.93      0.79      0.84      7397\n",
      "\n",
      "(0.40141494924638577, 0.6675191815856778, 0.15471250740960285, 0.08993777197517526) 0.7940827847230689\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-21-d0e549150f65>:38: RuntimeWarning: invalid value encountered in true_divide\n",
      "  f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n"
     ]
    }
   ],
   "source": [
    "DP_CUST_SUM_QZ_flag = DP_CUST_SUM_QZ.merge(TARGET_QZ[['CUST_NO','FLAG']],on = 'CUST_NO', how = 'left')\n",
    "\n",
    "# 训练集\n",
    "train_df = DP_CUST_SUM_QZ_flag[~DP_CUST_SUM_QZ_flag['FLAG'].isnull()].reset_index(drop=True)\n",
    "# 测试集\n",
    "test_df = DP_CUST_SUM_QZ_flag[DP_CUST_SUM_QZ_flag['FLAG'].isnull()]\n",
    "\n",
    "\n",
    "# 预删除特征 \n",
    "drop_cols = ['DATA_DAT','CARD_NO','CUST_NO', 'FLAG']\n",
    "feature_name = [i for i in train_df.columns if i not in drop_cols]\n",
    "\n",
    "# 构建训练数据和测试数据\n",
    "X_train = train_df.copy()\n",
    "y = X_train['FLAG']\n",
    "X_train = X_train[feature_name]\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    # convert predicted probabilities to 0/1 labels at the given threshold\n",
    "    # fix: original line was garbled into `return classesprint(feature_name)`,\n",
    "    # which would raise NameError if this function were called\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classes\n",
    "\n",
    "print(feature_name)\n",
    "\n",
    "def f_score(y_true, y_pred):\n",
    "\n",
    "    precisions, recalls, thresholds = precision_recall_curve(y_true, y_pred)\n",
    "\n",
    "    # F1\n",
    "#     f1_scores = (2 * precisions * recalls) / (precisions + recalls)\n",
    "#     best_t = thresholds[np.argmax(f1_scores[np.isfinite(f1_scores)])]\n",
    "#     y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "#     recall = recall_score(y_true, y_1)\n",
    "#     precision = precision_score(y_true, y_1)\n",
    "#     F_score = f1_score(y_true, y_1)\n",
    "#     F_score = (2 * precision * recall) / (precision + recall)\n",
    "\n",
    "    # F2\n",
    "    f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n",
    "    best_t = thresholds[np.argmax(f2_scores[np.isfinite(f2_scores)])]\n",
    "    y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "    recall = recall_score(y_true, y_1)\n",
    "    precision = precision_score(y_true, y_1)\n",
    "    F_score = (5 * precision * recall) / (4*precision + recall)\n",
    "\n",
    "    #print(f\"valid's f1: {F_score}\")\n",
    "    print(\"最佳阈值: \", str(best_t))\n",
    "    print('打印分类报告')\n",
    "    clf_report1 = classification_report(y_true.values, y_1)\n",
    "    print(clf_report1)\n",
    "\n",
    "    return F_score, recall, precision, best_t\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classes\n",
    "\n",
    "xtrain,xtest,ytrain,ytest = train_test_split(X_train,y,test_size=0.2,random_state=42)\n",
    "\n",
    "model = LogisticRegression(C=1.0,solver='lbfgs',max_iter=100,random_state=42)\n",
    "model.fit(xtrain,ytrain)\n",
    "\n",
    "y_pred = model.predict_proba(xtest)[:,1]\n",
    "f  = f_score(ytest, y_pred)\n",
    "\n",
    "roc_auc = roc_auc_score(ytest,y_pred)\n",
    "\n",
    "\n",
    "print(f,roc_auc)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:06.101047Z",
     "iopub.status.busy": "2023-11-07T05:58:06.100806Z",
     "iopub.status.idle": "2023-11-07T05:58:06.185205Z",
     "shell.execute_reply": "2023-11-07T05:58:06.125658Z",
     "shell.execute_reply.started": "2023-11-07T05:58:06.101010Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-22-e449666a26e3>:5: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  DP_CUST_LR_feature['DP_CUST_LR'] = y_DP_CUST_LR\n",
      "<ipython-input-22-e449666a26e3>:6: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  DP_CUST_LR_feature['DP_CUST_LR_linear'] = y_DP_CUST_LR_linear\n"
     ]
    }
   ],
   "source": [
    "# score every row (train + test) with the fitted LR model\n",
    "y_DP_CUST_LR = model.predict_proba(DP_CUST_SUM_QZ_flag[feature_name])[:,1]\n",
    "y_DP_CUST_LR_linear = model.decision_function(DP_CUST_SUM_QZ_flag[feature_name])\n",
    "\n",
    "# .copy() so the new score columns don't trigger SettingWithCopyWarning\n",
    "DP_CUST_LR_feature = DP_CUST_SUM_QZ_flag[['CUST_NO','FLAG']].copy()\n",
    "DP_CUST_LR_feature['DP_CUST_LR'] = y_DP_CUST_LR\n",
    "DP_CUST_LR_feature['DP_CUST_LR_linear'] = y_DP_CUST_LR_linear\n",
    "DP_CUST_LR_feature = DP_CUST_LR_feature.drop('FLAG', axis = 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-26T06:47:12.589314Z",
     "iopub.status.busy": "2023-10-26T06:47:12.589030Z",
     "iopub.status.idle": "2023-10-26T06:47:12.592128Z",
     "shell.execute_reply": "2023-10-26T06:47:12.591569Z",
     "shell.execute_reply.started": "2023-10-26T06:47:12.589284Z"
    }
   },
   "source": [
    "### 资产表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:06.186500Z",
     "iopub.status.busy": "2023-11-07T05:58:06.186278Z",
     "iopub.status.idle": "2023-11-07T05:58:06.325105Z",
     "shell.execute_reply": "2023-11-07T05:58:06.324466Z",
     "shell.execute_reply.started": "2023-11-07T05:58:06.186470Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['DAY_FA_BAL', 'MAVER_FA_BAL', 'SAVER_FA_BAL', 'YAVER_FA_BAL', 'DAY_AUM_BAL', 'MAVER_AUM_BAL', 'SAVER_AUM_BAL', 'YAVER_AUM_BAL', 'TOT_IVST_BAL', 'MAVER_TOT_IVST_BAL', 'SAVER_TOT_IVST_BAL', 'YAVER_TOT_IVST_BAL']\n"
     ]
    }
   ],
   "source": [
    "CUST_FA_SUM_QZ_flag = CUST_FA_SUM_QZ.merge(TARGET_QZ[['CUST_NO','FLAG']],on = 'CUST_NO', how = 'left')\n",
    "# 训练集\n",
    "train_df = CUST_FA_SUM_QZ_flag[~CUST_FA_SUM_QZ_flag['FLAG'].isnull()].reset_index(drop=True)\n",
    "# 测试集\n",
    "test_df = CUST_FA_SUM_QZ_flag[CUST_FA_SUM_QZ_flag['FLAG'].isnull()]\n",
    "\n",
    "\n",
    "\n",
    "# 预删除特征 \n",
    "drop_cols = ['DATA_DAT','CARD_NO','CUST_NO', 'FLAG']\n",
    "feature_name = [i for i in train_df.columns if i not in drop_cols]\n",
    "\n",
    "# 构建训练数据和测试数据\n",
    "X_train = train_df.copy()\n",
    "y = X_train['FLAG']\n",
    "X_train = X_train[feature_name]\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    # convert predicted probabilities to 0/1 labels at the given threshold\n",
    "    # fix: original line was garbled into `return classesprint(feature_name)`,\n",
    "    # which would raise NameError if this function were called\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classes\n",
    "\n",
    "print(feature_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:06.326102Z",
     "iopub.status.busy": "2023-11-07T05:58:06.325910Z",
     "iopub.status.idle": "2023-11-07T05:58:07.204860Z",
     "shell.execute_reply": "2023-11-07T05:58:07.204095Z",
     "shell.execute_reply.started": "2023-11-07T05:58:06.326079Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳阈值:  0.09759254254437273\n",
      "打印分类报告\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "         0.0       0.98      0.82      0.89      7046\n",
      "         1.0       0.15      0.62      0.24       364\n",
      "\n",
      "    accuracy                           0.81      7410\n",
      "   macro avg       0.56      0.72      0.56      7410\n",
      "weighted avg       0.94      0.81      0.86      7410\n",
      "\n",
      "(0.37812288993923027, 0.6153846153846154, 0.14873837981407703, 0.09759254254437273) 0.7886539553265355\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/woody/anaconda3/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:460: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "  n_iter_i = _check_optimize_result(\n",
      "<ipython-input-24-22fab856b6fa>:15: RuntimeWarning: invalid value encountered in true_divide\n",
      "  f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n"
     ]
    }
   ],
   "source": [
    "def f_score(y_true, y_pred):\n",
    "\n",
    "    precisions, recalls, thresholds = precision_recall_curve(y_true, y_pred)\n",
    "\n",
    "    # F1\n",
    "#     f1_scores = (2 * precisions * recalls) / (precisions + recalls)\n",
    "#     best_t = thresholds[np.argmax(f1_scores[np.isfinite(f1_scores)])]\n",
    "#     y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "#     recall = recall_score(y_true, y_1)\n",
    "#     precision = precision_score(y_true, y_1)\n",
    "#     F_score = f1_score(y_true, y_1)\n",
    "#     F_score = (2 * precision * recall) / (precision + recall)\n",
    "\n",
    "    # F2\n",
    "    f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n",
    "    best_t = thresholds[np.argmax(f2_scores[np.isfinite(f2_scores)])]\n",
    "    y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "    recall = recall_score(y_true, y_1)\n",
    "    precision = precision_score(y_true, y_1)\n",
    "    F_score = (5 * precision * recall) / (4*precision + recall)\n",
    "\n",
    "    #print(f\"valid's f1: {F_score}\")\n",
    "    print(\"最佳阈值: \", str(best_t))\n",
    "    print('打印分类报告')\n",
    "    clf_report1 = classification_report(y_true.values, y_1)\n",
    "    print(clf_report1)\n",
    "\n",
    "    return F_score, recall, precision, best_t\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classes\n",
    "\n",
    "xtrain,xtest,ytrain,ytest = train_test_split(X_train,y,test_size=0.2,random_state=42)\n",
    "\n",
    "model = LogisticRegression(C=1.0,solver='lbfgs',max_iter=100,random_state=42)\n",
    "model.fit(xtrain,ytrain)\n",
    "\n",
    "y_pred = model.predict_proba(xtest)[:,1]\n",
    "f  = f_score(ytest, y_pred)\n",
    "\n",
    "# y_pred_class = convert_prob_to_class(y_pred, 0.5)\n",
    "\n",
    "# f1 = f1_score(ytest,y_pred_class)\n",
    "roc_auc = roc_auc_score(ytest,y_pred)\n",
    "\n",
    "\n",
    "\n",
    "print(f,roc_auc)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:07.206268Z",
     "iopub.status.busy": "2023-11-07T05:58:07.206026Z",
     "iopub.status.idle": "2023-11-07T05:58:07.291298Z",
     "shell.execute_reply": "2023-11-07T05:58:07.290637Z",
     "shell.execute_reply.started": "2023-11-07T05:58:07.206240Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-25-2e3ca041ea75>:5: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  CUST_FA_feature['CUST_FA_LR'] = y_CUST_FA_LR\n",
      "<ipython-input-25-2e3ca041ea75>:6: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  CUST_FA_feature['CUST_FA_linear'] = y_CUST_FA_linear\n"
     ]
    }
   ],
   "source": [
    "# score every row (train + test) with the fitted LR model\n",
    "y_CUST_FA_LR = model.predict_proba(CUST_FA_SUM_QZ_flag[feature_name])[:,1]\n",
    "y_CUST_FA_linear = model.decision_function(CUST_FA_SUM_QZ_flag[feature_name])\n",
    "\n",
    "# .copy() so the new score columns don't trigger SettingWithCopyWarning\n",
    "CUST_FA_feature = CUST_FA_SUM_QZ_flag[['CUST_NO','FLAG']].copy()\n",
    "CUST_FA_feature['CUST_FA_LR'] = y_CUST_FA_LR\n",
    "CUST_FA_feature['CUST_FA_linear'] = y_CUST_FA_linear\n",
    "CUST_FA_feature = CUST_FA_feature.drop('FLAG', axis = 1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 客户产品持有表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:07.292299Z",
     "iopub.status.busy": "2023-11-07T05:58:07.292121Z",
     "iopub.status.idle": "2023-11-07T05:58:07.415970Z",
     "shell.execute_reply": "2023-11-07T05:58:07.415385Z",
     "shell.execute_reply.started": "2023-11-07T05:58:07.292276Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>DATA_DAT</th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>PD_CNT</th>\n",
       "      <th>DP_IND</th>\n",
       "      <th>IL_IND</th>\n",
       "      <th>DCARD_IND</th>\n",
       "      <th>CCARD_IND</th>\n",
       "      <th>FNCG_IND</th>\n",
       "      <th>FUND_IND</th>\n",
       "      <th>BOND_IND</th>\n",
       "      <th>INSUR_IND</th>\n",
       "      <th>METAL_IND</th>\n",
       "      <th>PAY_IND</th>\n",
       "      <th>EBNK_IND</th>\n",
       "      <th>MB_IND</th>\n",
       "      <th>MS_IND</th>\n",
       "      <th>TDPT_PAY_IND</th>\n",
       "      <th>FLAG</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>19940614</td>\n",
       "      <td>d8649736e17aa30f6507646babb64e31</td>\n",
       "      <td>8</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>19940614</td>\n",
       "      <td>adcc2d5f9c28dd2e2e8eb01b3a61dc20</td>\n",
       "      <td>5</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>19940614</td>\n",
       "      <td>5f403e9b47be56e617b7153a6c35651b</td>\n",
       "      <td>6</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>19940614</td>\n",
       "      <td>01c2a4c34a3b0db9300169aa819d1893</td>\n",
       "      <td>5</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>19940614</td>\n",
       "      <td>f52f7eb640c40983ba05fb8ca68e1339</td>\n",
       "      <td>7</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   DATA_DAT                           CUST_NO  PD_CNT  DP_IND  IL_IND  \\\n",
       "0  19940614  d8649736e17aa30f6507646babb64e31       8       1       0   \n",
       "1  19940614  adcc2d5f9c28dd2e2e8eb01b3a61dc20       5       1       0   \n",
       "2  19940614  5f403e9b47be56e617b7153a6c35651b       6       1       0   \n",
       "3  19940614  01c2a4c34a3b0db9300169aa819d1893       5       1       0   \n",
       "4  19940614  f52f7eb640c40983ba05fb8ca68e1339       7       1       0   \n",
       "\n",
       "   DCARD_IND  CCARD_IND  FNCG_IND  FUND_IND  BOND_IND  INSUR_IND  METAL_IND  \\\n",
       "0          1          1         0         1         0          0          0   \n",
       "1          1          0         0         0         0          0          0   \n",
       "2          1          1         0         0         0          0          0   \n",
       "3          1          0         0         0         0          0          0   \n",
       "4          1          1         0         0         0          1          0   \n",
       "\n",
       "   PAY_IND  EBNK_IND  MB_IND  MS_IND  TDPT_PAY_IND  FLAG  \n",
       "0        0         1       1       1             1   0.0  \n",
       "1        0         1       1       0             1   0.0  \n",
       "2        0         1       1       1             1   0.0  \n",
       "3        0         1       1       1             1   0.0  \n",
       "4        0         1       1       1             1   0.0  "
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "TAGS_PROD_HOLD_QZ_flag = TAGS_PROD_HOLD_QZ.merge(TARGET_QZ[['CUST_NO','FLAG']],on = 'CUST_NO', how = 'left')#### 客户产品持有表\n",
    "TAGS_PROD_HOLD_QZ_flag.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:07.416972Z",
     "iopub.status.busy": "2023-11-07T05:58:07.416798Z",
     "iopub.status.idle": "2023-11-07T05:58:07.484680Z",
     "shell.execute_reply": "2023-11-07T05:58:07.484019Z",
     "shell.execute_reply.started": "2023-11-07T05:58:07.416951Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['PD_CNT', 'DP_IND', 'IL_IND', 'DCARD_IND', 'CCARD_IND', 'FNCG_IND', 'FUND_IND', 'BOND_IND', 'INSUR_IND', 'METAL_IND', 'PAY_IND', 'EBNK_IND', 'MB_IND', 'MS_IND', 'TDPT_PAY_IND']\n"
     ]
    }
   ],
   "source": [
    "TAGS_PROD_HOLD_QZ_flag = TAGS_PROD_HOLD_QZ.merge(TARGET_QZ[['CUST_NO','FLAG']],on = 'CUST_NO', how = 'left')\n",
    "# 训练集\n",
    "train_df = TAGS_PROD_HOLD_QZ_flag[~CUST_FA_SUM_QZ_flag['FLAG'].isnull()].reset_index(drop=True)\n",
    "# 测试集\n",
    "test_df = TAGS_PROD_HOLD_QZ_flag[CUST_FA_SUM_QZ_flag['FLAG'].isnull()]\n",
    "\n",
    "\n",
    "\n",
    "# 预删除特征 \n",
    "drop_cols = ['DATA_DAT','CARD_NO','CUST_NO', 'FLAG']\n",
    "feature_name = [i for i in train_df.columns if i not in drop_cols]\n",
    "\n",
    "# 构建训练数据和测试数据\n",
    "X_train = train_df.copy()\n",
    "y = X_train['FLAG']\n",
    "X_train = X_train[feature_name]\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classesprint(feature_name)\n",
    "\n",
    "print(feature_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:07.485954Z",
     "iopub.status.busy": "2023-11-07T05:58:07.485675Z",
     "iopub.status.idle": "2023-11-07T05:58:08.118625Z",
     "shell.execute_reply": "2023-11-07T05:58:08.117932Z",
     "shell.execute_reply.started": "2023-11-07T05:58:07.485920Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最佳阈值:  0.06705066519494139\n",
      "打印分类报告\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "         0.0       0.97      0.65      0.78      7016\n",
      "         1.0       0.10      0.69      0.17       394\n",
      "\n",
      "    accuracy                           0.65      7410\n",
      "   macro avg       0.54      0.67      0.48      7410\n",
      "weighted avg       0.93      0.65      0.75      7410\n",
      "\n",
      "(0.31570363466915186, 0.6878172588832487, 0.09977908689248896, 0.06705066519494139) 0.7234001759574924\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/woody/anaconda3/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:460: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "  n_iter_i = _check_optimize_result(\n"
     ]
    }
   ],
   "source": [
    "def f_score(y_true, y_pred):\n",
    "\n",
    "    precisions, recalls, thresholds = precision_recall_curve(y_true, y_pred)\n",
    "\n",
    "    # F1\n",
    "#     f1_scores = (2 * precisions * recalls) / (precisions + recalls)\n",
    "#     best_t = thresholds[np.argmax(f1_scores[np.isfinite(f1_scores)])]\n",
    "#     y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "#     recall = recall_score(y_true, y_1)\n",
    "#     precision = precision_score(y_true, y_1)\n",
    "#     F_score = f1_score(y_true, y_1)\n",
    "#     F_score = (2 * precision * recall) / (precision + recall)\n",
    "\n",
    "    # F2\n",
    "    f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n",
    "    best_t = thresholds[np.argmax(f2_scores[np.isfinite(f2_scores)])]\n",
    "    y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "    recall = recall_score(y_true, y_1)\n",
    "    precision = precision_score(y_true, y_1)\n",
    "    F_score = (5 * precision * recall) / (4*precision + recall)\n",
    "\n",
    "    #print(f\"valid's f1: {F_score}\")\n",
    "    print(\"最佳阈值: \", str(best_t))\n",
    "    print('打印分类报告')\n",
    "    clf_report1 = classification_report(y_true.values, y_1)\n",
    "    print(clf_report1)\n",
    "\n",
    "    return F_score, recall, precision, best_t\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classes\n",
    "\n",
    "xtrain,xtest,ytrain,ytest = train_test_split(X_train,y,test_size=0.2,random_state=42)\n",
    "\n",
    "model = LogisticRegression(C=1.0,solver='lbfgs',max_iter=100,random_state=42)\n",
    "model.fit(xtrain,ytrain)\n",
    "\n",
    "y_pred = model.predict_proba(xtest)[:,1]\n",
    "f  = f_score(ytest, y_pred)\n",
    "\n",
    "# y_pred_class = convert_prob_to_class(y_pred, 0.5)\n",
    "\n",
    "# f1 = f1_score(ytest,y_pred_class)\n",
    "roc_auc = roc_auc_score(ytest,y_pred)\n",
    "\n",
    "\n",
    "\n",
    "print(f,roc_auc)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:08.119741Z",
     "iopub.status.busy": "2023-11-07T05:58:08.119558Z",
     "iopub.status.idle": "2023-11-07T05:58:08.221104Z",
     "shell.execute_reply": "2023-11-07T05:58:08.220311Z",
     "shell.execute_reply.started": "2023-11-07T05:58:08.119717Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-29-a6b353da3ca5>:5: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  TAGS_PROD_feature['TAGS_PROD_LR'] = y_TAGS_PROD_LR\n",
      "<ipython-input-29-a6b353da3ca5>:6: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  TAGS_PROD_feature['TAGS_PROD_LR_linear'] = y_TAGS_PROD_linear\n"
     ]
    }
   ],
   "source": [
    "y_TAGS_PROD_LR = model.predict_proba(TAGS_PROD_HOLD_QZ_flag[feature_name])[:,1]\n",
    "y_TAGS_PROD_linear = model.decision_function(TAGS_PROD_HOLD_QZ_flag[feature_name])\n",
    "\n",
    "TAGS_PROD_feature = TAGS_PROD_HOLD_QZ_flag[['CUST_NO','FLAG']]\n",
    "TAGS_PROD_feature['TAGS_PROD_LR'] = y_TAGS_PROD_LR\n",
    "TAGS_PROD_feature['TAGS_PROD_LR_linear'] = y_TAGS_PROD_linear\n",
    "TAGS_PROD_feature = TAGS_PROD_feature.drop('FLAG', axis = 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:08.222443Z",
     "iopub.status.busy": "2023-11-07T05:58:08.222208Z",
     "iopub.status.idle": "2023-11-07T05:58:08.287733Z",
     "shell.execute_reply": "2023-11-07T05:58:08.287089Z",
     "shell.execute_reply.started": "2023-11-07T05:58:08.222413Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>TAGS_PROD_LR</th>\n",
       "      <th>TAGS_PROD_LR_linear</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>d8649736e17aa30f6507646babb64e31</td>\n",
       "      <td>0.017426</td>\n",
       "      <td>-4.032191</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>adcc2d5f9c28dd2e2e8eb01b3a61dc20</td>\n",
       "      <td>0.126306</td>\n",
       "      <td>-1.934023</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>5f403e9b47be56e617b7153a6c35651b</td>\n",
       "      <td>0.028076</td>\n",
       "      <td>-3.544373</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>01c2a4c34a3b0db9300169aa819d1893</td>\n",
       "      <td>0.078181</td>\n",
       "      <td>-2.467325</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>f52f7eb640c40983ba05fb8ca68e1339</td>\n",
       "      <td>0.017019</td>\n",
       "      <td>-4.056247</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                            CUST_NO  TAGS_PROD_LR  TAGS_PROD_LR_linear\n",
       "0  d8649736e17aa30f6507646babb64e31      0.017426            -4.032191\n",
       "1  adcc2d5f9c28dd2e2e8eb01b3a61dc20      0.126306            -1.934023\n",
       "2  5f403e9b47be56e617b7153a6c35651b      0.028076            -3.544373\n",
       "3  01c2a4c34a3b0db9300169aa819d1893      0.078181            -2.467325\n",
       "4  f52f7eb640c40983ba05fb8ca68e1339      0.017019            -4.056247"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "TAGS_PROD_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-26T07:01:34.246809Z",
     "iopub.status.busy": "2023-10-26T07:01:34.246509Z",
     "iopub.status.idle": "2023-10-26T07:01:34.250061Z",
     "shell.execute_reply": "2023-10-26T07:01:34.249363Z",
     "shell.execute_reply.started": "2023-10-26T07:01:34.246774Z"
    }
   },
   "source": [
    "### 合并资产、存款、持有表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:08.289068Z",
     "iopub.status.busy": "2023-11-07T05:58:08.288833Z",
     "iopub.status.idle": "2023-11-07T05:58:09.517402Z",
     "shell.execute_reply": "2023-11-07T05:58:09.516684Z",
     "shell.execute_reply.started": "2023-11-07T05:58:08.289030Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['DPSA_BAL', 'MAVER_DPSA_BAL', 'SAVER_DPSA_BAL', 'YAVER_DPSA_BAL', 'TD_BAL', 'MAVER_TD_BAL', 'SAVER_TD_BAL', 'YAVER_TD_BAL', 'DAY_FA_BAL', 'MAVER_FA_BAL', 'SAVER_FA_BAL', 'YAVER_FA_BAL', 'DAY_AUM_BAL', 'MAVER_AUM_BAL', 'SAVER_AUM_BAL', 'YAVER_AUM_BAL', 'TOT_IVST_BAL', 'MAVER_TOT_IVST_BAL', 'SAVER_TOT_IVST_BAL', 'YAVER_TOT_IVST_BAL', 'PD_CNT', 'DP_IND', 'IL_IND', 'DCARD_IND', 'CCARD_IND', 'FNCG_IND', 'FUND_IND', 'BOND_IND', 'INSUR_IND', 'METAL_IND', 'PAY_IND', 'EBNK_IND', 'MB_IND', 'MS_IND', 'TDPT_PAY_IND']\n",
      "最佳阈值:  0.06609983009607635\n",
      "打印分类报告\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "         0.0       0.98      0.77      0.86      8025\n",
      "         1.0       0.13      0.71      0.23       403\n",
      "\n",
      "    accuracy                           0.77      8428\n",
      "   macro avg       0.56      0.74      0.54      8428\n",
      "weighted avg       0.94      0.77      0.83      8428\n",
      "\n",
      "(0.3817305116528261, 0.707196029776675, 0.13437057991513437, 0.06609983009607635) 0.7879195751489994\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/woody/anaconda3/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:460: ConvergenceWarning: lbfgs failed to converge (status=1):\n",
      "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n",
      "\n",
      "Increase the number of iterations (max_iter) or scale the data as shown in:\n",
      "    https://scikit-learn.org/stable/modules/preprocessing.html\n",
      "Please also refer to the documentation for alternative solver options:\n",
      "    https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n",
      "  n_iter_i = _check_optimize_result(\n",
      "<ipython-input-31-aca5de1490d3>:41: RuntimeWarning: invalid value encountered in true_divide\n",
      "  f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n"
     ]
    }
   ],
   "source": [
    "DP_FA_PROD_SUM = TARGET_QZ[['CUST_NO','FLAG']].merge(DP_CUST_SUM_QZ.drop('DATA_DAT',axis = 1),on = 'CUST_NO', how = 'left')\n",
    "DP_FA_PROD_SUM = DP_FA_PROD_SUM.merge(CUST_FA_SUM_QZ.drop('DATA_DAT',axis = 1),on = 'CUST_NO', how = 'left')\n",
    "DP_FA_PROD_SUM = DP_FA_PROD_SUM.merge(TAGS_PROD_HOLD_QZ.drop('DATA_DAT',axis = 1),on = 'CUST_NO', how = 'left')\n",
    "DP_FA_PROD_SUM = DP_FA_PROD_SUM.fillna(0)\n",
    "# 训练集\n",
    "train_df = DP_FA_PROD_SUM[~DP_FA_PROD_SUM['FLAG'].isnull()].reset_index(drop=True)\n",
    "# 测试集\n",
    "test_df = DP_FA_PROD_SUM[DP_FA_PROD_SUM['FLAG'].isnull()]\n",
    "\n",
    "\n",
    "\n",
    "# 预删除特征 \n",
    "drop_cols = ['DATA_DAT','CARD_NO','CUST_NO', 'FLAG']\n",
    "feature_name = [i for i in train_df.columns if i not in drop_cols]\n",
    "\n",
    "# 构建训练数据和测试数据\n",
    "X_train = train_df.copy()\n",
    "y = X_train['FLAG']\n",
    "X_train = X_train[feature_name]\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classesprint(feature_name)\n",
    "\n",
    "print(feature_name)\n",
    "\n",
    "def f_score(y_true, y_pred):\n",
    "\n",
    "    precisions, recalls, thresholds = precision_recall_curve(y_true, y_pred)\n",
    "\n",
    "    # F1\n",
    "#     f1_scores = (2 * precisions * recalls) / (precisions + recalls)\n",
    "#     best_t = thresholds[np.argmax(f1_scores[np.isfinite(f1_scores)])]\n",
    "#     y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "#     recall = recall_score(y_true, y_1)\n",
    "#     precision = precision_score(y_true, y_1)\n",
    "#     F_score = f1_score(y_true, y_1)\n",
    "#     F_score = (2 * precision * recall) / (precision + recall)\n",
    "\n",
    "    # F2\n",
    "    f2_scores = (5 * precisions * recalls) / (4*precisions + recalls)\n",
    "    best_t = thresholds[np.argmax(f2_scores[np.isfinite(f2_scores)])]\n",
    "    y_1 = [1 if x >= best_t else 0 for x in y_pred]\n",
    "    recall = recall_score(y_true, y_1)\n",
    "    precision = precision_score(y_true, y_1)\n",
    "    F_score = (5 * precision * recall) / (4*precision + recall)\n",
    "\n",
    "    #print(f\"valid's f1: {F_score}\")\n",
    "    print(\"最佳阈值: \", str(best_t))\n",
    "    print('打印分类报告')\n",
    "    clf_report1 = classification_report(y_true.values, y_1)\n",
    "    print(clf_report1)\n",
    "\n",
    "    return F_score, recall, precision, best_t\n",
    "\n",
    "def convert_prob_to_class(probabilites, threshold = 0.5):\n",
    "    classes = [1 if prob>=threshold else 0 for prob in probabilites]\n",
    "    return classes\n",
    "\n",
    "xtrain,xtest,ytrain,ytest = train_test_split(X_train,y,test_size=0.2,random_state=42)\n",
    "\n",
    "model = LogisticRegression(C=1.0,solver='lbfgs',max_iter=100,random_state=42)\n",
    "model.fit(xtrain,ytrain)\n",
    "\n",
    "y_pred = model.predict_proba(xtest)[:,1]\n",
    "f  = f_score(ytest, y_pred)\n",
    "\n",
    "# y_pred_class = convert_prob_to_class(y_pred, 0.5)\n",
    "\n",
    "# f1 = f1_score(ytest,y_pred_class)\n",
    "roc_auc = roc_auc_score(ytest,y_pred)\n",
    "\n",
    "\n",
    "\n",
    "print(f,roc_auc)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:09.518672Z",
     "iopub.status.busy": "2023-11-07T05:58:09.518444Z",
     "iopub.status.idle": "2023-11-07T05:58:09.617924Z",
     "shell.execute_reply": "2023-11-07T05:58:09.617303Z",
     "shell.execute_reply.started": "2023-11-07T05:58:09.518642Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-32-ae1104954e32>:5: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  DPFAPROD_SUM_feature['DPFAPROD_SUM_LR'] = y_DPFAPROD_SUM_LR\n",
      "<ipython-input-32-ae1104954e32>:6: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  DPFAPROD_SUM_feature['DPFAPROD_SUM_linear'] = y_DPFAPROD_SUM_linear\n"
     ]
    }
   ],
   "source": [
    "y_DPFAPROD_SUM_LR = model.predict_proba(DP_FA_PROD_SUM[feature_name])[:,1]\n",
    "y_DPFAPROD_SUM_linear = model.decision_function(DP_FA_PROD_SUM[feature_name])\n",
    "\n",
    "DPFAPROD_SUM_feature = DP_FA_PROD_SUM[['CUST_NO','FLAG']]\n",
    "DPFAPROD_SUM_feature['DPFAPROD_SUM_LR'] = y_DPFAPROD_SUM_LR\n",
    "DPFAPROD_SUM_feature['DPFAPROD_SUM_linear'] = y_DPFAPROD_SUM_linear\n",
    "DPFAPROD_SUM_feature = DPFAPROD_SUM_feature.drop('FLAG', axis = 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:09.619220Z",
     "iopub.status.busy": "2023-11-07T05:58:09.618980Z",
     "iopub.status.idle": "2023-11-07T05:58:09.689231Z",
     "shell.execute_reply": "2023-11-07T05:58:09.688551Z",
     "shell.execute_reply.started": "2023-11-07T05:58:09.619190Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>CUST_NO</th>\n",
       "      <th>DPFAPROD_SUM_LR</th>\n",
       "      <th>DPFAPROD_SUM_linear</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>3e9453f630dbc8b3f9682e7f8b721e6a</td>\n",
       "      <td>0.006977</td>\n",
       "      <td>-4.958178</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>ddd990d391f651a105ee0ca5f889cd07</td>\n",
       "      <td>0.021551</td>\n",
       "      <td>-3.815528</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>5bda5dd2bfdad71cabac599d568a8e2d</td>\n",
       "      <td>0.019995</td>\n",
       "      <td>-3.892058</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>761723ab80a522678da8be233db4039b</td>\n",
       "      <td>0.114985</td>\n",
       "      <td>-2.040804</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>86bcc214132678b544a75c5057be62ff</td>\n",
       "      <td>0.017991</td>\n",
       "      <td>-3.999712</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                            CUST_NO  DPFAPROD_SUM_LR  DPFAPROD_SUM_linear\n",
       "0  3e9453f630dbc8b3f9682e7f8b721e6a         0.006977            -4.958178\n",
       "1  ddd990d391f651a105ee0ca5f889cd07         0.021551            -3.815528\n",
       "2  5bda5dd2bfdad71cabac599d568a8e2d         0.019995            -3.892058\n",
       "3  761723ab80a522678da8be233db4039b         0.114985            -2.040804\n",
       "4  86bcc214132678b544a75c5057be62ff         0.017991            -3.999712"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "DPFAPROD_SUM_feature.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 合并特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-07T05:58:09.690446Z",
     "iopub.status.busy": "2023-11-07T05:58:09.690228Z",
     "iopub.status.idle": "2023-11-07T05:58:09.868104Z",
     "shell.execute_reply": "2023-11-07T05:58:09.867512Z",
     "shell.execute_reply.started": "2023-11-07T05:58:09.690418Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "LR_feature = DPFAPROD_SUM_feature.merge(TAGS_PROD_feature,on = 'CUST_NO', how = 'left')\n",
    "LR_feature = LR_feature.merge(DP_CUST_LR_feature,on = 'CUST_NO', how = 'left')\n",
    "LR_feature = LR_feature.merge(CUST_FA_feature,on = 'CUST_NO', how = 'left')\n",
    "\n",
    "\n",
    "CSTLOGQUERY_feature_file_name = feature_path+\"/B_LYH_LR_feature.pkl\"\n",
    "\n",
    "# 使用pickle.dump()将特征矩阵保存为二进制文件\n",
    "with open(CSTLOGQUERY_feature_file_name, 'wb') as file:\n",
    "    pickle.dump(LR_feature, file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  },
  "toc-autonumbering": true,
  "toc-showtags": false
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
