{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "62127bd7-d969-487f-a3be-321bd9937a69",
   "metadata": {},
   "source": [
    "# 导包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "8b6cfb02-6feb-44f1-af12-0292cfd8227c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:47:46.544620Z",
     "iopub.status.busy": "2024-11-03T06:47:46.544124Z",
     "iopub.status.idle": "2024-11-03T06:47:46.566505Z",
     "msg_id": "a63091c6-b459-42b7-a2c8-3bf6c337554b",
     "shell.execute_reply": "2024-11-03T06:47:46.565848Z",
     "shell.execute_reply.started": "2024-11-03T06:47:46.544585Z"
    }
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from pandas import DataFrame,Series\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib import rcParams\n",
    "rcParams[\"font.family\"] = \"SimHei\"\n",
    "%matplotlib inline\n",
    "import copy \n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "import os \n",
    "import gc\n",
    "import math\n",
    "import pickle\n",
    "import seaborn as sns\n",
    "\n",
    "from itertools import combinations\n",
    "from functools import partial\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.preprocessing import OneHotEncoder\n",
    "from sklearn.preprocessing import MinMaxScaler,StandardScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.linear_model import LogisticRegressionCV\n",
    "from sklearn.metrics import confusion_matrix,accuracy_score,classification_report,roc_auc_score,log_loss\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "import sklearn.metrics as metrics\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.metrics import roc_curve\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn.feature_selection import RFECV\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "import sklearn.ensemble as ensemble\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn import svm\n",
    "from sklearn.feature_selection import SelectFromModel\n",
    "from sklearn.mixture import GaussianMixture\n",
    "\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "import xgboost as xgb\n",
    "from xgboost import XGBClassifier\n",
    "\n",
    "import lightgbm as lgb\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "\n",
    "import catboost as cbt\n",
    "from catboost import CatBoostClassifier\n",
    "\n",
    "import scipy\n",
    "from scipy import stats,integrate\n",
    "from scipy.stats import ks_2samp\n",
    "#from scipy.stats import kssamp\n",
    "from scipy.stats import pearsonr\n",
    "from sklearn.model_selection import RandomizedSearchCV\n",
    "from scipy.stats import uniform\n",
    "from scipy.stats import kstest\n",
    "\n",
    "import toad\n",
    "from toad.plot import bin_plot\n",
    "\n",
    "import joblib\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "\n",
    "pd.set_option('display.max_columns', 200)\n",
    "pd.set_option('display.max_rows', 200)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "618c04d8-5b16-41b3-9c6a-f28debfdf9f6",
   "metadata": {},
   "source": [
    "# 定义函数"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e42b06e6-6430-4541-b5c4-9879f8cc011d",
   "metadata": {},
   "source": [
    "## LightGBM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3f5b6a6a-35b7-4054-91fb-6737a0edea2f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:17.957918Z",
     "iopub.status.busy": "2024-11-03T06:25:17.957336Z",
     "iopub.status.idle": "2024-11-03T06:25:17.972531Z",
     "msg_id": "ea93fb6d-a18c-46fc-be87-7dcc8da393e8",
     "shell.execute_reply": "2024-11-03T06:25:17.971774Z",
     "shell.execute_reply.started": "2024-11-03T06:25:17.957889Z"
    }
   },
   "outputs": [],
   "source": [
     "def LGB_model(\n",
     "              X=None,\n",
     "              y=None,\n",
     "              params=None,\n",
     "              num_boost_round=10000,\n",
     "              categorical_feature=None,\n",
     "              cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=2022)\n",
     "             ):\n",
     "    \"\"\"Train LightGBM binary classifiers with stratified K-fold CV.\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    X : pd.DataFrame, feature matrix (its column names become feature names).\n",
     "    y : pd.Series, binary target.\n",
     "    params : dict or None; a default parameter set is used when None.\n",
     "    num_boost_round : int, maximum boosting rounds (early stopping may stop sooner).\n",
     "    categorical_feature : list or None, categorical column names.\n",
     "    cv : cross-validation splitter.\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    (clfs, ks_list, oof_ks): the per-fold boosters, the per-fold KS values\n",
     "    with the mean KS and out-of-fold KS appended at the end, and the\n",
     "    out-of-fold KS.\n",
     "    \"\"\"\n",
     "    \n",
     "    # Log every 100 rounds; stop when the valid metric stalls for 200 rounds.\n",
     "    callbacks = [log_evaluation(period=100), early_stopping(stopping_rounds = 200)]\n",
     "    if params is None:\n",
     "        params = {\n",
     "                    \"boost\":\"gbdt\",\n",
     "                    \"objective\":\"binary\",\n",
     "                    \"metric\":\"auc\",\n",
     "                    \"max_depth\":6,\n",
     "                    \"learning_rate\":0.05,\n",
     "                    \"feature_fraction\":0.85,\n",
     "                    \"bagging_fraction\":0.85,\n",
     "                    \"bagging_freq\":5,\n",
     "                    \"max_bin\":56,\n",
     "                    \"seed\":2022,    # random seed, must be set for reproducibility\n",
     "                    \"verbose\":-1\n",
     "                }\n",
     "\n",
     "    columns = X.columns.tolist() \n",
     "    \n",
     "    # Out-of-fold predictions, filled one validation fold at a time.\n",
     "    y_oof = np.zeros(X.shape[0])\n",
     "    score = 0\n",
     "    score_auc = 0\n",
     "    clfs = []\n",
     "    ks_list = []\n",
     "    for k, (trian_index, valid_index) in enumerate(cv.split(X, y)):\n",
     "        \n",
     "        X_train, y_train = X.values[trian_index], y.values[trian_index]\n",
     "        X_valid, y_valid = X.values[valid_index], y.values[valid_index]\n",
     "        train_D = lgb.Dataset(data=X_train, label=y_train, feature_name=columns, categorical_feature=categorical_feature)\n",
     "        valid_D = lgb.Dataset(data=X_valid, label=y_valid, feature_name=columns, categorical_feature=categorical_feature, reference=train_D)\n",
     "        \n",
     "        clf = lgb.train(params=params,\n",
     "                        train_set=train_D,\n",
     "                        valid_sets=[train_D, valid_D],\n",
     "                        valid_names=[\"Train\", \"Valid\"],\n",
     "                        num_boost_round=num_boost_round,\n",
     "                        callbacks = callbacks\n",
     "                        )\n",
     "        # Predict with the early-stopped best iteration.\n",
     "        y_pred_valid = clf.predict(X_valid, num_iteration=clf.best_iteration)\n",
     "        y_oof[valid_index] = y_pred_valid\n",
     "        print(\"=======================================\")\n",
     "        print(\"第 {} 折，当前 KS = {:.6}\".format(k+1, get_KS(y_valid, y_pred_valid)))\n",
     "        print(\"=======================================\")\n",
     "        score = score + get_KS(y_valid, y_pred_valid)\n",
     "        score_auc = score_auc + roc_auc_score(y_valid, y_pred_valid)\n",
     "        ks_list.append(get_KS(y_valid, y_pred_valid))\n",
     "        \n",
     "        # TODO: estimate the best classification threshold\n",
     "        \n",
     "        del X_train, X_valid, y_train, y_valid\n",
     "        gc.collect()\n",
     "        \n",
     "        clfs.append(clf)\n",
     "    \n",
     "    ks_list.append(score/(k+1))\n",
     "    ks_list.append(get_KS(y, y_oof))\n",
     "    print(\"平均 KS = {:.6}\".format(score/(k+1)))\n",
     "    print(\"Out of folds KS = {:.6}\".format(get_KS(y, y_oof)))\n",
     "    \n",
     "    print(\"平均 AUC = {:.6}\".format(score_auc/(k+1)))\n",
     "    auc = roc_auc_score(y, y_oof)\n",
     "    print(\"Out of folds AUC = {:.6}\".format(auc))\n",
     "\n",
     "    return clfs, ks_list, get_KS(y, y_oof)\n",
     "\n",
     "## Compute the KS statistic: max gap between TPR and FPR along the ROC curve.\n",
     "def get_KS(y_true, y_pred):\n",
     "    fpr, tpr, _ = roc_curve(y_true, y_pred)\n",
     "    return max(abs((fpr-tpr)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "26e201c6-6b32-4e8b-9b86-3533531565ac",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:18.898458Z",
     "iopub.status.busy": "2024-11-03T06:25:18.897951Z",
     "iopub.status.idle": "2024-11-03T06:25:18.905884Z",
     "msg_id": "efb11676-b192-4a23-b822-4231a51a91b5",
     "shell.execute_reply": "2024-11-03T06:25:18.905199Z",
     "shell.execute_reply.started": "2024-11-03T06:25:18.898427Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_feature_imp(clfs, imp_type='gain', feature_names=None, top_n=25):\n",
    "    \"\"\"\n",
    "    获取模型训练时的特征重要性，并绘图。\n",
    "    \"\"\"\n",
    "    feature_importances = pd.DataFrame()\n",
    "    feature_importances['feature'] = feature_names\n",
    "    for i, clf in enumerate(clfs):\n",
    "        feature_importances[str(i)] = clf.feature_importance(imp_type)\n",
    "    feature_importances['average'] = np.exp(np.log1p(feature_importances[[str(i) for i in range(len(clfs))]]).mean(axis=1))\n",
    "    \n",
    "    plt.figure(figsize=(20, 16))\n",
    "    sns.barplot(data=feature_importances.sort_values(by='average', ascending=False).head(top_n), x='average', y='feature');\n",
    "    plt.title('{} TOP feature importance over {} folds average gain'.format(top_n, 5));\n",
    "    return feature_importances.sort_values(by='average', ascending=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72c19f2f-56d8-4195-8c43-51e97dfeeeb5",
   "metadata": {},
   "source": [
    "## XGBoost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "79cf3394-4220-4f26-9a9e-43019bf74d93",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:19.282920Z",
     "iopub.status.busy": "2024-11-03T06:25:19.282448Z",
     "iopub.status.idle": "2024-11-03T06:25:19.294525Z",
     "msg_id": "5b585c90-945d-474c-aea3-6c3d4f3864ca",
     "shell.execute_reply": "2024-11-03T06:25:19.293842Z",
     "shell.execute_reply.started": "2024-11-03T06:25:19.282888Z"
    }
   },
   "outputs": [],
   "source": [
     "def XGB_model(\n",
     "              X=None,\n",
     "              y=None,\n",
     "              params=None,\n",
     "              num_boost_round=10000,\n",
     "              early_stopping_rounds=200,\n",
     "              cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=2022)\n",
     "             ):\n",
     "    \"\"\"Train XGBoost binary classifiers with stratified K-fold CV.\n",
     "\n",
     "    Parameters\n",
     "    ----------\n",
     "    X : pd.DataFrame, feature matrix.\n",
     "    y : pd.Series, binary target.\n",
     "    params : dict or None; a default parameter set is used when None.\n",
     "    num_boost_round : int, maximum boosting rounds.\n",
     "    early_stopping_rounds : int, patience on the evaluation metric.\n",
     "    cv : cross-validation splitter.\n",
     "\n",
     "    Returns\n",
     "    -------\n",
     "    clfs : list of trained Boosters, one per fold.\n",
     "    \"\"\"\n",
     "    \n",
     "    \n",
     "    if params is None:\n",
     "        params = {\n",
     "                'booster': 'gbtree',\n",
     "                'eval_metric': 'auc',\n",
     "                'gamma': 1,\n",
     "                'min_child_weight': 50,\n",
     "                'max_depth': 3,\n",
     "                'lambda':1,\n",
     "                'objective':'binary:logistic',\n",
     "                'learning_rate': 0.01,\n",
     "                'random_state':2022\n",
     "                }\n",
     "\n",
     "    # NOTE(review): `columns` is computed but never used in this function.\n",
     "    columns = X.columns.tolist() \n",
     "    \n",
     "    # Out-of-fold predictions, filled one validation fold at a time.\n",
     "    y_oof = np.zeros(X.shape[0])\n",
     "    score = 0\n",
     "    clfs = []\n",
     "    for k, (trian_index, valid_index) in enumerate(cv.split(X, y)):\n",
     "        X_train, y_train = X.values[trian_index], y.values[trian_index]\n",
     "        X_valid, y_valid = X.values[valid_index], y.values[valid_index]\n",
     "        train_matrix = xgb.DMatrix(data=X_train, label=y_train)\n",
     "        valid_matrix = xgb.DMatrix(data=X_valid, label=y_valid)\n",
     "        \n",
     "        # Evaluation sets reported every 100 rounds during training.\n",
     "        watch_list = [(train_matrix, 'train'), (valid_matrix, 'valid')]\n",
     "        clf = xgb.train(params, \n",
     "                          train_matrix, \n",
     "                          evals=watch_list, \n",
     "                          num_boost_round=num_boost_round, \n",
     "                          early_stopping_rounds=early_stopping_rounds,\n",
     "                          verbose_eval=100, \n",
     "                         )\n",
     "        y_pred_valid = clf.predict(valid_matrix)\n",
     "        \n",
     "        y_oof[valid_index] = y_pred_valid\n",
     "        print(\"=======================================\")\n",
     "        print(\"第 {} 折，当前 KS = {:.6}\".format(k+1, get_KS(y_valid, y_pred_valid)))\n",
     "        print(\"第 {} 折，当前 AUC = {:.6}\".format(k+1, roc_auc_score(y_valid, y_pred_valid)))\n",
     "        print(\"=======================================\")\n",
     "        score = score + get_KS(y_valid, y_pred_valid)\n",
     "        \n",
     "        del X_train, X_valid, y_train, y_valid\n",
     "        gc.collect()\n",
     "        \n",
     "        clfs.append(clf)\n",
     "    \n",
     "    print(\"平均 KS = {:.6}\".format(score/(k+1)))\n",
     "    print(\"Out of folds KS = {:.6}\".format(get_KS(y, y_oof)))\n",
     "    print(\"Out of folds AUC = {:.6}\".format(roc_auc_score(y, y_oof)))\n",
     "    \n",
     "    return clfs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "b83d4afa-3ac9-45e0-bdc9-0c6967098c58",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:19.434601Z",
     "iopub.status.busy": "2024-11-03T06:25:19.434108Z",
     "iopub.status.idle": "2024-11-03T06:25:19.442441Z",
     "msg_id": "399ed8de-ff21-4fd9-b590-290c5a7544c4",
     "shell.execute_reply": "2024-11-03T06:25:19.441806Z",
     "shell.execute_reply.started": "2024-11-03T06:25:19.434573Z"
    }
   },
   "outputs": [],
   "source": [
     "def get_feature_imp_XGB(clfs, imp_type='gain', feature_names=None, top_n=25):\n",
     "    \"\"\"\n",
     "    Collect per-fold XGBoost feature importances, average them, and plot the top features.\n",
     "\n",
     "    NOTE(review): xgboost's ``get_score`` only returns features that were\n",
     "    actually used in at least one split, so its keys may be fewer than\n",
     "    ``feature_names`` — confirm the lengths match for your model, otherwise\n",
     "    the 'feature' column assignment below will raise.\n",
     "    \"\"\"\n",
     "    feature_importances = pd.DataFrame()\n",
     "    feature_importances[\"feature_中文\"] = feature_names\n",
     "    feature_importances['feature'] = clfs[0].get_score(importance_type=imp_type).keys()\n",
     "    for i, clf in enumerate(clfs):\n",
     "        tmp_imp = pd.DataFrame()\n",
     "        tmp_imp[\"feature\"] = clf.get_score(importance_type=imp_type).keys()\n",
     "        tmp_imp[\"score_{}\".format(i+1)] = clf.get_score(importance_type=imp_type).values()\n",
     "\n",
     "        feature_importances = feature_importances.merge(tmp_imp, how=\"left\", left_on=\"feature\", right_on=\"feature\")\n",
     "\n",
     "    # Cross-fold geometric-style mean: exp(mean(log1p(score))).\n",
     "    feature_importances['average'] = np.exp(np.log1p(feature_importances[[\"score_{}\".format(i+1) for i in range(len(clfs))]]).mean(axis=1))     \n",
     "\n",
     "    plt.figure(figsize=(20, 16))\n",
     "    sns.barplot(data=feature_importances.sort_values(by='average', ascending=False).head(top_n), x='average', y='feature');\n",
     "    plt.title('{} TOP feature importance over {} folds average gain'.format(top_n, 5));\n",
     "    return feature_importances.sort_values(by='average', ascending=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b6a31f34-c64e-4116-80ba-f4f133850e08",
   "metadata": {},
   "source": [
    "## CatBoost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4ef6e237-d2e0-46cb-93b6-5d738cc5f65d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:19.777800Z",
     "iopub.status.busy": "2024-11-03T06:25:19.777277Z",
     "iopub.status.idle": "2024-11-03T06:25:19.788899Z",
     "msg_id": "9f152d6a-9c46-4420-b72c-a9a552c99957",
     "shell.execute_reply": "2024-11-03T06:25:19.788177Z",
     "shell.execute_reply.started": "2024-11-03T06:25:19.777772Z"
    }
   },
   "outputs": [],
   "source": [
    "def CB_model(\n",
    "              X,\n",
    "              y,\n",
    "              params=None,\n",
    "              categorical_feature=None,\n",
    "              cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=2025)\n",
    "             ):\n",
    "    \n",
    "    \n",
    "    if params is None:\n",
    "        params = {\n",
    "                   \"loss_function\":\"Logloss\",\n",
    "                   \"eval_metric\":\"AUC\",\n",
    "                   \"task_type\":\"CPU\",\n",
    "                   \"learning_rate\":0.01,\n",
    "                   \"depth\":7,\n",
    "                   \"iterations\":5000,\n",
    "                   \"early_stopping_rounds\":200,\n",
    "                   \"random_seed\":2020,\n",
    "                   \"od_type\":\"Iter\"\n",
    "                }\n",
    "\n",
    "    y_oof = np.zeros(X.shape[0])\n",
    "    score = 0\n",
    "    clfs = []\n",
    "    ks_list = []\n",
    "    threshold_score_list = []\n",
    "    for k, (trian_index, valid_index) in enumerate(cv.split(X, y)):\n",
    "        \n",
    "        X_train, y_train = X.iloc[trian_index], y.iloc[trian_index]\n",
    "        X_valid, y_valid = X.iloc[valid_index], y.iloc[valid_index]\n",
    "        \n",
    "        # sklearn API\n",
    "        clf = cb.CatBoostClassifier(**params)\n",
    "        clf = clf.fit(X_train,\n",
    "                      y_train, \n",
    "                      cat_features=categorical_feature,\n",
    "                      eval_set=(X_valid,y_valid),\n",
    "                      verbose=500\n",
    "                     )\n",
    "        y_pred_valid = clf.predict(X_valid, prediction_type=\"Probability\")[:,-1]\n",
    "        \n",
    "        y_oof[valid_index] = y_pred_valid\n",
    "        print(\"=======================================\")\n",
    "        print(\"第 {} 折，当前 AUC = {:.6}\".format(k+1, roc_auc_score(y_valid, y_pred_valid)))\n",
    "        print(\"第 {} 折，当前 KS = {:.6}\".format(k+1, get_KS(y_valid, y_pred_valid)))\n",
    "        print(\"=======================================\")\n",
    "        score = score + get_KS(y_valid, y_pred_valid)\n",
    "        ks_list.append(get_KS(y_valid, y_pred_valid))\n",
    "        \n",
    "        \n",
    "        del X_train, X_valid, y_train, y_valid\n",
    "        gc.collect()\n",
    "        \n",
    "        clfs.append(clf)\n",
    "    \n",
    "    ks_list.append(score/(k+1))\n",
    "    ks_list.append(get_KS(y, y_oof))\n",
    "    print(\"平均 KS = {:.6}\".format(score/(k+1)))\n",
    "    print(\"Out of folds KS = {:.6}\".format(get_KS(y, y_oof)))\n",
    "    print(\"Out of folds AUC = {:.6}\".format(roc_auc_score(y, y_oof)))\n",
    "    \n",
    "    return clfs, ks_list"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "53a434ac-3fb3-47ef-9944-957d96d357fc",
   "metadata": {},
   "source": [
    "## 读取数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8e2172a4-dbcc-4a08-b413-dad39c2257c2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:20.145367Z",
     "iopub.status.busy": "2024-11-03T06:25:20.144824Z",
     "iopub.status.idle": "2024-11-03T06:25:20.155359Z",
     "msg_id": "43a15ca3-dc75-4707-849a-640f8807369a",
     "shell.execute_reply": "2024-11-03T06:25:20.154622Z",
     "shell.execute_reply.started": "2024-11-03T06:25:20.145340Z"
    }
   },
   "outputs": [],
   "source": [
     "def get_data(file_name, num_rows=None):\n",
     "    \"\"\"Load a train/test CSV pair, tag the split, and rename columns to Chinese.\n",
     "\n",
     "    Reads \"<file_name>_T.csv\" from the train directory and \"<file_name>_A.csv\"\n",
     "    from the test directory, marks rows with is_train (1 = train, 0 = test),\n",
     "    concatenates them row-wise, and renames raw column codes to readable names.\n",
     "\n",
     "    NOTE(review): the data directories are hardcoded absolute paths — make\n",
     "    them configurable before running this anywhere else.\n",
     "    \"\"\"\n",
     "    train_path = \"/home/mole/work/contest/train\"\n",
     "    test_path = \"/home/mole/work/contest/A\"\n",
     "    df_train = pd.read_csv(os.path.join(train_path, file_name + \"_T.csv\"), nrows=num_rows)\n",
     "    df_test = pd.read_csv(os.path.join(test_path, file_name + \"_A.csv\"), nrows=num_rows)\n",
     "    df_train[\"is_train\"] = 1\n",
     "    df_test[\"is_train\"] = 0\n",
     "\n",
     "    df = pd.concat(objs=[df_train, df_test],axis=0)\n",
     "    df.rename(mapper = {'DATA_DAT': '数据日期', 'CUST_NO': '客户编号', 'OPTO': '经营期限至', 'OPFROM': '经营期限自', 'ENTSTATUS': '经营状态', 'REGCAP': '注册资本', 'ESDATE': '成立日期', 'FRNAME': '法定代表人/负责人/执行事务合伙人', 'ENTTYPE_CD': '企业（机构）类型编码', 'REGPROVIN_CD': '所在省份编码', 'INDS_CD': '国民经济行业代码', 'ALTDATE': '变更日期', 'ALTITEM': '变更事项', 'PERNAME': '人员姓名', 'POSITIONCODE': '职位代码', 'PERSONAMOUNT': '人员总数量', 'WEBTYPE': '网站（网店）类型', 'WEBSITNAME': '网站（网店）名称', 'DOMAIN': '网站（网店）地址', 'ANCHEDATE': '年报日期', 'ANCHEYEAR': '年报年份', 'EXECMONEY': '执行标的', 'REGDATECLEAN': '立案时间', 'COURTNAME': '执行法院', 'CASECODE': '案号', 'PUBLISHDATECLEAN': '发布时间', 'GISTID': '执行依据文号', 'PERFORMANCE': '被执行人履行情况', 'REGDATE': '立案时间', 'FINALDATE': '终本日期', 'UNPERFMONEY': '未履行金额', 'CONDATE': '出资日期', 'SUBCONAM': '认缴出资额（万元）', 'FUNDEDRATIO': '出资比例', 'INVTYPE': '股东类型', 'CONFORM': '出资方式', 'SH_CUST_NO': '股东客户编号', 'BTD_BEGINDATE': '所属日期起', 'BTD_ENDDATE': '所属日期止', 'BTD_COLLECTCODE': '征收项目代码', 'BTD_DECLARDATE': '申报日期', 'BTD_DECLARTERM': '申报期限', 'BTD_TOTALSALE': '全部销售收入', 'BTD_TAXABLESALE': '应税销售收入', 'BTD_TAXPAYABLE': '应纳税额', 'BTD_DEDUCTAMOUNT': '减免税额', 'TR_DAT': '交易日期', 'TR_CD': '交易代码', 'CHANL_CD': '渠道代码', 'ABS_INFO': '摘要信息', 'CPT_TYP_CD': '交易对手类型代码', 'ARG_ACCT_BAL': '合约账户余额', 'ACTG_DIRET_CD': '记账方向代码', 'TRS_CSH_IND': '现转标识', 'CSH_EX_IND': '钞汇标识', 'RMB_TR_AMT': '折人民币交易金额', 'CPT_INTL_FE_CUST_IND': '对手方行内客户标识', 'INT_BNK_TR_IND': '是否跨行交易', 'SAME_NAM_IND': '同名账户标识', 'CPT_CUST_NO': '交易对手客户编号'},\n",
     "              axis=1,\n",
     "              inplace=True\n",
     "             )\n",
     "    return df"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6eb31dca-aa91-4900-b285-001bfc9e7c28",
   "metadata": {},
   "source": [
    "## 特征编码及交叉"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "acec2730-e950-49aa-9bd8-e8686a965424",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:20.547093Z",
     "iopub.status.busy": "2024-11-03T06:25:20.546196Z",
     "iopub.status.idle": "2024-11-03T06:25:20.553510Z",
     "msg_id": "12985481-1725-46ae-9ab0-48f397169ff8",
     "shell.execute_reply": "2024-11-03T06:25:20.552578Z",
     "shell.execute_reply.started": "2024-11-03T06:25:20.547043Z"
    }
   },
   "outputs": [],
   "source": [
    "def encode_normal(data, categorical_columns):\n",
    "    for col in categorical_columns:\n",
    "        df, _ = data[col].factorize(sort=True)\n",
    "        if df.max() >= 32000:\n",
    "            data[col] = df.astype(\"int\")\n",
    "        else:\n",
    "            data[col] = df.astype(\"int16\")\n",
    "\n",
    "        del df\n",
    "        gc.collect()\n",
    "    return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "01580a89-61ee-459b-8e9f-f38afc6b8f1e",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:20.720443Z",
     "iopub.status.busy": "2024-11-03T06:25:20.719936Z",
     "iopub.status.idle": "2024-11-03T06:25:20.724620Z",
     "msg_id": "e81e3c76-1b75-4dd8-842b-7eef1e435fd4",
     "shell.execute_reply": "2024-11-03T06:25:20.724007Z",
     "shell.execute_reply.started": "2024-11-03T06:25:20.720415Z"
    }
   },
   "outputs": [],
   "source": [
    "def encode_frequency(data, categorical_cols):\n",
    "    for col in categorical_cols:\n",
    "        df = data[col]\n",
    "        vc = df.value_counts(dropna=False, normalize=True).to_dict()\n",
    "        new_col = col + \"频率编码\"\n",
    "        data[new_col] = data[col].map(vc).astype(\"float32\")\n",
    "\n",
    "    return data "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "892c9226-4859-490f-a351-a3efee46afa2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:20.935889Z",
     "iopub.status.busy": "2024-11-03T06:25:20.935437Z",
     "iopub.status.idle": "2024-11-03T06:25:20.940728Z",
     "msg_id": "a294778b-e84c-4a84-8448-84e8092ee4dc",
     "shell.execute_reply": "2024-11-03T06:25:20.940111Z",
     "shell.execute_reply.started": "2024-11-03T06:25:20.935863Z"
    }
   },
   "outputs": [],
   "source": [
    "def encode_CB(data, categorcial_cols):\n",
    "    \"\"\"\n",
    "    类别特征两两交叉。\n",
    "    \"\"\"\n",
    "    from itertools import combinations \n",
    "    \n",
    "    cross_cols = combinations(categorcial_cols, 2)\n",
    "    col_CB = []\n",
    "    for cross_col in cross_cols:\n",
    "        new_col = \"_\".join(cross_col) + \"_CB\"\n",
    "        data[new_col] = data[cross_col[0]].astype(\"str\") + \"_\" + data[cross_col[1]].astype(\"str\")\n",
    "        col_CB.append(new_col)\n",
    "    \n",
    "    print(\"New columns:\\n\", col_CB)\n",
    "    \n",
    "    return data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eee950d8-06be-41a0-a54e-bd9847c3292a",
   "metadata": {},
   "source": [
    "## 聚合函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0a216863-48f3-4a72-97e7-aa913642a8fc",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:21.352852Z",
     "iopub.status.busy": "2024-11-03T06:25:21.352383Z",
     "iopub.status.idle": "2024-11-03T06:25:21.358048Z",
     "msg_id": "dcea3a21-3b43-4b65-b76f-d340dae47ed6",
     "shell.execute_reply": "2024-11-03T06:25:21.357421Z",
     "shell.execute_reply.started": "2024-11-03T06:25:21.352825Z"
    }
   },
   "outputs": [],
   "source": [
    "def agg_statistics(df, group_cols, agg_functions, name_flag, p=False):\n",
    "    \"\"\"\n",
    "    分组聚合。\n",
    "    \"\"\"\n",
    "    ga = df.groupby(by=group_cols).agg(agg_functions)\n",
    "    ga.columns = [\"{}_{}_{}\".format(e[0], e[1], name_flag) for e in ga.columns.tolist()]\n",
    "    ga.reset_index(inplace=True)\n",
    "    \n",
    "    new_cols = [col for col in ga.columns.tolist() if col not in group_cols]\n",
    "    if p is True:\n",
    "        print(\"新聚合特征：\\n\", new_cols)\n",
    "    \n",
    "    return ga, new_cols"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "25f2eb27-2f95-478a-8246-f6c40e601ca1",
   "metadata": {},
   "source": [
    "## 差分函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "0f3ddfbb-85dc-4515-a28b-71e0e8d1c895",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:21.890800Z",
     "iopub.status.busy": "2024-11-03T06:25:21.890284Z",
     "iopub.status.idle": "2024-11-03T06:25:21.898746Z",
     "msg_id": "3b09e1a0-8132-4b15-905b-94075964b2a7",
     "shell.execute_reply": "2024-11-03T06:25:21.898021Z",
     "shell.execute_reply.started": "2024-11-03T06:25:21.890774Z"
    }
   },
   "outputs": [],
   "source": [
     "# Trend / first-difference feature derivation.\n",
     "def get_kurt(series_x):\n",
     "    # Kurtosis of a pandas Series (used as a named aggregation function below).\n",
     "    kurt = series_x.kurt()\n",
     "    return kurt\n",
     "    \n",
     "def trend_indicator(df, group_dim_1, group_dim_2, agg_functions, name_flag, offset=-1):\n",
     "    \"\"\"\n",
     "    Aggregate per (group_dim_1, group_dim_2), first-difference the aggregates\n",
     "    along group_dim_2 within each group_dim_1, then summarize both the levels\n",
     "    and the differences per group_dim_1 via agg_statistics.\n",
     "\n",
     "    group_dim_1: primary grouping dimension (e.g. entity id).\n",
     "    group_dim_2: secondary, ordered dimension (e.g. period) used for diffs.\n",
     "    offset: periods passed to ``diff`` (default -1, i.e. next minus current).\n",
     "    \"\"\"\n",
     "    # Sort by the second dimension so the diffs follow its order.\n",
     "    df = df.sort_values(by=group_dim_2, ascending=True)\n",
     "    ga = df.groupby(by=[group_dim_1, group_dim_2]).agg(agg_functions)\n",
     "    ga.columns = [\"{}_{}_{}\".format(e[0], name_flag, e[1]) for e in ga.columns.tolist()]\n",
     "    new_features = ga.columns.tolist()\n",
     "    ga.reset_index(inplace=True)\n",
     "\n",
     "    diff_new_features = []\n",
     "    for fea in new_features:\n",
     "        t = \"一阶差分_{}_{}\".format(fea, offset)\n",
     "        diff_new_features.append(t)\n",
     "        ga[t] = ga.groupby(by=group_dim_1)[fea].diff(offset) # offset defaults to -1\n",
     "\n",
     "    # Summarize the level and difference series per first-dimension group.\n",
     "    all_new_features = new_features + diff_new_features\n",
     "    agg_functions_tmp = {}\n",
     "    for fea in all_new_features:\n",
     "        agg_functions_tmp.update({fea:['last','mean','skew',get_kurt,'std','sum','max','min']})    \n",
     "\n",
     "    ga_new, _ = agg_statistics(df=ga, group_cols=[group_dim_1], agg_functions=agg_functions_tmp, name_flag=\"差分特征_{}\".format(name_flag))\n",
     "\n",
     "    return ga_new"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fb46edad-4a6a-4d86-ad4a-0f344ca551bc",
   "metadata": {},
   "source": [
    "## 日期处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "7f03247c-92b4-4053-87a8-8001ffe1fb57",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:22.678529Z",
     "iopub.status.busy": "2024-11-03T06:25:22.677979Z",
     "iopub.status.idle": "2024-11-03T06:25:22.684759Z",
     "msg_id": "6eede5c6-50b0-4b70-bc8f-b437e15af4e5",
     "shell.execute_reply": "2024-11-03T06:25:22.684119Z",
     "shell.execute_reply.started": "2024-11-03T06:25:22.678503Z"
    }
   },
   "outputs": [],
   "source": [
     "# Split a date (string-convertible, \"YYYY-MM-DD\" or \"YYYYMMDD\") into (year, month, day) ints.\n",
     "def get_time_dis(date):\n",
     "    date = str(date).replace(\"-\", \"\")\n",
     "    year = int(date[0:4])\n",
     "    mon = int(date[4:6])\n",
     "    day = int(date[6:8])\n",
     "    \n",
     "    return year, mon, day\n",
     "\n",
     "# Approximate distance between two dates as (days, months, years).\n",
     "# NOTE(review): days assume a 365-day year and 30-day month, and years ignore\n",
     "# the day component — presumably a deliberate rough feature, not calendar math.\n",
     "def two_date_dis(date1, date2):\n",
     "    date1 = get_time_dis(date1)\n",
     "    date2 = get_time_dis(date2)\n",
     "    \n",
     "    days = (date1[0] - date2[0])*365 + (date1[1] - date2[1])*30 + date1[2] - date2[2]\n",
     "    mons = (date1[0] - date2[0])*12 + (date1[1] - date2[1]) \n",
     "    years = (date1[0] - date2[0]) + (date1[1] - date2[1])/12 \n",
     "    return days, mons, years"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "ebc2ff65-aa0b-47c5-8ab7-78386215388d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:22.927512Z",
     "iopub.status.busy": "2024-11-03T06:25:22.926911Z",
     "iopub.status.idle": "2024-11-03T06:25:22.931205Z",
     "msg_id": "8b0d56db-5efa-4f05-882a-bcbb601acd59",
     "shell.execute_reply": "2024-11-03T06:25:22.930583Z",
     "shell.execute_reply.started": "2024-11-03T06:25:22.927487Z"
    }
   },
   "outputs": [],
   "source": [
    "def count_notzero(series_x):\n",
    "    mode = series_x[(series_x > 0)]\n",
    "    return mode.count()\n",
    "\n",
    "def count_zero(series_x):\n",
    "    mode = series_x[(series_x == 0)]\n",
    "    return mode.count()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "40f4b223-f649-47d7-a74f-16845d3180b1",
   "metadata": {},
   "source": [
    "## 文本处理，文本特征词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3d09a8c6-91ed-428e-8728-a98cb63425ca",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-13T00:41:31.783144Z",
     "iopub.status.busy": "2024-11-13T00:41:31.782636Z",
     "iopub.status.idle": "2024-11-13T00:41:31.787017Z",
     "msg_id": "189b5475-cd8b-4a41-b56d-1521861758c5",
     "shell.execute_reply": "2024-11-13T00:41:31.786395Z",
     "shell.execute_reply.started": "2024-11-13T00:41:31.783113Z"
    }
   },
   "outputs": [],
   "source": [
    "#!pip install scipy==1.11.3 #下面gensim跑不了报错时跑这个降低scipy的版本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "65867f8a-7410-4a7a-99fd-1d3d3bab899f",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-05T01:05:31.925725Z",
     "iopub.status.busy": "2024-11-05T01:05:31.923387Z",
     "iopub.status.idle": "2024-11-05T01:05:31.929671Z",
     "msg_id": "7a13c3f4-6e7c-4762-a11b-67e23ee0ffb2",
     "shell.execute_reply": "2024-11-05T01:05:31.928904Z",
     "shell.execute_reply.started": "2024-11-05T01:05:31.925680Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "from gensim.models import Word2Vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "fd5757b3-fab6-44d1-88e9-b9fc03abd96a",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:23.814653Z",
     "iopub.status.busy": "2024-11-03T06:25:23.814237Z",
     "iopub.status.idle": "2024-11-03T06:25:23.820007Z",
     "msg_id": "061b08c4-1072-4f75-a9f5-0063a53b2a68",
     "shell.execute_reply": "2024-11-03T06:25:23.819246Z",
     "shell.execute_reply.started": "2024-11-03T06:25:23.814623Z"
    }
   },
   "outputs": [],
   "source": [
    "def tfidf(df, col, n=8,seed=1024):\n",
    "    # 把文本转换为tf-idf的特征矩阵\n",
    "    tfidf_enc = TfidfVectorizer()\n",
    "    tfidf_vec = tfidf_enc.fit_transform(df[col])\n",
    "    # 降维,提取更精炼的主题\n",
    "    svd_tmp = TruncatedSVD(n_components=n, n_iter=20, random_state=seed)\n",
    "    svd_tmp = svd_tmp.fit_transform(tfidf_vec)\n",
    "    svd_tmp = pd.DataFrame(svd_tmp)\n",
    "    svd_tmp.columns = ['{}_tfidf_{}'.format(col, i) for i in range(n)]\n",
    "    return svd_tmp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "8a63880e-4794-4b43-8a77-df44acbbbec7",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:23.971629Z",
     "iopub.status.busy": "2024-11-03T06:25:23.971107Z",
     "iopub.status.idle": "2024-11-03T06:25:23.976342Z",
     "msg_id": "26f90ad0-a2f7-4045-9d7d-4c8fdb75903b",
     "shell.execute_reply": "2024-11-03T06:25:23.975707Z",
     "shell.execute_reply.started": "2024-11-03T06:25:23.971601Z"
    }
   },
   "outputs": [],
   "source": [
    "def count2vec(df, col, n=8, seed=1024):\n",
    "    count_enc = CountVectorizer()\n",
    "    count_vec = count_enc.fit_transform(df[col])\n",
    "    svd_tmp = TruncatedSVD(n_components=n, n_iter=20, random_state=seed)\n",
    "    svd_tmp = svd_tmp.fit_transform(count_vec)\n",
    "    svd_tmp = pd.DataFrame(svd_tmp)\n",
    "    svd_tmp.columns = ['{}_countvec_{}'.format(col, i) for i in range(n)]\n",
    "    return svd_tmp\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "5be14258-73da-465a-8d19-6a4594991f79",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:24.126959Z",
     "iopub.status.busy": "2024-11-03T06:25:24.126532Z",
     "iopub.status.idle": "2024-11-03T06:25:24.131812Z",
     "msg_id": "8ec9a132-0577-450c-b570-79e3345f65b0",
     "shell.execute_reply": "2024-11-03T06:25:24.131191Z",
     "shell.execute_reply.started": "2024-11-03T06:25:24.126933Z"
    }
   },
   "outputs": [],
   "source": [
    "def text_feats(df, group_id, col, num):\n",
    "    df[col] = df[col].astype(str)\n",
    "    temp = df.groupby(group_id)[col].agg(list).reset_index()\n",
    "    temp[col] = temp[col].apply(lambda x: ' '.join(x))\n",
    "    # 将list 转化为str\n",
    "    tfidf_temp = tfidf(temp, col, num)\n",
    "    count_temp = count2vec(temp, col, num)\n",
    "    return pd.concat([temp[group_id], tfidf_temp, count_temp], axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "2d56d83e-0d4d-423f-b1df-88ebc7201368",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-11-03T06:25:24.313531Z",
     "iopub.status.busy": "2024-11-03T06:25:24.312908Z",
     "iopub.status.idle": "2024-11-03T06:25:24.321463Z",
     "msg_id": "fa961f54-282e-4e3a-9aa5-b6a0e4cca22b",
     "shell.execute_reply": "2024-11-03T06:25:24.320791Z",
     "shell.execute_reply.started": "2024-11-03T06:25:24.313500Z"
    }
   },
   "outputs": [],
   "source": [
    "def word2vec_feature(df, group_id, col, ext='',dim=8):\n",
    "    df[col] = df[col].astype(str)\n",
    "    temp = df.groupby(group_id)[col].agg(list).reset_index()\n",
    "    sentence = temp[col].values.tolist()\n",
    "    model_path = 'tmp/w2v_model_{}_{}.model'.format(col,ext)\n",
    "    if os.path.exists(model_path):\n",
    "        model = Word2Vec.load(model_path)\n",
    "    else:\n",
    "        model = Word2Vec(sentence, vector_size=dim, window=5, min_count=1, workers=1, epochs=10, sg=1, seed=42)\n",
    "        model.save(model_path)\n",
    "\n",
    "    emb_matrix = []\n",
    "    for i in temp[col].values:\n",
    "        tmp = np.zeros(shape=(dim))\n",
    "        for seq in i:\n",
    "            tmp = tmp + model.wv[str(seq)] / len(i)\n",
    "        emb_matrix.append(tmp)\n",
    "    emb_matrix = np.array(emb_matrix)\n",
    "\n",
    "    for i in range(dim):\n",
    "        temp['{}_{}_{}'.format(group_id, col + '_w2v',i)] = emb_matrix[:,i] \n",
    "    del temp[col]\n",
    "\n",
    "    return temp"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "25b1c033-d6ad-4424-8c15-48ea7cf2b8c1",
   "metadata": {},
   "source": [
    "## 样本数据集距离计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f6df6ea-8a88-4d0f-be4f-c5aad0c68234",
   "metadata": {},
   "outputs": [],
   "source": [
    "def distance_eucliDist(data, sample):\n",
    "    res = []\n",
    "    data = np.array(data)\n",
    "    for i in data:\n",
    "        res.append(np.linalg.norm(i - sample))\n",
    "    return res\n",
    "\n",
    "def jaccard_similarity(df, sample):\n",
    "    # 将DataFrame和样本转换为布尔值\n",
    "    df_bool = df.astype(bool)\n",
    "    sample_bool = sample.astype(bool)\n",
    "    \n",
    "    # 计算交集和并集\n",
    "    intersection = df_bool & sample_bool\n",
    "    union = df_bool | sample_bool\n",
    "    \n",
    "    # 计算Jaccard相似度\n",
    "    similarities = intersection.sum(axis=1) / union.sum(axis=1)\n",
    "    return similarities\n",
    "\n",
    "#jaccard_similarities = jaccard_similarity(df, sample)\n",
    "#print(\"Jaccard Similarities:\\n\", jaccard_similarities)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c5376c0-5dc8-4e62-bb29-a83bbf7077d7",
   "metadata": {},
   "outputs": [],
   "source": [
    "#from fastdtw import fastdtw\n",
    "from scipy.spatial.distance import euclidean\n",
    "\n",
    "def dtw_similarity(df, sample):\n",
    "    distances = []\n",
    "    for _, row in df.iterrows():\n",
    "        distance, _ = fastdtw(row, sample, dist=euclidean)\n",
    "        distances.append(distance)\n",
    "    return pd.Series(distances)\n",
    "\n",
    "#dtw_distances = dtw_similarity(df, sample)\n",
    "#print(\"DTW Distances:\\n\", dtw_distances)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "937199fa-4995-493a-8360-e4062cb2001b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from scipy.stats import entropy\n",
    "\n",
    "def kl_divergence(df, sample):\n",
    "    # 确保样本和DataFrame的每一行都是概率分布\n",
    "    sample_prob = sample / sample.sum()\n",
    "    df_prob = df.div(df.sum(axis=1), axis=0)\n",
    "    \n",
    "    # 计算KL散度\n",
    "    divergences = df_prob.apply(lambda row: entropy(row, sample_prob), axis=1)\n",
    "    return divergences\n",
    "\n",
    "#kl_divergences = kl_divergence(df, sample)\n",
    "#print(\"KL Divergences:\\n\", kl_divergences)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
