{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-23T11:20:28.881459Z",
     "iopub.status.busy": "2023-10-23T11:20:28.881172Z",
     "iopub.status.idle": "2023-10-23T11:20:28.889100Z",
     "shell.execute_reply": "2023-10-23T11:20:28.888437Z",
     "shell.execute_reply.started": "2023-10-23T11:20:28.881430Z"
    },
    "tags": []
   },
   "source": [
    "# 导包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from pandas import DataFrame,Series\n",
    "\n",
    "\n",
    "import os\n",
    "import gc\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "import catboost as cat\n",
    "import random\n",
    "\n",
    "from catboost import CatBoostRegressor\n",
    "import sklearn.metrics as metrics\n",
    "from xgboost import XGBClassifier\n",
    "\n",
    "from sklearn.linear_model import SGDRegressor, LinearRegression, Ridge\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.model_selection import StratifiedKFold, KFold\n",
    "from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, log_loss, precision_score, recall_score, classification_report\n",
    "from sklearn.utils import shuffle\n",
    "import math\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import datetime\n",
    "from scipy.stats import ks_2samp\n",
    "\n",
    "from string import Template\n",
    "import time\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "pd.set_option('display.max_rows', 1000)\n",
    "pd.set_option('display.max_columns', 1000)\n",
    "pd.set_option('display.max_colwidth', 100)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义函数\n",
    "## 函数_文件加载与路径、参数定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "filePath_Template=Template(\"${dir}/${tabName}.csv\")\n",
    "def load_fxb_data(tabName):\n",
    "    '''\n",
    "    Load the train/predict CSV pair for table `tabName`.\n",
    "\n",
    "    Reads {train_dir}/{tabName}.csv and {test_dir}/{tabName}_A.csv;\n",
    "    train_dir / test_dir are globals assumed to be defined in another cell.\n",
    "    Returns (df_train, df_predict).\n",
    "    '''\n",
    "    train_path = \"{}/{}.csv\".format(train_dir, tabName)\n",
    "    predict_path = \"{}/{}.csv\".format(test_dir, tabName + '_A')\n",
    "    return pd.read_csv(train_path), pd.read_csv(predict_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 函数_模型训练 二分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-29T13:45:00.740336Z",
     "iopub.status.busy": "2023-10-29T13:45:00.740045Z",
     "iopub.status.idle": "2023-10-29T13:45:00.775648Z",
     "shell.execute_reply": "2023-10-29T13:45:00.775021Z",
     "shell.execute_reply.started": "2023-10-29T13:45:00.740305Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "def ks4xgb(preds, dtrain):\n",
    "    '''\n",
    "    Custom xgboost feval: Kolmogorov-Smirnov statistic as an error metric.\n",
    "\n",
    "    preds  : predicted scores for the rows of dtrain.\n",
    "    dtrain : data matrix carrying the true labels (via get_label()).\n",
    "    Returns ('1-KS', float(1 - KS)); the value is error-style, i.e. the\n",
    "    larger it is, the worse the model is considered.\n",
    "    '''\n",
    "    y_true = dtrain.get_label()\n",
    "    fpr, tpr, thresholds = metrics.roc_curve(y_true, preds, drop_intermediate=False, pos_label=1)\n",
    "    curve = pd.DataFrame({'fpr': fpr, 'tpr': tpr, 'p': thresholds})\n",
    "    # overwrite sklearn's sentinel first threshold with the max score\n",
    "    curve.loc[0, 'p'] = max(preds)\n",
    "    ks_stat = (curve['tpr'] - curve['fpr']).max()\n",
    "    #print(ks_stat)\n",
    "    return '1-KS', float(1 - ks_stat)\n",
    "\n",
    "def gen_thres_new(df_train, oof_preds):\n",
    "    '''\n",
    "    Grid-search the probability cut-off that maximizes binary F1.\n",
    "\n",
    "    df_train  : true 0/1 labels (pandas Series).\n",
    "    oof_preds : predicted scores aligned with df_train.\n",
    "    The scan starts at the score quantile matching the positive rate and\n",
    "    covers +/- 0.3 around it in steps of 0.001 (floored at 0).\n",
    "    Returns (best_threshold, best_f1).\n",
    "    '''\n",
    "    pos_rate = df_train.mean()\n",
    "    pivot = df_train.quantile(1 - pos_rate)\n",
    "    candidates = np.array([\n",
    "        [t, f1_score(df_train, np.where(oof_preds > t, 1, 0), average='binary')]\n",
    "        for t in np.arange(max(0, pivot - 0.3), pivot + 0.3, 0.001)\n",
    "    ])\n",
    "    best = candidates[candidates[:, 1].argmax()]\n",
    "    return best[0], best[1]\n",
    "\n",
    "def cv_model(clf, train_x, train_y, test_x, clf_name, seed ,threshold = 0.5):\n",
    "    '''\n",
    "    Train a binary classifier with 7-fold CV; return OOF and test predictions.\n",
    "\n",
    "    Parameters:\n",
    "    clf      : lightgbm module, xgboost module, or the CatBoostRegressor class.\n",
    "    train_x  : training features (indexed positionally via .iloc per fold).\n",
    "    train_y  : training labels (positionally indexable, e.g. ndarray/Series).\n",
    "    test_x   : features of the set to predict.\n",
    "    clf_name : 'lgb', 'xgb' or 'cat' -- selects the training branch.\n",
    "    seed     : seed for the KFold split and for the lgb/xgb params.\n",
    "    threshold: cut applied to the fold-averaged hard votes (testtp).\n",
    "\n",
    "    Returns:\n",
    "    train  : out-of-fold predictions aligned with train_x rows.\n",
    "    test   : test predictions averaged over folds (raw scores).\n",
    "    testtp : fold-averaged hard votes binarized at `threshold`.\n",
    "    models : list of the fitted per-fold models.\n",
    "\n",
    "    NOTE(review): if clf_name is none of the three supported values,\n",
    "    val_pred/val_f1/test_pred/test_p2 are never assigned and the code\n",
    "    after the branches raises NameError.\n",
    "    '''\n",
    "    folds = 7\n",
    "    #seed = 2063\n",
    "    kf = KFold(n_splits=folds, shuffle=True, random_state=seed)\n",
    "    #kf = KFold(n_splits=folds, shuffle=True)  # no fixed seed: results differ per run\n",
    "\n",
    "    # train: OOF preds; test: mean raw test preds; testtp: mean hard votes;\n",
    "    # test_pred_p2 duplicates the test average but is never returned.\n",
    "    train = np.zeros(train_x.shape[0])\n",
    "    test = np.zeros(test_x.shape[0])\n",
    "    testtp = np.zeros(test_x.shape[0])\n",
    "    test_pred_p2 = np.zeros(test_x.shape[0])\n",
    "    cv_scores = []\n",
    "    val_f1_scores = []\n",
    "    trn_f1_scores = []\n",
    "    models = []\n",
    "\n",
    "    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):\n",
    "        print('************************************ {} - {} ************************************'.format(str(seed),str(i+1)))\n",
    "        trn_x, trn_y, val_x, val_y = train_x.iloc[train_index], train_y[train_index], train_x.iloc[valid_index], train_y[valid_index]\n",
    "\n",
    "        if clf_name == \"lgb\":\n",
    "            train_matrix = clf.Dataset(trn_x, label=trn_y)\n",
    "            valid_matrix = clf.Dataset(val_x, label=val_y)\n",
    "\n",
    "            params = {\n",
    "                'min_data_in_leaf': 300, \n",
    "                'boosting_type': 'gbdt',\n",
    "                'objective': 'binary',\n",
    "                'metric': 'auc',\n",
    "                'min_child_weight': 5,\n",
    "                'num_leaves': 2 ** 5 -1,\n",
    "                'lambda_l2': 10,\n",
    "                'feature_fraction': 0.8,\n",
    "                'bagging_fraction': 0.8,\n",
    "                'bagging_freq': 4,\n",
    "                'learning_rate': 0.03,\n",
    "                'seed': seed,\n",
    "                'nthread': 28,\n",
    "                'n_jobs':24,\n",
    "                'silent': True,\n",
    "                'verbose': -1,\n",
    "            }\n",
    "\n",
    "            model = clf.train(params, train_matrix, 50000, valid_sets=[train_matrix, valid_matrix], verbose_eval=500,early_stopping_rounds=500  )\n",
    "            # trn_all_pred / best_iter / xx below are computed but never used\n",
    "            trn_all_pred  = model.predict(train_x, num_iteration=model.best_iteration)\n",
    "            trn_pred = model.predict(trn_x, num_iteration=model.best_iteration) \n",
    "            val_pred = model.predict(val_x, num_iteration=model.best_iteration)\n",
    "            test_pred = model.predict(test_x, num_iteration=model.best_iteration)\n",
    "            best_iter = model.best_iteration\n",
    "            \n",
    "            xx = val_y.copy()\n",
    "            # per-fold F1-optimal thresholds; the validation one binarizes test preds\n",
    "            trn_best_thresh, trn_f1 = gen_thres_new(trn_y, trn_pred)\n",
    "            val_best_thresh, val_f1 = gen_thres_new(val_y, val_pred)\n",
    "            test_p2 = (test_pred>=val_best_thresh).astype(int)\n",
    "            test_pred_p2 += test_pred/(folds)\n",
    "\n",
    "\n",
    "            # print(list(sorted(zip(features, model.feature_importance(\"gain\")), key=lambda x: x[1], reverse=True))[:20])\n",
    "        \n",
    "\n",
    "        if clf_name == \"xgb\":\n",
    "            train_matrix = clf.DMatrix(trn_x , label=trn_y)\n",
    "            valid_matrix = clf.DMatrix(val_x , label=val_y)\n",
    "            test_matrix = clf.DMatrix(test_x)\n",
    "\n",
    "            params = {'booster': 'gbtree',\n",
    "                      'objective': 'binary:logistic',\n",
    "                      'eval_metric': 'auc',\n",
    "                      'gamma': 1,\n",
    "                      'min_child_weight': 1.5,\n",
    "                      'max_depth': 4,\n",
    "                      'lambda': 9,\n",
    "                      'subsample': 0.7,\n",
    "                      'colsample_bytree': 0.7,\n",
    "                      'colsample_bylevel': 0.7,\n",
    "                      'eta': 0.04,\n",
    "                      'tree_method': 'exact',\n",
    "                      'seed': seed,\n",
    "                      'nthread': 36,\n",
    "                      \"silent\": True,\n",
    "                      }\n",
    "\n",
    "            watchlist = [(train_matrix, 'train'),(valid_matrix, 'eval')]\n",
    "\n",
    "            #model = clf.train(params, train_matrix, num_boost_round=50000, evals=watchlist, verbose_eval=200, early_stopping_rounds=200 ,feval = ks4xgb)\n",
    "            model = clf.train(params, train_matrix, num_boost_round=50000, evals=watchlist, verbose_eval=200, early_stopping_rounds=200 )\n",
    "            \n",
    "            trn_pred  = model.predict(train_matrix, ntree_limit=model.best_ntree_limit)           \n",
    "            val_pred  = model.predict(valid_matrix, ntree_limit=model.best_ntree_limit)\n",
    "            test_pred = model.predict(test_matrix , ntree_limit=model.best_ntree_limit)\n",
    "            \n",
    "            trn_best_thresh, trn_f1 = gen_thres_new(trn_y, trn_pred)\n",
    "            val_best_thresh, val_f1 = gen_thres_new(val_y, val_pred)\n",
    "            test_p2 = (test_pred>=val_best_thresh).astype(int)\n",
    "            test_pred_p2 += test_pred/(folds)\n",
    "            \n",
    "            \n",
    "    \n",
    "        if clf_name == \"cat\":\n",
    "            params = {'learning_rate': 0.05, 'depth': 5, 'l2_leaf_reg': 10, 'bootstrap_type': 'Bernoulli',\n",
    "                      'od_type': 'Iter', 'od_wait': 50, 'random_seed': 22, 'allow_writing_files': False}\n",
    "\n",
    "            # NOTE(review): this branch hard-codes random_seed=22 instead of `seed` -- confirm intended.\n",
    "            # CatBoostRegressor.predict returns raw regression scores, not probabilities.\n",
    "            model = clf(iterations=50000, **params)\n",
    "            model.fit(trn_x, trn_y, eval_set=(val_x, val_y),\n",
    "                      cat_features=[], use_best_model=True, verbose=500)\n",
    "\n",
    "            trn_pred  = model.predict(trn_x)           \n",
    "            val_pred  = model.predict(val_x)\n",
    "            test_pred = model.predict(test_x)\n",
    "            \n",
    "            trn_best_thresh, trn_f1 = gen_thres_new(trn_y, trn_pred)\n",
    "            val_best_thresh, val_f1 = gen_thres_new(val_y, val_pred)\n",
    "            test_p2 = (test_pred>=val_best_thresh).astype(int)\n",
    "            test_pred_p2 += test_pred/(folds)\n",
    "\n",
    "        # accumulate OOF predictions and fold-averaged test predictions/votes\n",
    "        train[valid_index] = val_pred\n",
    "        cv_scores.append(roc_auc_score(val_y, val_pred))\n",
    "        val_f1_scores.append(val_f1)\n",
    "        trn_f1_scores.append(trn_f1)\n",
    "        test = test + test_pred / kf.n_splits\n",
    "        testtp = testtp + test_p2 / kf.n_splits    \n",
    "        \n",
    "        models.append(model)\n",
    "\n",
    "        print(cv_scores)\n",
    "\n",
    "    print(\"%s_scotrainre_list:\" % clf_name, cv_scores)\n",
    "    print(\"%s_score_mean:\" % clf_name, np.mean(cv_scores))\n",
    "    print(\"%s_score_std:\" % clf_name, np.std(cv_scores))\n",
    "    \n",
    "    print(\"%s_scotrainre_list:\" % 'trn_f1', trn_f1_scores)\n",
    "    print(\"%s_score_mean:\" % 'trn_f1', np.mean(trn_f1_scores))\n",
    "    print(\"%s_score_std:\" % 'trn_f1', np.std(trn_f1_scores))\n",
    "    \n",
    "    print(\"%s_scotrainre_list:\" % 'val_f1', val_f1_scores)\n",
    "    print(\"%s_score_mean:\" % 'val_f1', np.mean(val_f1_scores))\n",
    "    print(\"%s_score_std:\" % 'val_f1', np.std(val_f1_scores))\n",
    "  \n",
    "    testtp = (testtp>= threshold).astype(int)\n",
    "    \n",
    "    return train, test, testtp, models\n",
    "\n",
    "def lgb_model(x_train, y_train, x_test ,seed, threshold = 0.5):\n",
    "    '''LightGBM wrapper: returns (oof_preds, test_mean, test_votes, models).'''\n",
    "    return cv_model(lgb, x_train, y_train, x_test, \"lgb\", seed, threshold)\n",
    "\n",
    "def xgb_model(x_train, y_train, x_test, seed, threshold = 0.5):\n",
    "    '''XGBoost wrapper: returns (oof_preds, test_mean, test_votes, models).'''\n",
    "    return cv_model(xgb, x_train, y_train, x_test, \"xgb\", seed, threshold)\n",
    "\n",
    "def cat_model(x_train, y_train, x_test ,seed, threshold = 0.5):\n",
    "    '''CatBoost wrapper: returns (oof_preds, test_mean, test_votes, models).'''\n",
    "    return cv_model(CatBoostRegressor, x_train, y_train, x_test, \"cat\", seed, threshold)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 函数_本地F1验证"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def local_f1(y_pred, y_true):\n",
    "    '''\n",
    "    Overall binary F1 across the five product-indicator columns.\n",
    "\n",
    "    y_pred, y_true : DataFrames that both contain the columns TD_IND,\n",
    "    FNCG_IND, FUND_IND, INSUR_IND and IL_IND; the five columns are\n",
    "    concatenated into one long 0/1 vector before scoring.\n",
    "\n",
    "    Fix: f1_score is now called as f1_score(y_true, y_pred), matching the\n",
    "    sklearn convention (the original passed predictions first; binary F1\n",
    "    is symmetric under that swap, so scores are unchanged).\n",
    "    '''\n",
    "    cols = ['TD_IND', 'FNCG_IND', 'FUND_IND', 'INSUR_IND', 'IL_IND']\n",
    "    list_pred = []\n",
    "    list_true = []\n",
    "    for col in cols:\n",
    "        list_pred.extend(y_pred[col].to_list())\n",
    "        list_true.extend(y_true[col].to_list())\n",
    "    return f1_score(list_true, list_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def reduce_mem_usage(df):\n",
    "    \"\"\" iterate through all the columns of a dataframe and modify the data type\n",
    "        to reduce memory usage.\n",
    "    \"\"\"\n",
    "    start_mem = df.memory_usage().sum()\n",
    "    print('内存占用{:.2f} MB'.format(start_mem/1024/1024))\n",
    "\n",
    "    for col in df.columns:\n",
    "        col_type = df[col].dtype\n",
    "\n",
    "        if col_type != object:\n",
    "            c_min = df[col].min()\n",
    "            c_max = df[col].max()\n",
    "            if str(col_type)[:3] == 'int':\n",
    "                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n",
    "                    df[col] = df[col].astype(np.int8)\n",
    "                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n",
    "                    df[col] = df[col].astype(np.int16)\n",
    "                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n",
    "                    df[col] = df[col].astype(np.int32)\n",
    "                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n",
    "                    df[col] = df[col].astype(np.int64)\n",
    "            else:\n",
    "                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n",
    "                    df[col] = df[col].astype(np.float16)\n",
    "                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:\n",
    "                    df[col] = df[col].astype(np.float32)\n",
    "                else:\n",
    "                    df[col] = df[col].astype(np.float64)\n",
    "\n",
    "    end_mem = df.memory_usage().sum()\n",
    "    print('优化后内存为: {:.2f} MB'.format(end_mem /1024/1024))\n",
    "    print('内存使用减少 {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))\n",
    "    return df\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 函数_连续型特征标准化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-10-29T07:24:54.814335Z",
     "iopub.status.busy": "2023-10-29T07:24:54.814042Z",
     "iopub.status.idle": "2023-10-29T07:24:54.822877Z",
     "shell.execute_reply": "2023-10-29T07:24:54.822244Z",
     "shell.execute_reply.started": "2023-10-29T07:24:54.814301Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_continuous_standard(original_data,standardflag):\n",
    "    list_allc = original_data.columns.tolist()\n",
    "    output_data = original_data;\n",
    "    for list_i in list_allc:\n",
    "        if standardflag == 0:\n",
    "            print('使用最大最小值标准化连续特征',list_i)  \n",
    "            #output_data[list_i]= (original_data[list_i] - original_data[list_i].min())/(original_data[list_i].max() - original_data[list_i].min())\n",
    "            temp = original_data[list_i].copy()\n",
    "            tp = temp.dtype\n",
    "            if tp not in ['int64', 'float64']:\n",
    "                temp = temp.astype('float64')\n",
    "            up = temp.quantile(0.9995)\n",
    "            down = temp.quantile(0.0005)\n",
    "            temp = temp.fillna(0)\n",
    "            index1 = temp[temp>up].index.tolist()\n",
    "            index2 = temp[temp<down].index.tolist()\n",
    "            temp.loc[index1] = up\n",
    "            temp.loc[index2] = down\n",
    "            #if up != down:\n",
    "            #    temp = temp.apply(lambda x:(x-down)/(up-down))\n",
    "            #    output_data[list_i] = round(temp,4)   \n",
    "            output_data[list_i] = temp\n",
    "        elif standardflag == 1:\n",
    "            print('使用均值标准差标准化连续特征',list_i)  \n",
    "            output_data[list_i]= (original_data[list_i] - original_data[list_i].mean())/original_data[list_i].std()\n",
    "    print('标准化完成！')  \n",
    "    return output_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 函数_K折特征目标特征编码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def kfold_stats_feature(train, test, feats, k,label='label'):\n",
    "    '''\n",
    "    Out-of-fold target-mean encoding of categorical features.\n",
    "\n",
    "    Parameters:\n",
    "    train, test : DataFrames; both are modified in place and returned.\n",
    "    feats       : list of feature (column) names to encode.\n",
    "    k           : number of stratified folds.\n",
    "    label       : name of the target column in `train`.\n",
    "\n",
    "    For every feature a column `<feat>_<label>_kfold_mean` is added:\n",
    "    on `train`, each row's encoding is the per-category target mean\n",
    "    computed on the other k-1 folds (avoids target leakage); on `test`,\n",
    "    the mean is computed from all of `train`. Categories unseen in the\n",
    "    fitting rows fall back to the global target mean.\n",
    "    '''\n",
    "    folds = StratifiedKFold(n_splits=k, shuffle=True, random_state=2020)  # best kept consistent with the model's K-fold CV later on\n",
    "\n",
    "    # remember each row's fold id so encodings can be written back per fold\n",
    "    train['fold'] = None\n",
    "    for fold_, (trn_idx, val_idx) in enumerate(folds.split(train, train[label])):\n",
    "        train.loc[val_idx, 'fold'] = fold_\n",
    "\n",
    "    kfold_features = []\n",
    "    for feat in feats:\n",
    "        nums_columns = [label]\n",
    "        for f in nums_columns:\n",
    "            colname = feat + '_' + f + '_kfold_mean'\n",
    "            kfold_features.append(colname)\n",
    "            train[colname] = None\n",
    "            # NOTE(review): split() is re-run here; it reproduces the fold\n",
    "            # assignment above only because random_state is fixed.\n",
    "            for fold_, (trn_idx, val_idx) in enumerate(folds.split(train, train[label])):\n",
    "                tmp_trn = train.iloc[trn_idx]\n",
    "                order_label = tmp_trn.groupby([feat])[f].mean()\n",
    "                # map each row of this fold to the mean computed on the other folds\n",
    "                tmp = train.loc[train.fold == fold_, [feat]]\n",
    "                train.loc[train.fold == fold_, colname] = tmp[feat].map(order_label)\n",
    "                # fillna\n",
    "                global_mean = train[f].mean()\n",
    "                train.loc[train.fold == fold_, colname] = train.loc[train.fold == fold_, colname].fillna(global_mean)\n",
    "            train[colname] = train[colname].astype(float)\n",
    "\n",
    "        for f in nums_columns:\n",
    "            colname = feat + '_' + f + '_kfold_mean'\n",
    "            test[colname] = None\n",
    "            # test-side encoding uses category means over the full train set\n",
    "            order_label = train.groupby([feat])[f].mean()\n",
    "            test[colname] = test[feat].map(order_label)\n",
    "            # fillna\n",
    "            global_mean = train[f].mean()\n",
    "            test[colname] = test[colname].fillna(global_mean)\n",
    "            test[colname] = test[colname].astype(float)\n",
    "    del train['fold']\n",
    "    return train, test"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  },
  "toc-autonumbering": true
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
