{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据读取与计算\n",
    "import pandas as  pd\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "\n",
    "# 数据预处理与模型选择\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.metrics import confusion_matrix, precision_recall_curve, auc, roc_auc_score, roc_curve, recall_score, classification_report\n",
    "import itertools\n",
    "\n",
    "# 随机森林与SVM\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from scipy import stats\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>V1</th>\n",
       "      <th>V2</th>\n",
       "      <th>V3</th>\n",
       "      <th>V4</th>\n",
       "      <th>V5</th>\n",
       "      <th>V6</th>\n",
       "      <th>V7</th>\n",
       "      <th>V8</th>\n",
       "      <th>V9</th>\n",
       "      <th>V10</th>\n",
       "      <th>...</th>\n",
       "      <th>V21</th>\n",
       "      <th>V22</th>\n",
       "      <th>V23</th>\n",
       "      <th>V24</th>\n",
       "      <th>V25</th>\n",
       "      <th>V26</th>\n",
       "      <th>V27</th>\n",
       "      <th>V28</th>\n",
       "      <th>Amount</th>\n",
       "      <th>Class</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>-1.359807</td>\n",
       "      <td>-0.072781</td>\n",
       "      <td>2.536347</td>\n",
       "      <td>1.378155</td>\n",
       "      <td>-0.338321</td>\n",
       "      <td>0.462388</td>\n",
       "      <td>0.239599</td>\n",
       "      <td>0.098698</td>\n",
       "      <td>0.363787</td>\n",
       "      <td>0.090794</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.018307</td>\n",
       "      <td>0.277838</td>\n",
       "      <td>-0.110474</td>\n",
       "      <td>0.066928</td>\n",
       "      <td>0.128539</td>\n",
       "      <td>-0.189115</td>\n",
       "      <td>0.133558</td>\n",
       "      <td>-0.021053</td>\n",
       "      <td>149.62</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1.191857</td>\n",
       "      <td>0.266151</td>\n",
       "      <td>0.166480</td>\n",
       "      <td>0.448154</td>\n",
       "      <td>0.060018</td>\n",
       "      <td>-0.082361</td>\n",
       "      <td>-0.078803</td>\n",
       "      <td>0.085102</td>\n",
       "      <td>-0.255425</td>\n",
       "      <td>-0.166974</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.225775</td>\n",
       "      <td>-0.638672</td>\n",
       "      <td>0.101288</td>\n",
       "      <td>-0.339846</td>\n",
       "      <td>0.167170</td>\n",
       "      <td>0.125895</td>\n",
       "      <td>-0.008983</td>\n",
       "      <td>0.014724</td>\n",
       "      <td>2.69</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-1.358354</td>\n",
       "      <td>-1.340163</td>\n",
       "      <td>1.773209</td>\n",
       "      <td>0.379780</td>\n",
       "      <td>-0.503198</td>\n",
       "      <td>1.800499</td>\n",
       "      <td>0.791461</td>\n",
       "      <td>0.247676</td>\n",
       "      <td>-1.514654</td>\n",
       "      <td>0.207643</td>\n",
       "      <td>...</td>\n",
       "      <td>0.247998</td>\n",
       "      <td>0.771679</td>\n",
       "      <td>0.909412</td>\n",
       "      <td>-0.689281</td>\n",
       "      <td>-0.327642</td>\n",
       "      <td>-0.139097</td>\n",
       "      <td>-0.055353</td>\n",
       "      <td>-0.059752</td>\n",
       "      <td>378.66</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>-0.966272</td>\n",
       "      <td>-0.185226</td>\n",
       "      <td>1.792993</td>\n",
       "      <td>-0.863291</td>\n",
       "      <td>-0.010309</td>\n",
       "      <td>1.247203</td>\n",
       "      <td>0.237609</td>\n",
       "      <td>0.377436</td>\n",
       "      <td>-1.387024</td>\n",
       "      <td>-0.054952</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.108300</td>\n",
       "      <td>0.005274</td>\n",
       "      <td>-0.190321</td>\n",
       "      <td>-1.175575</td>\n",
       "      <td>0.647376</td>\n",
       "      <td>-0.221929</td>\n",
       "      <td>0.062723</td>\n",
       "      <td>0.061458</td>\n",
       "      <td>123.50</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>-1.158233</td>\n",
       "      <td>0.877737</td>\n",
       "      <td>1.548718</td>\n",
       "      <td>0.403034</td>\n",
       "      <td>-0.407193</td>\n",
       "      <td>0.095921</td>\n",
       "      <td>0.592941</td>\n",
       "      <td>-0.270533</td>\n",
       "      <td>0.817739</td>\n",
       "      <td>0.753074</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.009431</td>\n",
       "      <td>0.798278</td>\n",
       "      <td>-0.137458</td>\n",
       "      <td>0.141267</td>\n",
       "      <td>-0.206010</td>\n",
       "      <td>0.502292</td>\n",
       "      <td>0.219422</td>\n",
       "      <td>0.215153</td>\n",
       "      <td>69.99</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>-0.425966</td>\n",
       "      <td>0.960523</td>\n",
       "      <td>1.141109</td>\n",
       "      <td>-0.168252</td>\n",
       "      <td>0.420987</td>\n",
       "      <td>-0.029728</td>\n",
       "      <td>0.476201</td>\n",
       "      <td>0.260314</td>\n",
       "      <td>-0.568671</td>\n",
       "      <td>-0.371407</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.208254</td>\n",
       "      <td>-0.559825</td>\n",
       "      <td>-0.026398</td>\n",
       "      <td>-0.371427</td>\n",
       "      <td>-0.232794</td>\n",
       "      <td>0.105915</td>\n",
       "      <td>0.253844</td>\n",
       "      <td>0.081080</td>\n",
       "      <td>3.67</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>1.229658</td>\n",
       "      <td>0.141004</td>\n",
       "      <td>0.045371</td>\n",
       "      <td>1.202613</td>\n",
       "      <td>0.191881</td>\n",
       "      <td>0.272708</td>\n",
       "      <td>-0.005159</td>\n",
       "      <td>0.081213</td>\n",
       "      <td>0.464960</td>\n",
       "      <td>-0.099254</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.167716</td>\n",
       "      <td>-0.270710</td>\n",
       "      <td>-0.154104</td>\n",
       "      <td>-0.780055</td>\n",
       "      <td>0.750137</td>\n",
       "      <td>-0.257237</td>\n",
       "      <td>0.034507</td>\n",
       "      <td>0.005168</td>\n",
       "      <td>4.99</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>-0.644269</td>\n",
       "      <td>1.417964</td>\n",
       "      <td>1.074380</td>\n",
       "      <td>-0.492199</td>\n",
       "      <td>0.948934</td>\n",
       "      <td>0.428118</td>\n",
       "      <td>1.120631</td>\n",
       "      <td>-3.807864</td>\n",
       "      <td>0.615375</td>\n",
       "      <td>1.249376</td>\n",
       "      <td>...</td>\n",
       "      <td>1.943465</td>\n",
       "      <td>-1.015455</td>\n",
       "      <td>0.057504</td>\n",
       "      <td>-0.649709</td>\n",
       "      <td>-0.415267</td>\n",
       "      <td>-0.051634</td>\n",
       "      <td>-1.206921</td>\n",
       "      <td>-1.085339</td>\n",
       "      <td>40.80</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>-0.894286</td>\n",
       "      <td>0.286157</td>\n",
       "      <td>-0.113192</td>\n",
       "      <td>-0.271526</td>\n",
       "      <td>2.669599</td>\n",
       "      <td>3.721818</td>\n",
       "      <td>0.370145</td>\n",
       "      <td>0.851084</td>\n",
       "      <td>-0.392048</td>\n",
       "      <td>-0.410430</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.073425</td>\n",
       "      <td>-0.268092</td>\n",
       "      <td>-0.204233</td>\n",
       "      <td>1.011592</td>\n",
       "      <td>0.373205</td>\n",
       "      <td>-0.384157</td>\n",
       "      <td>0.011747</td>\n",
       "      <td>0.142404</td>\n",
       "      <td>93.20</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>-0.338262</td>\n",
       "      <td>1.119593</td>\n",
       "      <td>1.044367</td>\n",
       "      <td>-0.222187</td>\n",
       "      <td>0.499361</td>\n",
       "      <td>-0.246761</td>\n",
       "      <td>0.651583</td>\n",
       "      <td>0.069539</td>\n",
       "      <td>-0.736727</td>\n",
       "      <td>-0.366846</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.246914</td>\n",
       "      <td>-0.633753</td>\n",
       "      <td>-0.120794</td>\n",
       "      <td>-0.385050</td>\n",
       "      <td>-0.069733</td>\n",
       "      <td>0.094199</td>\n",
       "      <td>0.246219</td>\n",
       "      <td>0.083076</td>\n",
       "      <td>3.68</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>10 rows × 30 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         V1        V2        V3        V4        V5        V6        V7  \\\n",
       "0 -1.359807 -0.072781  2.536347  1.378155 -0.338321  0.462388  0.239599   \n",
       "1  1.191857  0.266151  0.166480  0.448154  0.060018 -0.082361 -0.078803   \n",
       "2 -1.358354 -1.340163  1.773209  0.379780 -0.503198  1.800499  0.791461   \n",
       "3 -0.966272 -0.185226  1.792993 -0.863291 -0.010309  1.247203  0.237609   \n",
       "4 -1.158233  0.877737  1.548718  0.403034 -0.407193  0.095921  0.592941   \n",
       "5 -0.425966  0.960523  1.141109 -0.168252  0.420987 -0.029728  0.476201   \n",
       "6  1.229658  0.141004  0.045371  1.202613  0.191881  0.272708 -0.005159   \n",
       "7 -0.644269  1.417964  1.074380 -0.492199  0.948934  0.428118  1.120631   \n",
       "8 -0.894286  0.286157 -0.113192 -0.271526  2.669599  3.721818  0.370145   \n",
       "9 -0.338262  1.119593  1.044367 -0.222187  0.499361 -0.246761  0.651583   \n",
       "\n",
       "         V8        V9       V10  ...         V21       V22       V23  \\\n",
       "0  0.098698  0.363787  0.090794  ...   -0.018307  0.277838 -0.110474   \n",
       "1  0.085102 -0.255425 -0.166974  ...   -0.225775 -0.638672  0.101288   \n",
       "2  0.247676 -1.514654  0.207643  ...    0.247998  0.771679  0.909412   \n",
       "3  0.377436 -1.387024 -0.054952  ...   -0.108300  0.005274 -0.190321   \n",
       "4 -0.270533  0.817739  0.753074  ...   -0.009431  0.798278 -0.137458   \n",
       "5  0.260314 -0.568671 -0.371407  ...   -0.208254 -0.559825 -0.026398   \n",
       "6  0.081213  0.464960 -0.099254  ...   -0.167716 -0.270710 -0.154104   \n",
       "7 -3.807864  0.615375  1.249376  ...    1.943465 -1.015455  0.057504   \n",
       "8  0.851084 -0.392048 -0.410430  ...   -0.073425 -0.268092 -0.204233   \n",
       "9  0.069539 -0.736727 -0.366846  ...   -0.246914 -0.633753 -0.120794   \n",
       "\n",
       "        V24       V25       V26       V27       V28  Amount  Class  \n",
       "0  0.066928  0.128539 -0.189115  0.133558 -0.021053  149.62      0  \n",
       "1 -0.339846  0.167170  0.125895 -0.008983  0.014724    2.69      0  \n",
       "2 -0.689281 -0.327642 -0.139097 -0.055353 -0.059752  378.66      0  \n",
       "3 -1.175575  0.647376 -0.221929  0.062723  0.061458  123.50      0  \n",
       "4  0.141267 -0.206010  0.502292  0.219422  0.215153   69.99      0  \n",
       "5 -0.371427 -0.232794  0.105915  0.253844  0.081080    3.67      0  \n",
       "6 -0.780055  0.750137 -0.257237  0.034507  0.005168    4.99      0  \n",
       "7 -0.649709 -0.415267 -0.051634 -1.206921 -1.085339   40.80      0  \n",
       "8  1.011592  0.373205 -0.384157  0.011747  0.142404   93.20      0  \n",
       "9 -0.385050 -0.069733  0.094199  0.246219  0.083076    3.68      0  \n",
       "\n",
       "[10 rows x 30 columns]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "# Basic parameter settings for the experiments below\n",
    "mode = 2  # experiment mode selector (consumed by later cells — TODO confirm meaning)\n",
    "ratio = 1  # number of normal samples drawn per fraud sample when resampling\n",
    "iteration1 = 100  # iteration count (presumably resampling/training rounds; used later)\n",
    "show_best_c = True  # whether to report the best hyperparameter found\n",
    "show_bdry = True  # whether to report the decision boundary used\n",
    "\n",
    "## Load the data\n",
    "# 'Time' is dropped because it is not used as a feature in this analysis.\n",
    "data=pd.read_csv('creditcard.csv')\n",
    "data.drop('Time',axis=1,inplace=True)\n",
    "data.head(10)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>V1</th>\n",
       "      <th>V2</th>\n",
       "      <th>V3</th>\n",
       "      <th>V4</th>\n",
       "      <th>V5</th>\n",
       "      <th>V6</th>\n",
       "      <th>V7</th>\n",
       "      <th>V8</th>\n",
       "      <th>V9</th>\n",
       "      <th>V10</th>\n",
       "      <th>...</th>\n",
       "      <th>V21</th>\n",
       "      <th>V22</th>\n",
       "      <th>V23</th>\n",
       "      <th>V24</th>\n",
       "      <th>V25</th>\n",
       "      <th>V26</th>\n",
       "      <th>V27</th>\n",
       "      <th>V28</th>\n",
       "      <th>Amount</th>\n",
       "      <th>Class</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>-0.694242</td>\n",
       "      <td>-0.044075</td>\n",
       "      <td>1.672773</td>\n",
       "      <td>0.973366</td>\n",
       "      <td>-0.245117</td>\n",
       "      <td>0.347068</td>\n",
       "      <td>0.193679</td>\n",
       "      <td>0.082637</td>\n",
       "      <td>0.331128</td>\n",
       "      <td>0.083386</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.024923</td>\n",
       "      <td>0.382854</td>\n",
       "      <td>-0.176911</td>\n",
       "      <td>0.110507</td>\n",
       "      <td>0.246585</td>\n",
       "      <td>-0.392170</td>\n",
       "      <td>0.330892</td>\n",
       "      <td>-0.063781</td>\n",
       "      <td>0.244964</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.608496</td>\n",
       "      <td>0.161176</td>\n",
       "      <td>0.109797</td>\n",
       "      <td>0.316523</td>\n",
       "      <td>0.043483</td>\n",
       "      <td>-0.061820</td>\n",
       "      <td>-0.063700</td>\n",
       "      <td>0.071253</td>\n",
       "      <td>-0.232494</td>\n",
       "      <td>-0.153350</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.307377</td>\n",
       "      <td>-0.880077</td>\n",
       "      <td>0.162201</td>\n",
       "      <td>-0.561131</td>\n",
       "      <td>0.320694</td>\n",
       "      <td>0.261069</td>\n",
       "      <td>-0.022256</td>\n",
       "      <td>0.044608</td>\n",
       "      <td>-0.342475</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>-0.693500</td>\n",
       "      <td>-0.811578</td>\n",
       "      <td>1.169468</td>\n",
       "      <td>0.268231</td>\n",
       "      <td>-0.364572</td>\n",
       "      <td>1.351454</td>\n",
       "      <td>0.639776</td>\n",
       "      <td>0.207373</td>\n",
       "      <td>-1.378675</td>\n",
       "      <td>0.190700</td>\n",
       "      <td>...</td>\n",
       "      <td>0.337632</td>\n",
       "      <td>1.063358</td>\n",
       "      <td>1.456320</td>\n",
       "      <td>-1.138092</td>\n",
       "      <td>-0.628537</td>\n",
       "      <td>-0.288447</td>\n",
       "      <td>-0.137137</td>\n",
       "      <td>-0.181021</td>\n",
       "      <td>1.160686</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>-0.493325</td>\n",
       "      <td>-0.112169</td>\n",
       "      <td>1.182516</td>\n",
       "      <td>-0.609727</td>\n",
       "      <td>-0.007469</td>\n",
       "      <td>0.936150</td>\n",
       "      <td>0.192071</td>\n",
       "      <td>0.316018</td>\n",
       "      <td>-1.262503</td>\n",
       "      <td>-0.050468</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.147443</td>\n",
       "      <td>0.007267</td>\n",
       "      <td>-0.304777</td>\n",
       "      <td>-1.941027</td>\n",
       "      <td>1.241904</td>\n",
       "      <td>-0.460217</td>\n",
       "      <td>0.155396</td>\n",
       "      <td>0.186189</td>\n",
       "      <td>0.140534</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>-0.591330</td>\n",
       "      <td>0.531541</td>\n",
       "      <td>1.021412</td>\n",
       "      <td>0.284655</td>\n",
       "      <td>-0.295015</td>\n",
       "      <td>0.071999</td>\n",
       "      <td>0.479302</td>\n",
       "      <td>-0.226510</td>\n",
       "      <td>0.744326</td>\n",
       "      <td>0.691625</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.012839</td>\n",
       "      <td>1.100011</td>\n",
       "      <td>-0.220123</td>\n",
       "      <td>0.233250</td>\n",
       "      <td>-0.395202</td>\n",
       "      <td>1.041611</td>\n",
       "      <td>0.543620</td>\n",
       "      <td>0.651816</td>\n",
       "      <td>-0.073403</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>-0.217475</td>\n",
       "      <td>0.581675</td>\n",
       "      <td>0.752585</td>\n",
       "      <td>-0.118833</td>\n",
       "      <td>0.305009</td>\n",
       "      <td>-0.022313</td>\n",
       "      <td>0.384936</td>\n",
       "      <td>0.217955</td>\n",
       "      <td>-0.517619</td>\n",
       "      <td>-0.341101</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.283522</td>\n",
       "      <td>-0.771427</td>\n",
       "      <td>-0.042273</td>\n",
       "      <td>-0.613273</td>\n",
       "      <td>-0.446584</td>\n",
       "      <td>0.219637</td>\n",
       "      <td>0.628900</td>\n",
       "      <td>0.245636</td>\n",
       "      <td>-0.338556</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>0.627795</td>\n",
       "      <td>0.085389</td>\n",
       "      <td>0.029923</td>\n",
       "      <td>0.849383</td>\n",
       "      <td>0.139020</td>\n",
       "      <td>0.204695</td>\n",
       "      <td>-0.004170</td>\n",
       "      <td>0.067998</td>\n",
       "      <td>0.423218</td>\n",
       "      <td>-0.091155</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.228334</td>\n",
       "      <td>-0.373032</td>\n",
       "      <td>-0.246780</td>\n",
       "      <td>-1.287973</td>\n",
       "      <td>1.439037</td>\n",
       "      <td>-0.533436</td>\n",
       "      <td>0.085492</td>\n",
       "      <td>0.015656</td>\n",
       "      <td>-0.333279</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>-0.328928</td>\n",
       "      <td>0.858692</td>\n",
       "      <td>0.708576</td>\n",
       "      <td>-0.347631</td>\n",
       "      <td>0.687512</td>\n",
       "      <td>0.321345</td>\n",
       "      <td>0.905860</td>\n",
       "      <td>-3.188229</td>\n",
       "      <td>0.560129</td>\n",
       "      <td>1.147430</td>\n",
       "      <td>...</td>\n",
       "      <td>2.645889</td>\n",
       "      <td>-1.399276</td>\n",
       "      <td>0.092085</td>\n",
       "      <td>-1.072754</td>\n",
       "      <td>-0.796633</td>\n",
       "      <td>-0.107075</td>\n",
       "      <td>-2.990154</td>\n",
       "      <td>-3.288083</td>\n",
       "      <td>-0.190107</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>-0.456573</td>\n",
       "      <td>0.173291</td>\n",
       "      <td>-0.074653</td>\n",
       "      <td>-0.191774</td>\n",
       "      <td>1.934149</td>\n",
       "      <td>2.793594</td>\n",
       "      <td>0.299206</td>\n",
       "      <td>0.712592</td>\n",
       "      <td>-0.356851</td>\n",
       "      <td>-0.376940</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.099963</td>\n",
       "      <td>-0.369425</td>\n",
       "      <td>-0.327055</td>\n",
       "      <td>1.670269</td>\n",
       "      <td>0.715943</td>\n",
       "      <td>-0.796633</td>\n",
       "      <td>0.029104</td>\n",
       "      <td>0.431420</td>\n",
       "      <td>0.019392</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>-0.172698</td>\n",
       "      <td>0.678005</td>\n",
       "      <td>0.688781</td>\n",
       "      <td>-0.156927</td>\n",
       "      <td>0.361792</td>\n",
       "      <td>-0.185219</td>\n",
       "      <td>0.526706</td>\n",
       "      <td>0.058223</td>\n",
       "      <td>-0.670587</td>\n",
       "      <td>-0.336912</td>\n",
       "      <td>...</td>\n",
       "      <td>-0.336156</td>\n",
       "      <td>-0.873298</td>\n",
       "      <td>-0.193438</td>\n",
       "      <td>-0.635767</td>\n",
       "      <td>-0.133773</td>\n",
       "      <td>0.195342</td>\n",
       "      <td>0.610010</td>\n",
       "      <td>0.251681</td>\n",
       "      <td>-0.338516</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>10 rows × 30 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         V1        V2        V3        V4        V5        V6        V7  \\\n",
       "0 -0.694242 -0.044075  1.672773  0.973366 -0.245117  0.347068  0.193679   \n",
       "1  0.608496  0.161176  0.109797  0.316523  0.043483 -0.061820 -0.063700   \n",
       "2 -0.693500 -0.811578  1.169468  0.268231 -0.364572  1.351454  0.639776   \n",
       "3 -0.493325 -0.112169  1.182516 -0.609727 -0.007469  0.936150  0.192071   \n",
       "4 -0.591330  0.531541  1.021412  0.284655 -0.295015  0.071999  0.479302   \n",
       "5 -0.217475  0.581675  0.752585 -0.118833  0.305009 -0.022313  0.384936   \n",
       "6  0.627795  0.085389  0.029923  0.849383  0.139020  0.204695 -0.004170   \n",
       "7 -0.328928  0.858692  0.708576 -0.347631  0.687512  0.321345  0.905860   \n",
       "8 -0.456573  0.173291 -0.074653 -0.191774  1.934149  2.793594  0.299206   \n",
       "9 -0.172698  0.678005  0.688781 -0.156927  0.361792 -0.185219  0.526706   \n",
       "\n",
       "         V8        V9       V10  ...         V21       V22       V23  \\\n",
       "0  0.082637  0.331128  0.083386  ...   -0.024923  0.382854 -0.176911   \n",
       "1  0.071253 -0.232494 -0.153350  ...   -0.307377 -0.880077  0.162201   \n",
       "2  0.207373 -1.378675  0.190700  ...    0.337632  1.063358  1.456320   \n",
       "3  0.316018 -1.262503 -0.050468  ...   -0.147443  0.007267 -0.304777   \n",
       "4 -0.226510  0.744326  0.691625  ...   -0.012839  1.100011 -0.220123   \n",
       "5  0.217955 -0.517619 -0.341101  ...   -0.283522 -0.771427 -0.042273   \n",
       "6  0.067998  0.423218 -0.091155  ...   -0.228334 -0.373032 -0.246780   \n",
       "7 -3.188229  0.560129  1.147430  ...    2.645889 -1.399276  0.092085   \n",
       "8  0.712592 -0.356851 -0.376940  ...   -0.099963 -0.369425 -0.327055   \n",
       "9  0.058223 -0.670587 -0.336912  ...   -0.336156 -0.873298 -0.193438   \n",
       "\n",
       "        V24       V25       V26       V27       V28    Amount  Class  \n",
       "0  0.110507  0.246585 -0.392170  0.330892 -0.063781  0.244964      0  \n",
       "1 -0.561131  0.320694  0.261069 -0.022256  0.044608 -0.342475      0  \n",
       "2 -1.138092 -0.628537 -0.288447 -0.137137 -0.181021  1.160686      0  \n",
       "3 -1.941027  1.241904 -0.460217  0.155396  0.186189  0.140534      0  \n",
       "4  0.233250 -0.395202  1.041611  0.543620  0.651816 -0.073403      0  \n",
       "5 -0.613273 -0.446584  0.219637  0.628900  0.245636 -0.338556      0  \n",
       "6 -1.287973  1.439037 -0.533436  0.085492  0.015656 -0.333279      0  \n",
       "7 -1.072754 -0.796633 -0.107075 -2.990154 -3.288083 -0.190107      0  \n",
       "8  1.670269  0.715943 -0.796633  0.029104  0.431420  0.019392      0  \n",
       "9 -0.635767 -0.133773  0.195342  0.610010  0.251681 -0.338516      0  \n",
       "\n",
       "[10 rows x 30 columns]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "## Normalization: standardize features to zero mean / unit variance\n",
    "def normalize_feature(data, amount_only=False):\n",
    "    \"\"\"Standardize feature columns of `data` in place with StandardScaler.\n",
    "\n",
    "    If amount_only is True, only the 'Amount' column is scaled; otherwise\n",
    "    every column except the target 'Class' is scaled. Returns the same\n",
    "    (mutated) DataFrame.\n",
    "    \"\"\"\n",
    "    if amount_only:\n",
    "        columns = ['Amount']\n",
    "    else:\n",
    "        columns = [col for col in data.columns.values.tolist() if col != 'Class']\n",
    "    for col in columns:\n",
    "        data[col] = StandardScaler().fit_transform(data[col].values.reshape(-1, 1))\n",
    "    return data\n",
    "\n",
    "data = normalize_feature(data)\n",
    "data.head(10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split the fraud/normal index lists into train and test portions\n",
    "def split_train_test(fraud_indices, normal_indices, test_size = 0.3):\n",
    "    \"\"\"Partition fraud and normal sample indices into train/test sets.\n",
    "\n",
    "    The first `test_size` fraction of each index list becomes the test\n",
    "    portion; the remainder is kept for training.\n",
    "\n",
    "    Returns (train_normal, train_fraud, test_normal, test_fraud).\n",
    "    \"\"\"\n",
    "    n_test_fraud = int(len(fraud_indices) * test_size)\n",
    "    n_test_normal = int(len(normal_indices) * test_size)\n",
    "\n",
    "    test_fraud_indices, train_fraud_indices = fraud_indices[:n_test_fraud], fraud_indices[n_test_fraud:]\n",
    "    test_normal_indices, train_normal_indices = normal_indices[:n_test_normal], normal_indices[n_test_normal:]\n",
    "\n",
    "    return train_normal_indices, train_fraud_indices, test_normal_indices, test_fraud_indices\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The *_indices arguments hold integer row positions into `data`\n",
    "def getTrainingSample(train_fraud_indices, train_normal_indices, data, train_normal_pos, ratio):\n",
    "    \"\"\"Build one balanced training sample for resampling-based training.\n",
    "\n",
    "    All fraud rows are combined with a sliding window over the normal rows\n",
    "    of size ratio*len(train_fraud_indices). The window position is tracked\n",
    "    by train_normal_pos and wraps around when it reaches the end of the\n",
    "    normal-index list.\n",
    "\n",
    "    Returns (x_train_undersample, y_train_undersample, new_train_normal_pos).\n",
    "    \"\"\"\n",
    "    train_number_records_fraud = int(ratio * len(train_fraud_indices))\n",
    "    train_number_records_normal = len(train_normal_indices)\n",
    "\n",
    "    # Take the next window of normal-sample indices (under-sampling).\n",
    "    if train_normal_pos + train_number_records_fraud <= train_number_records_normal:\n",
    "        # Bug fix: original referenced the undefined name `train_normal_indics`.\n",
    "        small_train_normal_indices = train_normal_indices[train_normal_pos: train_normal_pos + train_number_records_fraud]\n",
    "        train_normal_pos = train_normal_pos + train_number_records_fraud\n",
    "    else:\n",
    "        # Window runs past the end of the list: wrap around to the start.\n",
    "        small_train_normal_indices = np.concatenate([train_normal_indices[train_normal_pos: train_number_records_normal],\n",
    "                                            train_normal_indices[0: train_normal_pos + train_number_records_fraud - train_number_records_normal]])\n",
    "        train_normal_pos = train_normal_pos + train_number_records_fraud - train_number_records_normal\n",
    "\n",
    "    # Merge fraud and sampled normal row positions, then shuffle in place.\n",
    "    under_train_sample_indices = np.concatenate([train_fraud_indices, small_train_normal_indices])\n",
    "    np.random.shuffle(under_train_sample_indices)\n",
    "\n",
    "    under_train_sample_data = data.iloc[under_train_sample_indices,:]\n",
    "\n",
    "    # Bug fix: DataFrame.ix was removed from pandas; use .loc with a\n",
    "    # boolean column mask instead.\n",
    "    x_train_undersample = under_train_sample_data.loc[:, under_train_sample_data.columns != 'Class']\n",
    "    y_train_undersample = under_train_sample_data.loc[:, under_train_sample_data.columns == 'Class']\n",
    "\n",
    "    # Resampled features, targets, and the advanced window position.\n",
    "    return x_train_undersample, y_train_undersample, train_normal_pos"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# predict_proba reference: https://blog.csdn.net/anqijiayou/article/details/80295237\n",
    "def knn_module(x,y,indices, c_param, bdry=None):\n",
    "    \"\"\"Fit a k-NN classifier (k = c_param) on the training fold and return\n",
    "    hard class predictions for the validation fold.\n",
    "\n",
    "    indices[0] / indices[1] are the train / validation row positions.\n",
    "    bdry is accepted for interface parity with the other modules but unused.\n",
    "    \"\"\"\n",
    "    model = KNeighborsClassifier(n_neighbors=c_param)\n",
    "    # .ravel() flattens the (n, 1) target frame into a 1-D array\n",
    "    model.fit(x.iloc[indices[0], :], y.iloc[indices[0], :].values.ravel())\n",
    "    return model.predict(x.iloc[indices[1], :].values)\n",
    "\n",
    "def svm_rbf_module(x, y, indices, c_param, bdry= 0.5):\n",
    "    \"\"\"RBF-kernel SVM: fit on the training fold, then label each validation\n",
    "    row positive when its predicted fraud probability reaches bdry.\n",
    "    \"\"\"\n",
    "    model = SVC(C=c_param, probability=True)\n",
    "    model.fit(x.iloc[indices[0], :], y.iloc[indices[0], :].values.ravel())\n",
    "    fraud_proba = model.predict_proba(x.iloc[indices[1], :].values)[:, 1]\n",
    "    return fraud_proba >= bdry\n",
    "\n",
    "def svm_poly_module(x,y, indices, c_param, bdry=0.5):\n",
    "    \"\"\"Polynomial-kernel SVM: c_param is a pair (C, degree). Labels a\n",
    "    validation row positive when its predicted fraud probability >= bdry.\n",
    "    \"\"\"\n",
    "    model = SVC(C=c_param[0], kernel='poly', degree=c_param[1], probability=True)\n",
    "    model.fit(x.iloc[indices[0], :], y.iloc[indices[0], :].values.ravel())\n",
    "    fraud_proba = model.predict_proba(x.iloc[indices[1], :].values)[:, 1]\n",
    "    return fraud_proba >= bdry\n",
    "\n",
    "def lr_module(x,y, indices, c_param, bdry=0.5):\n",
    "    \"\"\"L1-regularized logistic regression; C = c_param is the inverse\n",
    "    regularization strength. Labels a validation row positive when the\n",
    "    predicted fraud probability reaches bdry.\n",
    "    \"\"\"\n",
    "    # Bug fixes: penalty was the typo '11' (invalid value) instead of 'l1',\n",
    "    # and the body used the undefined name X instead of the parameter x.\n",
    "    # liblinear is the solver that supports L1 penalties.\n",
    "    lr = LogisticRegression(C=c_param, penalty='l1', solver='liblinear')\n",
    "    lr.fit(x.iloc[indices[0],:], y.iloc[indices[0],:].values.ravel())\n",
    "    y_pred_undersample = lr.predict_proba(x.iloc[indices[1],:].values)[:,1] >= bdry\n",
    "    return y_pred_undersample\n",
    "    \n",
    "def rf_module(x,y, indices, c_param, bdry=0.5):\n",
    "    \"\"\"Random forest with min_samples_split = c_param. Labels a validation\n",
    "    row positive when its predicted fraud probability reaches bdry.\n",
    "    Parameter notes: https://www.cnblogs.com/harvey888/p/6512312.html\n",
    "    \"\"\"\n",
    "    rf= RandomForestClassifier(n_jobs=-1,n_estimators=100, criterion='entropy', max_features= 'auto',\n",
    "                               max_depth=None,min_samples_split= c_param, random_state=0)\n",
    "    # Bug fix: the body used the undefined name X instead of the parameter x.\n",
    "    rf.fit(x.iloc[indices[0],:], y.iloc[indices[0],:].values.ravel())\n",
    "    y_pred_undersample = rf.predict_proba(x.iloc[indices[1],:].values)[:,1]>=bdry\n",
    "    return y_pred_undersample"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "![TIM截图20181229221010.png](https://i.loli.net/2018/12/29/5c278050b4c47.png)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#https://www.cnblogs.com/zhixingheyi/p/8097782.html\n",
    "#https://blog.csdn.net/xierhacker/article/details/70903617\n",
    "#计算召回率和auc\n",
    "#y_t是真实值，y_p是预测值\n",
    "def compute_recall_and_auc(y_t, y_p):\n",
    "    #混淆矩阵\n",
    "    cnf_matrix=confusion_matrix(y_t,y_p)\n",
    "    #设置numpy的打印精度\n",
    "    np.set_printoptions(precision=2)\n",
    "    recall_score = cnf_matrix[0,0]/(cnf_matrix[1,0]+cnf_matrix[0,0])\n",
    "    \n",
    "    #Roc曲线\n",
    "    # https://www.cnblogs.com/gatherstars/p/6084696.html\n",
    "    fpr, tpr,thresholds = roc_curve(y_t,y_p)\n",
    "    roc_auc= auc(fpr,tpr)\n",
    "    return recall_score , roc_auc\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#自己实现寻找最优超参数\n",
    "def cross_validation_recall(x_train_data, y_train_data, c_param_range, models_dict, model_name):\n",
    "    #使用K折交叉验证来寻找最优超参数\n",
    "    fold=KFold(5,shuffle=False)\n",
    "    # 构造超参数得分列表\n",
    "    results_table = pd.DataFrame(index= range(len(c_param_range),2), columns = ['C_parameter','Mean recall score'])\n",
    "    results_table['C_parameter'] = c_param_range\n",
    "    \n",
    "    recall_mean=[]\n",
    "    # 循环使用每个超参数\n",
    "    for c_param in c_param_range:\n",
    "        recall_aucs=[]\n",
    "        \n",
    "        # 循环交叉集\n",
    "        for i,train_index in enumerate(fold.split(y_train_data)):\n",
    "            # 模型训练\n",
    "            y_pred_undersample= models_dict[model_name](x_train_data,y_train_data, train_index, c_param)\n",
    "            \n",
    "            # 计算召回率和ROC曲线\n",
    "            recall_auc, _=compute_recall_and_auc(y_train_data.iloc[train_index[1],:].values,y_pred_undersample)\n",
    "            print(model_name,'第',i,'次：',recall_auc)\n",
    "            recall_aucs.append(recall_auc)\n",
    "        \n",
    "        # auc取平均值作为这组超参数的分数\n",
    "        recall_mean.append(np.mean(recall_aucs))\n",
    "    \n",
    "    results_table['Mean recall score'] = recall_mean\n",
    "    # 得分最大的一组作为最优超参数，并返回\n",
    "    best_c = results_table.loc[results_table['Mean recall score'].idxmax()]['C_parameter']\n",
    "    return best_c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# 不同的决策边界阈值\n",
    "# 也是通过遍历调参的方式确定\n",
    "def decision_boundary(x_train_data, y_train_data, fold, best_c, bdry_dict, models_dict, model_name):\n",
    "    bdry_range= [0.3,0.35,0.4,0.45,0.5]\n",
    "    results_table = pd.DataFrame(index = range(len(bdry_ranges),2) , columns = ['Bdry_params','Mean recall score * auc'])\n",
    "    results_table['Bdry_params']= bdry_ranges\n",
    "    \n",
    "    recall_mean=[]\n",
    "    for bdry in bdry_ranges:\n",
    "        recall_accs_aucs = []\n",
    "        for iteration, indices in enumerate(fold.split(y_train_data)):\n",
    "            y_pred_undersample = models_dict[model_name](x_train_data, y_train_data, indices, best_c, bdry)\n",
    "            recall_acc, roc_auc = compute_recall_and_auc(y_train_data.iloc[indices[1],:].values, y_pred_undersample)\n",
    "            \n",
    "            # bdry_dict[model_name]是调用不同模型的计算公式\n",
    "            recall_accs_aucs.append(bdry_dict[model_name](recall_acc, roc_auc))\n",
    "        recall_mean.append(np.mean(recall_accs_aucs))\n",
    "\n",
    "    results_table['Mean recall score * auc'] = recall_mean\n",
    "    best_bdry = results_table.loc[results_table['Mean recall score * auc'].idxmax()]['Bdry_params']\n",
    "\n",
    "    return best_bdry"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model(x,y,train, bdry_dict = None, best_c=None, best_bdry=None, models= None, mode=None):\n",
    "    #训练阶段\n",
    "    if train:\n",
    "        #用不同的模型进行训练\n",
    "        models_dict = {'knn' : knn_module, 'svm_rbf': svm_rbf_module, 'svm_poly': svm_poly_module,\n",
    "                        'lr': lr_module, 'rf': rf_module}\n",
    "        \n",
    "        #knn中取不同的k值(超参数)\n",
    "        c_param_range_knn=[3,5,7,9]\n",
    "        #自定义cross_validation_recall，使用循环找出最适合的超参数。\n",
    "        best_c_knn=cross_validation_recall(x,y, c_param_range_knn,models_dict, 'knn')\n",
    "        \n",
    "        # SVM-RBF中不同的参数\n",
    "        c_param_range_svm_rbf=[0.01,0.1,1,10,100]\n",
    "        best_c_svm_rbf = cross_validation_recall(x,y,c_param_range_svm_rbf, models_dict, 'svm_rbf')\n",
    "        \n",
    "        c_param_range_svm_poly = [[0.01, 2], [0.01, 3], [0.01, 4], [0.01, 5], [0.01, 6], [0.01, 7], [0.01, 8], [0.01, 9],\n",
    "                                  [0.1, 2], [0.1, 3], [0.1, 4], [0.1, 5], [0.1, 6], [0.1, 7], [0.1, 8], [0.1, 9],\n",
    "                                  [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [1, 9],\n",
    "                                  [10, 2], [10, 3], [10, 4], [10, 5], [10, 6], [10, 7], [10, 8], [10, 9],\n",
    "                                  [100, 2], [100, 3], [100, 4], [100, 5], [100, 6], [100, 7], [100, 8], [100, 9]]\n",
    "        \n",
    "        best_c_svm_poly = cross_validation_recall(x,y, c_param_range_svm_poly, models_dict, 'svm_poly')\n",
    "        \n",
    "        # 逻辑回归当中的正则化强度\n",
    "        c_param_range_lr=[0.01,0.1,1,10,100]\n",
    "        best_c_lr = cross_validation_recall(x,y, c_param_range_lr, models_dict, 'lr')\n",
    "        \n",
    "        # 随机森林里调参\n",
    "        c_param_range_rf = [2,5,10,15,20]\n",
    "        best_c_rf= cross_validation_recall(X, y, c_param_range_rf, models_dict, 'rf')\n",
    "        \n",
    "        # 合并超参数\n",
    "        best_c = [best_c_knn, best_c_svm_rbf, best_c_svm_poly, best_c_lr, best_c_rf, best_c]\n",
    "        \n",
    "        # 交叉验证确定合适的决策边界阈值\n",
    "        fold = KFold(4,shuffle=True)\n",
    "        \n",
    "        # decision_boundary是一个计算决策边界的函数\n",
    "        best_bdry_svm_rbf= decision_boundary(x, y, fold, best_c_svm_rbf, bdry_dict, models_dict, 'svm_rbf')\n",
    "        best_bdry_svm_poly = decision_boundary(x, y, fold, best_c_svm_poly, bdry_dict, models_dict, 'svm_poly')\n",
    "        best_bdry_lr = decision_boundary(x, y, fold, best_c_lr, bdry_dict, models_dict, 'lr')\n",
    "        best_bdry_rf = decision_boundary(x, y, fold, best_c_rf, bdry_dict, models_dict, 'rf')\n",
    "        best_bdry = [0.5, best_bdry_svm_rbf, best_bdry_svm_poly, best_bdry_lr, best_bdry_rf]\n",
    "        \n",
    "        # 最优参数建模\n",
    "        knn = KNeighborsClassifier(n_neighbors = int(best_c_knn))\n",
    "        knn.fit(x.values, y.values.ravel())\n",
    "        \n",
    "        svm_rbf = SVC(C=best_c_svm_rbf, probability = True)\n",
    "        svm_rbf.fit(x.values, y.values.ravel())\n",
    "        \n",
    "        svm_poly = SVC(C=best_c_svm_poly[0], kernel = 'poly', degree = best_c_svm_poly[1], probability = True)\n",
    "        svm_poly.fit(x.values, y.values.ravel())\n",
    "\n",
    "        lr = LogisticRegression(C = best_c_lr, penalty ='l1', warm_start = False)\n",
    "        lr.fit(x.values, y.values.ravel())\n",
    "\n",
    "        rf = RandomForestClassifier(n_jobs=-1, n_estimators = 100, criterion = 'entropy', \n",
    "                                    max_features = 'auto', max_depth = None, \n",
    "                                    min_samples_split  = int(best_c_rf), random_state=0)\n",
    "        rf.fit(x.values, y.values.ravel())\n",
    "        \n",
    "        models = [knn,svm_rbf,svm_poly, lr, rf]\n",
    "        return best_c,best_bdry,models\n",
    "    else:\n",
    "        #预测阶段\n",
    "        [knn, svm_rbf, svm_poly, lr, rf] = models\n",
    "        [_, best_bdry_svm_rbf, best_bdry_svm_poly, best_bdry_lr, best_bdry_rf] = best_bdry\n",
    "        \n",
    "        # KNN\n",
    "        y_pred_knn = knn.predict(x.values)\n",
    "        # 用rbf核的SVM\n",
    "        y_pred_svm_rbf = svm_rbf.predict_proba(x.values)[:,1] >= best_bdry_svm_rbf\n",
    "        # 用多项式核的SVM\n",
    "        y_pred_svm_poly = svm_poly.predict_proba(x.values)[:,1] >= best_bdry_svm_poly\n",
    "        # LR\n",
    "        y_pred_lr= lr.predict_proba(x.values)[:,1] >= best_bdry_lr\n",
    "        # 随机森林\n",
    "        y_pred_rf = rf.predict_proba(x.values)[:,1] >= best_bdry_rf\n",
    "        \n",
    "        x_of_three_models = {'knn' : y_pred_knn, 'svm_rbf' : y_pred_svm_rbf, 'svm_poly' : y_pred_svm_poly, 'lr' : y_pred_lr, 'rf': y_pred_rf}\n",
    "        \n",
    "        #得到5个模型的预测结果\n",
    "        X_5_data = pd.DataFrame(data = x_of_three_models)\n",
    "        \n",
    "        # 进行投票机制，大于2票的为正样本\n",
    "        y_prd= np.sum(x_5_data,axis=1)>=2\n",
    "        \n",
    "        y_pred_lr_controls = []\n",
    "        params = [0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]\n",
    "\n",
    "        # 投票器去产出最终结果\n",
    "        for param in params:\n",
    "            y_pred_lr_controls.append(lr.predict_proba(X.values)[:,1] >= param)\n",
    "        return y_pred, y_pred_lr_controls, params\n",
    "        \n",
    "        \n",
    "        \n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run(data,mode,ratio,iteration1,bdry_dict):\n",
    "    recall_score_list =[]\n",
    "    auc_list = []\n",
    "    recall_score_lr_list =[]\n",
    "    auc_lr_list = []\n",
    "    best_c = None\n",
    "    best_bdry = None\n",
    "    for itr1 in range(iteration1):\n",
    "        #欺诈类的样本\n",
    "        fraud_indices=np.array(data[data.Class==1].index)\n",
    "        #进行随机排列\n",
    "        np.random.shuffle(fraud_indices)\n",
    "        \n",
    "        #获取正常样本\n",
    "        normal_indices=np.array(data[data.Class==0].index)\n",
    "        np.random.shuffle(normal_indices)\n",
    "        \n",
    "        \n",
    "        #划分训练集和测试集\n",
    "        train_normal_indices, train_fraud_indices, test_normal_indices, test_fraud_indices=split_train_test(\n",
    "            normal_indices,fraud_indices)\n",
    "        \n",
    "        ##合并测试集\n",
    "        test_indices=np.concatenate([test_normal_indices,test_fraud_indices])\n",
    "        \n",
    "        #通过下标选取测试集数据，[表示选取行,表示选取列]\n",
    "        test_data=data.iloc[test_indices,:]\n",
    "        x_test=test_data.ix[:,test_data.columns != 'Class']\n",
    "        y_test=test_data.ix[:,test_data.columns == 'Class']\n",
    "        \n",
    "        #数据下采样\n",
    "        x_train_undersample,y_train_undersample,train_normal_pos=getTrainingSample(\n",
    "            train_fraud_indices,train_normal_indices,data,0,ratio)\n",
    "        \n",
    "        #模型训练\n",
    "        best_c,best_bdry,models=model(x_train_undersample, y_train_undersample,train=True,\n",
    "                                         bdry_dict= bdry_dict, best_c=best_c, best_bdry=best_bdry)\n",
    "        \n",
    "        if show_best_c:\n",
    "            print(\"超参数值:\")\n",
    "            print(\"k-nearest nbd: %.2f, svm (rbf kernel): [%.2f, %.2f], svm (poly kernel): %.2f, logistic reg: %.2f, random forest: %.2f\"\n",
    "                  %(best_c[0], best_c[1], best_c[2][0], best_c[2][1], best_c[3], best_c[4]))\n",
    "\n",
    "        if show_bdry:\n",
    "            print(\"决策边界阈值:\")\n",
    "            print(\"k-nearest nbd: %.2f, svm (rbf kernel): %.2f, svm (poly kernel): %.2f, logistic reg: %.2f, random forest: %.2f\"\n",
    "                  %(best_bdry[0], best_bdry[1], best_bdry[2], best_bdry[3], best_bdry[4]))\n",
    "            \n",
    "        \n",
    "         # 预测\n",
    "        y_pred, y_pred_lr_controls, params = model(x_test, y_test, train = False, bdry_dict = None,\n",
    "                                                   best_c = best_c, best_bdry = best_bdry, models = models, mode = mode)\n",
    "        \n",
    "        #记录指标\n",
    "        recall_score, roc_auc= compute_recall_and_auc(y_test,y_pred)\n",
    "        recall_score_list.append(recall_score)\n",
    "        auc_list.append(roc_auc)\n",
    "        \n",
    "        control_recall_all_param = []\n",
    "        control_roc_all_param = []\n",
    "        for i in range(len(params)):\n",
    "            recall_score_lr, roc_auc_lr = compute_recall_and_auc(y_test, y_pred_lr_controls[i]) # for control\n",
    "            control_recall_all_param.append(recall_score_lr)\n",
    "            control_roc_all_param.append(roc_auc_lr)\n",
    "\n",
    "        recall_score_lr_list.append(control_recall_all_param)\n",
    "        auc_lr_list.append(control_roc_all_param)\n",
    "        \n",
    "        \n",
    "    #计算平均得分\n",
    "    mean_recall_score = np.mean(recall_score_list)\n",
    "    std_recall_score = np.std(recall_score_list)\n",
    "    \n",
    "    mean_auc= np.mean(auc_list)\n",
    "    std_auc = np.std(auc_list)\n",
    "        \n",
    "    mean_recall_score_lr = np.mean(recall_score_lr_list, axis = 0)\n",
    "    std_recall_score_lr = np.std(recall_score_lr_list, axis = 0)\n",
    "    mean_auc_lr= np.mean(auc_lr_list, axis = 0)\n",
    "    std_auc_lr = np.std(auc_lr_list, axis = 0)\n",
    "        \n",
    "    result = [mean_recall_score, std_recall_score, mean_auc, std_auc]\n",
    "    control = [mean_recall_score_lr, std_recall_score_lr, mean_auc_lr, std_auc_lr]\n",
    "    return result, control, params    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "knn 第 0 次： 0.9993719381987187\n",
      "knn 第 1 次： 0.9995227569577012\n",
      "knn 第 2 次： 0.9991210447011551\n",
      "knn 第 3 次： 0.9994224588188028\n",
      "knn 第 4 次： 0.9994473889126121\n",
      "knn 第 0 次： 0.9993219998493333\n",
      "knn 第 1 次： 0.9995730788548468\n",
      "knn 第 2 次： 0.999095999799111\n",
      "knn 第 3 次： 0.999472772463659\n",
      "knn 第 4 次： 0.9994474860615802\n",
      "knn 第 0 次： 0.9992718141870685\n",
      "knn 第 1 次： 0.9995479658463083\n",
      "knn 第 2 次： 0.9991210667738128\n",
      "knn 第 3 次： 0.9993974996234373\n",
      "knn 第 4 次： 0.9994725870859181\n",
      "knn 第 0 次： 0.9992969063881076\n",
      "knn 第 1 次： 0.9995479658463083\n",
      "knn 第 2 次： 0.9990959316926168\n",
      "knn 第 3 次： 0.9994225603173408\n",
      "knn 第 4 次： 0.9994976893711071\n",
      "svm_rbf 第 0 次： 0.9983442877928854\n",
      "svm_rbf 第 1 次： 0.9986201013598274\n",
      "svm_rbf 第 2 次： 0.9974177050166722\n",
      "svm_rbf 第 3 次： 0.9983450351053159\n",
      "svm_rbf 第 4 次： 0.9988454684637201\n",
      "svm_rbf 第 0 次： 0.9989457037427517\n",
      "svm_rbf 第 1 次： 0.9987453893056984\n",
      "svm_rbf 第 2 次： 0.9983437476473688\n",
      "svm_rbf 第 3 次： 0.9988208730556949\n",
      "svm_rbf 第 4 次： 0.9986449864498645\n",
      "svm_rbf 第 0 次： 0.9994474583082178\n",
      "svm_rbf 第 1 次： 0.9996985984829457\n",
      "svm_rbf 第 2 次： 0.9990958635758596\n",
      "svm_rbf 第 3 次： 0.9996986211919532\n",
      "svm_rbf 第 4 次： 0.9995981010273542\n",
      "svm_rbf 第 0 次： 0.9996482500439687\n",
      "svm_rbf 第 1 次： 0.9997237083364731\n",
      "svm_rbf 第 2 次： 0.9993215739484396\n",
      "svm_rbf 第 3 次： 0.9997990555611374\n",
      "svm_rbf 第 4 次： 0.9996231629192313\n",
      "svm_rbf 第 0 次： 0.9997236389216894\n",
      "svm_rbf 第 1 次： 0.999799025272572\n",
      "svm_rbf 第 2 次： 0.9994471945121491\n",
      "svm_rbf 第 3 次： 0.9998744254966472\n",
      "svm_rbf 第 4 次： 0.9996231723860725\n",
      "svm_poly 第 0 次： 0.998745074417087\n",
      "svm_poly 第 1 次： 0.9990712851405622\n",
      "svm_poly 第 2 次： 0.9987198152517697\n",
      "svm_poly 第 3 次： 0.9988958871797445\n",
      "svm_poly 第 4 次： 0.9991715827789632\n",
      "svm_poly 第 0 次： 0.9991966056589089\n",
      "svm_poly 第 1 次： 0.9990462804939263\n",
      "svm_poly 第 2 次： 0.9978182912455802\n",
      "svm_poly 第 3 次： 0.9992720700820804\n",
      "svm_poly 第 4 次： 0.9991715827789632\n",
      "svm_poly 第 0 次： 0.9983194963505481\n",
      "svm_poly 第 1 次： 0.9966952080516749\n",
      "svm_poly 第 2 次： 0.9978933112632609\n",
      "svm_poly 第 3 次： 0.9968453468866578\n",
      "svm_poly 第 4 次： 0.9972190208949241\n",
      "svm_poly 第 0 次： 0.9964699697068322\n",
      "svm_poly 第 1 次： 0.9975693487358107\n",
      "svm_poly 第 2 次： 0.9961944819988984\n",
      "svm_poly 第 3 次： 0.99786972757575\n",
      "svm_poly 第 4 次： 0.9977689762358367\n",
      "svm_poly 第 0 次： 0.9972189512189011\n",
      "svm_poly 第 1 次： 0.9971945293321978\n",
      "svm_poly 第 2 次： 0.9974430963601725\n",
      "svm_poly 第 3 次： 0.9973947895791583\n",
      "svm_poly 第 4 次： 0.9974689254210104\n",
      "svm_poly 第 0 次： 0.997119021970589\n",
      "svm_poly 第 1 次： 0.9970197099852238\n",
      "svm_poly 第 2 次： 0.9970431993585246\n",
      "svm_poly 第 3 次： 0.997994736182479\n",
      "svm_poly 第 4 次： 0.996969241558962\n",
      "svm_poly 第 0 次： 0.9969691656438644\n",
      "svm_poly 第 1 次： 0.997569287826392\n",
      "svm_poly 第 2 次： 0.9962443665498247\n",
      "svm_poly 第 3 次： 0.9972698810269255\n",
      "svm_poly 第 4 次： 0.996619760628975\n",
      "svm_poly 第 0 次： 0.9969441939685402\n",
      "svm_poly 第 1 次： 0.9970696521151101\n",
      "svm_poly 第 2 次： 0.9970681835266996\n"
     ]
    }
   ],
   "source": [
    "\n",
    "def lr_bdry_module(recall_acc, roc_auc):\n",
    "    return 0.9*recall_acc+0.1*roc_auc\n",
    "def svm_rbf_bdry_module(recall_acc, roc_auc):\n",
    "    return recall_acc*roc_auc\n",
    "def svm_poly_bdry_module(recall_acc, roc_auc):\n",
    "    return recall_acc*roc_auc\n",
    "def rf_bdry_module(recall_acc, roc_auc):\n",
    "    return 0.5*recall_acc+0.5*roc_auc\n",
    "\n",
    "bdry_dict = {'lr': lr_bdry_module,'svm_rbf': svm_rbf_bdry_module,\n",
    "             'svm_poly': svm_poly_bdry_module, 'rf': rf_bdry_module}\n",
    "\n",
    "result, control, params =run(data = data, mode = mode, ratio = ratio, iteration1 = iteration1, bdry_dict = bdry_dict)\n",
    "print(\"超参数值:\")\n",
    "print(\"比率为: \", ratio, \" 模式为: \", mode)\n",
    "print(\"knn, svm_rbf, svm_poly, lr 和 rf 投票产出的结果是:\")\n",
    "print(\"平均召回率为 \", result[0], \" 召回率标准差为 \", result[1])\n",
    "print(\"平均auc为 \", result[2], \" auc标准差为 \", result[3])\n",
    "print()\n",
    "print(\"调整逻辑回归不同的阈值\")\n",
    "print(\"我们把超过阈值的样本判定为positive(欺诈)\")\n",
    "for i, param in enumerate(params):\n",
    "    print(\"阈值\", param)\n",
    "    print(\"平均召回率 \", control[0][i], \" 召回率标准差 \", control[1][i])\n",
    "    print(\"平均auc为 \", control[2][i], \" auc标准差 \", control[3][i])\n",
    "    print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
