{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "AB155B13302D459CBAAABA327849B76D",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "# 说明\n",
    "特征工程部分照搬社区的baseline未作改动，仅仅进行了模型的部分参数调整和基于stacking的模型融合。\n",
    "另外，由于是输出概率，后续按照回归去做，故删除了不平衡样本的处理，一开始当成分类去做，最高只能到0.8+，按照回归轻松0.9+\n",
    "参数仅做了简单的调整，非最优，线下0.9361018703876826，线上验证0.93536922"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: seaborn in .\\.venv\\lib\\site-packages (0.13.2)\n",
      "Requirement already satisfied: numpy!=1.24.0,>=1.20 in .\\.venv\\lib\\site-packages (from seaborn) (2.3.5)\n",
      "Requirement already satisfied: pandas>=1.2 in .\\.venv\\lib\\site-packages (from seaborn) (2.3.3)\n",
      "Requirement already satisfied: matplotlib!=3.6.1,>=3.4 in .\\.venv\\lib\\site-packages (from seaborn) (3.10.7)\n",
      "Requirement already satisfied: contourpy>=1.0.1 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.3.3)\n",
      "Requirement already satisfied: cycler>=0.10 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (0.12.1)\n",
      "Requirement already satisfied: fonttools>=4.22.0 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (4.60.1)\n",
      "Requirement already satisfied: kiwisolver>=1.3.1 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.4.9)\n",
      "Requirement already satisfied: packaging>=20.0 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (25.0)\n",
      "Requirement already satisfied: pillow>=8 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (12.0.0)\n",
      "Requirement already satisfied: pyparsing>=3 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (3.2.5)\n",
      "Requirement already satisfied: python-dateutil>=2.7 in .\\.venv\\lib\\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (2.9.0.post0)\n",
      "Requirement already satisfied: pytz>=2020.1 in .\\.venv\\lib\\site-packages (from pandas>=1.2->seaborn) (2025.2)\n",
      "Requirement already satisfied: tzdata>=2022.7 in .\\.venv\\lib\\site-packages (from pandas>=1.2->seaborn) (2025.2)\n",
      "Requirement already satisfied: six>=1.5 in .\\.venv\\lib\\site-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.4->seaborn) (1.17.0)\n",
      "Requirement already satisfied: scipy in .\\.venv\\lib\\site-packages (1.16.3)\n",
      "Requirement already satisfied: numpy<2.6,>=1.25.2 in .\\.venv\\lib\\site-packages (from scipy) (2.3.5)\n"
     ]
    }
   ],
   "source": [
    "# Install plotting/statistics dependencies into the running kernel's\n",
    "# environment (%pip targets the kernel's env, unlike !pip).\n",
    "%pip install seaborn\n",
    "%pip install scipy"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "C325760DBE164B398588C602C8C376BC",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "# 查看数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "id": "0A028E17E34B4CFEABE9DFF956D89741",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "# Use the SimHei font so Chinese glyphs render in matplotlib figures\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']\n",
    "# Keep the minus sign rendering correctly when a CJK font is active\n",
    "plt.rcParams['axes.unicode_minus'] = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false,
    "id": "001F87A7D41644E4BA2EA25513B0D1B3",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute local paths — consider a configurable\n",
    "# DATA_DIR so the notebook is reproducible on other machines.\n",
    "train=pd.read_csv(r'G:/大三内容/数据分析可视化/实践课/train_set.csv')\n",
    "test=pd.read_csv('G:/大三内容/数据分析可视化/实践课/test_set.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false,
    "id": "B8C543716B964973872549E7B4DFDFAA",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Stack train (label dropped) and test for joint inspection/preprocessing\n",
    "data = pd.concat([train.drop(['y'],axis=1),test],axis=0).reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false,
    "id": "D7CE94A95A2544AE808C26A4B1901BA7",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "job :   ['management' 'technician' 'admin.' 'services' 'retired' 'student'\n",
      " 'blue-collar' 'unknown' 'entrepreneur' 'housemaid' 'self-employed'\n",
      " 'unemployed']\n",
      "marital :   ['married' 'divorced' 'single']\n",
      "education :   ['tertiary' 'primary' 'secondary' 'unknown']\n",
      "default :   ['no' 'yes']\n",
      "housing :   ['yes' 'no']\n",
      "loan :   ['no' 'yes']\n",
      "contact :   ['unknown' 'cellular' 'telephone']\n",
      "month :   ['may' 'apr' 'jul' 'jun' 'nov' 'aug' 'jan' 'feb' 'dec' 'oct' 'sep' 'mar']\n",
      "poutcome :   ['unknown' 'other' 'failure' 'success']\n"
     ]
    }
   ],
   "source": [
    "# Split columns by dtype: object columns become categorical features\n",
    "# (their unique values are printed for inspection); int64 columns other\n",
    "# than the ID and the label become numeric features.\n",
    "str_features = []\n",
    "num_features=[]\n",
    "for col in train.columns:\n",
    "    if train[col].dtype=='object':\n",
    "        str_features.append(col)\n",
    "        print(col,':  ',train[col].unique())\n",
    "    if train[col].dtype=='int64' and col not in ['ID','y']:\n",
    "        num_features.append(col)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "29CB9E135078402DBE2F479858631C1D",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "# 特征工程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true,
    "id": "928046C510F54165A50D2FFBEFB4A5B5",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "from scipy.stats import chi2_contingency       # categorical feature vs. label: chi-square test of independence\n",
    "from scipy.stats import f_oneway,ttest_ind     # numeric feature vs. label: ANOVA / t-test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true,
    "id": "B5098300108B4CB784E1E1D8A1A7AF40",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "#---------- Dataset handling --------------#\n",
    "from sklearn.model_selection import train_test_split        # train/validation split\n",
    "from sklearn.model_selection import KFold,StratifiedKFold   # k-fold cross-validation\n",
    "from imblearn.combine import SMOTETomek,SMOTEENN            # combined over/under sampling\n",
    "from imblearn.over_sampling import SMOTE                    # over-sampling\n",
    "from imblearn.under_sampling import RandomUnderSampler      # under-sampling\n",
    "\n",
    "#---------- Preprocessing --------------#\n",
    "from sklearn.preprocessing import StandardScaler # standardization\n",
    "from sklearn.preprocessing import OneHotEncoder  # one-hot encoding\n",
    "from sklearn.preprocessing import OrdinalEncoder"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "AB4FDF679736489B86708802E2B4E05B",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "## 特征处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "B76FEED533744B93AD63ED6825716E5C",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "**连续变量即数值化数据做标准化处理**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true,
    "id": "DD0BE0C3EF0D4755A0F255565CA064B3",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# IQR-based outlier capping (winsorize to the nearest in-range value)\n",
    "def outlier_processing(dfx):\n",
    "    \"\"\"Cap outliers in a numeric Series using the 1.5*IQR rule.\n",
    "\n",
    "    Values above Q3 + 1.5*IQR are replaced by the largest value still\n",
    "    inside the upper fence; values below Q1 - 1.5*IQR by the smallest\n",
    "    value inside the lower fence. Operates on a copy; the input Series\n",
    "    is left unchanged.\n",
    "    \"\"\"\n",
    "    df = dfx.copy()\n",
    "    q1 = df.quantile(q=0.25)\n",
    "    q3 = df.quantile(q=0.75)\n",
    "    iqr = q3 - q1\n",
    "    Umin = q1 - 1.5*iqr\n",
    "    Umax = q3 + 1.5*iqr \n",
    "    df[df>Umax] = df[df<=Umax].max()\n",
    "    df[df<Umin] = df[df>=Umin].min()\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true,
    "id": "353D496D273C4456A8EF7C663D1C81C1",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# Cap IQR outliers in the heavy-tailed numeric columns of both splits\n",
    "for col in ['age', 'day', 'duration', 'campaign']:\n",
    "    train[col] = outlier_processing(train[col])\n",
    "    test[col] = outlier_processing(test[col])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "id": "605C7188AA404524AA2B935E363D1E6C",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>age</th>\n",
       "      <th>balance</th>\n",
       "      <th>day</th>\n",
       "      <th>duration</th>\n",
       "      <th>campaign</th>\n",
       "      <th>pdays</th>\n",
       "      <th>previous</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>25317.000000</td>\n",
       "      <td>25317.000000</td>\n",
       "      <td>25317.000000</td>\n",
       "      <td>25317.000000</td>\n",
       "      <td>25317.000000</td>\n",
       "      <td>25317.000000</td>\n",
       "      <td>25317.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>40.859502</td>\n",
       "      <td>1357.555082</td>\n",
       "      <td>15.835289</td>\n",
       "      <td>234.235138</td>\n",
       "      <td>2.391437</td>\n",
       "      <td>40.248766</td>\n",
       "      <td>0.591737</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>10.387365</td>\n",
       "      <td>2999.822811</td>\n",
       "      <td>8.319480</td>\n",
       "      <td>175.395559</td>\n",
       "      <td>1.599851</td>\n",
       "      <td>100.213541</td>\n",
       "      <td>2.568313</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>18.000000</td>\n",
       "      <td>-8019.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>-1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>33.000000</td>\n",
       "      <td>73.000000</td>\n",
       "      <td>8.000000</td>\n",
       "      <td>103.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>-1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>39.000000</td>\n",
       "      <td>448.000000</td>\n",
       "      <td>16.000000</td>\n",
       "      <td>181.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>-1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>48.000000</td>\n",
       "      <td>1435.000000</td>\n",
       "      <td>21.000000</td>\n",
       "      <td>317.000000</td>\n",
       "      <td>3.000000</td>\n",
       "      <td>-1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>70.000000</td>\n",
       "      <td>102127.000000</td>\n",
       "      <td>31.000000</td>\n",
       "      <td>638.000000</td>\n",
       "      <td>6.000000</td>\n",
       "      <td>854.000000</td>\n",
       "      <td>275.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                age        balance           day      duration      campaign  \\\n",
       "count  25317.000000   25317.000000  25317.000000  25317.000000  25317.000000   \n",
       "mean      40.859502    1357.555082     15.835289    234.235138      2.391437   \n",
       "std       10.387365    2999.822811      8.319480    175.395559      1.599851   \n",
       "min       18.000000   -8019.000000      1.000000      0.000000      1.000000   \n",
       "25%       33.000000      73.000000      8.000000    103.000000      1.000000   \n",
       "50%       39.000000     448.000000     16.000000    181.000000      2.000000   \n",
       "75%       48.000000    1435.000000     21.000000    317.000000      3.000000   \n",
       "max       70.000000  102127.000000     31.000000    638.000000      6.000000   \n",
       "\n",
       "              pdays      previous  \n",
       "count  25317.000000  25317.000000  \n",
       "mean      40.248766      0.591737  \n",
       "std      100.213541      2.568313  \n",
       "min       -1.000000      0.000000  \n",
       "25%       -1.000000      0.000000  \n",
       "50%       -1.000000      0.000000  \n",
       "75%       -1.000000      0.000000  \n",
       "max      854.000000    275.000000  "
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train[num_features].describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "FA30A072639C418283A5F9FF7BB70A5A",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "**分类变量做编码处理**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true,
    "id": "B843C59639844E1A872AECAFE7926878",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# One-hot encode the categorical columns and drop the originals.\n",
    "# Encoding train and test separately can misalign columns when a category\n",
    "# is absent from one split, so align test to the train column layout\n",
    "# (missing dummy columns are filled with 0).\n",
    "dummy_train=train.join(pd.get_dummies(train[str_features])).drop(str_features,axis=1).drop(['ID','y'],axis=1)\n",
    "dummy_test=test.join(pd.get_dummies(test[str_features])).drop(str_features,axis=1).drop(['ID'],axis=1)\n",
    "dummy_test=dummy_test.reindex(columns=dummy_train.columns, fill_value=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "F2EA03BA7FAA45FF85F2CEDA21AB2F25",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "## 统计检验与特征筛选 \n",
    "\n",
    "\n",
    "**连续变量-连续变量  相关分析**\n",
    "\n",
    "**连续变量-分类变量  T检验/方差分析**\n",
    "\n",
    "**分类变量-分类变量  卡方检验**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "500456AC13CA4815858E06D09EE17A9E",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "**对类别标签（离散变量）用卡方检验分析重要性**\n",
    "\n",
     "卡方检验在显著性水平0.05下判断差异是否显著：若p<=0.05，认为该特征与标签显著相关；若p>0.05，则不能拒绝独立性假设，说明该特征与标签无显著关联"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "id": "40798A95334C4A85812FCB8A3CE6DD75",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "job 卡方检验p值: 0.0000\n",
      "marital 卡方检验p值: 0.0000\n",
      "education 卡方检验p值: 0.0000\n",
      "default 卡方检验p值: 0.0001\n",
      "housing 卡方检验p值: 0.0000\n",
      "loan 卡方检验p值: 0.0000\n",
      "contact 卡方检验p值: 0.0000\n",
      "month 卡方检验p值: 0.0000\n",
      "poutcome 卡方检验p值: 0.0000\n"
     ]
    }
   ],
   "source": [
    "# Chi-square test of independence between each categorical feature and\n",
    "# the label y; p <= 0.05 means the feature distribution differs by class.\n",
    "for col in str_features:\n",
    "    obs=pd.crosstab(train['y'],\n",
    "                    train[col],\n",
    "                    rownames=['y'],\n",
    "                    colnames=[col])\n",
    "    chi2, p, dof, expect = chi2_contingency(obs)\n",
    "    print(\"{} 卡方检验p值: {:.4f}\".format(col,p))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "30A24D4C89AF4BDAA0115FDF8F7C12E5",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "**对连续变量做方差分析进行特征筛选**\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "id": "43185B05C4AB48628437C4224F86F348",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "scores_: [  13.38856992   84.16396612   25.76507245 4405.56959938  193.97418155\n",
      "  296.33099313  199.09942912]\n",
      "pvalues_: [2.53676251e-04 4.89124305e-20 3.88332900e-07 0.00000000e+00\n",
      " 6.26768275e-44 4.93591331e-66 4.86613654e-45]\n",
      "selected index: [0 1 2 3 4 5 6]\n"
     ]
    }
   ],
   "source": [
    "from sklearn.feature_selection import SelectKBest,f_classif\n",
    "\n",
    "# ANOVA F-test per numeric feature; keep only those significant at p <= 0.05\n",
    "f_scores, p_values = f_classif(train[num_features], train['y'])\n",
    "n_keep = len(f_scores) - int((p_values > 0.05).sum())\n",
    "selector = SelectKBest(f_classif, k=n_keep)\n",
    "selector.fit(train[num_features], train['y'])\n",
    "\n",
    "print('scores_:', selector.scores_)\n",
    "print('pvalues_:', selector.pvalues_)\n",
    "print('selected index:', selector.get_support(True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true,
    "id": "5AC3B3DFF3134BDC83E167086A48540B",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[ 0.20607159  0.10979888  0.59116242 ... -0.56411009 -0.37156467\n",
      "  1.07252597]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.35554638  1.23957933 -0.41788463 ... -0.35254615 -0.43055229\n",
      " -0.43921964]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.8216167  -1.06202109 -0.22060571 ... -0.34080791  0.26020307\n",
      "  0.38040527]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.48026759 -0.77104466 -0.89647791 ...  2.3020699   2.3020699\n",
      "  2.3020699 ]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.24467553 -0.86974621 -0.24467553 ... -0.24467553 -0.24467553\n",
      "  0.38039515]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.41161683  2.10306306 -0.41161683 ... -0.41161683 -0.41161683\n",
      " -0.41161683]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:4: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.23040357  0.54833312 -0.23040357 ... -0.23040357 -0.23040357\n",
      " -0.23040357]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[ 0.97625326 -0.85292821  1.84270764 ... -0.27529196  0.01352617\n",
      " -0.6603828 ]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.39454946  1.56727183 -0.45255403 ... -0.3075426  -0.19153346\n",
      "  4.82819549]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[ 1.58242724  0.50060747  1.70262943 ...  0.86121406 -0.22060571\n",
      "  0.50060747]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[ 0.42057119 -0.70832804 -0.59429781 ... -0.23510261 -0.56579026\n",
      "  1.07624498]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[ 0.38039515 -0.24467553  0.38039515 ... -0.24467553  0.38039515\n",
      "  1.63053651]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.41161683 -0.41161683 -0.41161683 ... -0.41161683  3.22069856\n",
      " -0.41161683]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n",
      "C:\\Users\\nickc\\AppData\\Local\\Temp\\ipykernel_177052\\474632271.py:5: FutureWarning: Setting an item of incompatible dtype is deprecated and will raise in a future error of pandas. Value '[-0.23040357 -0.23040357 -0.23040357 ... -0.23040357  0.54833312\n",
      " -0.23040357]' has dtype incompatible with int64, please explicitly cast to a compatible dtype first.\n",
      "  dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])\n"
     ]
    }
   ],
   "source": [
    "# Standardize the numeric features: fit on train, apply to train and test.\n",
    "# Cast the target columns to float first — writing scaled floats into\n",
    "# int64 columns triggers pandas' incompatible-dtype FutureWarning (visible\n",
    "# in the previous run's output) and will raise in future pandas versions.\n",
    "standardScaler=StandardScaler()\n",
    "ss=standardScaler.fit(dummy_train.loc[:,num_features])\n",
    "dummy_train[num_features]=dummy_train[num_features].astype('float64')\n",
    "dummy_test[num_features]=dummy_test[num_features].astype('float64')\n",
    "dummy_train.loc[:,num_features]=ss.transform(dummy_train.loc[:,num_features])\n",
    "dummy_test.loc[:,num_features]=ss.transform(dummy_test.loc[:,num_features])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true,
    "id": "EEEA48883D2E4FA58FB392991E531C86",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "X=dummy_train\n",
    "y=train['y']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "A16ACA2680A243C0880DE1F19CFB6AAD",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "**因为后续是进行回归而非分类，个人认为没有必要进行不平衡处理，故此部分就注释掉了**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true,
    "id": "AFE7C8F818B148178A42245BBF4C8878",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# X_train,X_valid,y_train,y_valid=train_test_split(X,y,test_size=0.2,random_state=2020)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true,
    "id": "046EFE395ED34BFCAD11F2721BBE40FD",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# smote_tomek = SMOTETomek(random_state=2020)\n",
    "# X_resampled, y_resampled = smote_tomek.fit_resample(X, y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "13F7B7216054465D9437CEB872026BC3",
    "jupyter": {},
    "mdEditEnable": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "# 数据建模"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "id": "0E1FC19C7DF74042B6F1A9F5527AA4AE",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "#---------- Modeling tools --------------#\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.model_selection import KFold,RepeatedKFold\n",
    "import lightgbm as lgb\n",
    "from sklearn.ensemble import RandomForestRegressor\n",
    "import xgboost as xgb\n",
    "from xgboost import XGBRegressor\n",
    "from sklearn.linear_model import BayesianRidge\n",
    "from catboost import CatBoostRegressor, Pool\n",
    "from lightgbm import LGBMRegressor\n",
    "#---------- Evaluation tools ----------#\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.metrics import recall_score,f1_score\n",
    "from sklearn.metrics import roc_auc_score\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.metrics import roc_curve,auc"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "D59F8B42DDB34650B81C23D56AE0BB43",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "## 模型建立和参数调整"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "B6C0DDA8DA53482ABA9E2EE753DB105D",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于GridSearchCV的随机森林参数调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "id": "4451159F5E62465CB52012889AD801B2",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# 随机森林\n",
    "# param = {'n_estimators':[1500,1700,2000],\n",
    "#          'max_features':[7,11,15]\n",
    "#         }\n",
    "# gs = GridSearchCV(estimator=RandomForestRegressor(), param_grid=param, cv=3, scoring=\"neg_mean_squared_error\", n_jobs=-1, verbose=10) \n",
    "# gs.fit(X_resampled,y_resampled)\n",
    "# print(gs.best_params_) \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "821CD114AA87471B9188366261086B72",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于五折交叉验证的随机森林"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "id": "46ED98A1821C450E85F2338B49A197C6",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.929373220326099\n"
     ]
    }
   ],
   "source": [
    "# 5-fold CV with a random-forest regressor: 0/1 targets are treated as a\n",
    "# regression problem so predictions are probabilities directly. Out-of-fold\n",
    "# predictions give an unbiased AUC estimate; test predictions are averaged\n",
    "# across the folds.\n",
    "n_fold = 5\n",
    "folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)\n",
    "oof_rf = np.zeros(len(X))\n",
    "prediction_rf = np.zeros(len(dummy_test))\n",
    "for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n",
    "    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]\n",
    "    y_train, y_valid = y[train_index], y[valid_index]\n",
    "#     smote_tomek = SMOTETomek(random_state=2022)\n",
    "#     X_resampled, y_resampled = smote_tomek.fit_resample(X_train, y_train)\n",
    "    model_rf = RandomForestRegressor(max_features=11,min_samples_leaf=1,n_estimators=1700,random_state=2022).fit(X_train,y_train)\n",
    "    y_pred_valid = model_rf.predict(X_valid)\n",
    "    y_pred = model_rf.predict(dummy_test)\n",
    "    oof_rf[valid_index] = y_pred_valid.reshape(-1, )\n",
    "    prediction_rf += y_pred\n",
    "# Average the accumulated test predictions over the folds\n",
    "prediction_rf /= n_fold \n",
    "print(roc_auc_score(y, oof_rf))\n",
    "#0.929373220326099"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "A03808ADDE7A4AC4AEFDC87F09A5A017",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于GridSearchCV的XGB参数调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "id": "A26971BEEF684860B739522723029593",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# param = {'max_depth': [3],\n",
    "#          'learning_rate': [0.01],\n",
    "#         'subsample':[0.8],\n",
    "#         'colsample_bytree':[0.6],\n",
    "#          'n_estimators': [8000]\n",
    "\n",
    "#         }\n",
    "# gs = GridSearchCV(estimator=XGBRegressor(), param_grid=param, cv=3, scoring=\"neg_mean_squared_error\", n_jobs=-1, verbose=10) \n",
    "# gs.fit(X,y)\n",
    "# print(gs.best_params_) \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7AF59354E891478D8154F963C5EDE251",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于五折交叉验证的XGB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "id": "C0C60825A129420F8F4C983DEC926AC6",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# 5-fold CV for XGBoost; OOF predictions scored with AUC.\n",
    "n_fold = 5\n",
    "folds = KFold(n_splits=n_fold, shuffle=True, random_state=2022)\n",
    "oof_xgb = np.zeros(len(X))\n",
    "prediction_xgb = np.zeros(len(dummy_test))\n",
    "for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n",
    "    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]\n",
    "    y_train, y_valid = y[train_index], y[valid_index]\n",
    "    eval_set = [(X_valid, y_valid)]\n",
    "    # xgboost >= 2.0 (sklearn API): eval_metric and early_stopping_rounds are\n",
    "    # constructor parameters, no longer fit() keyword arguments.\n",
    "    model_xgb = XGBRegressor(\n",
    "        max_depth=9, learning_rate=0.01, n_estimators=10000,\n",
    "        colsample_bytree=0.6, subsample=0.8, random_state=2022,\n",
    "        eval_metric=\"auc\", early_stopping_rounds=100\n",
    "    ).fit(X_train, y_train, eval_set=eval_set, verbose=True)\n",
    "    # With early stopping enabled, predict() uses the best iteration automatically.\n",
    "    y_pred_valid = model_xgb.predict(X_valid)\n",
    "    y_pred = model_xgb.predict(dummy_test)\n",
    "    oof_xgb[valid_index] = y_pred_valid.reshape(-1, )\n",
    "    prediction_xgb += y_pred\n",
    "prediction_xgb /= n_fold\n",
    "print(roc_auc_score(y, oof_xgb))\n",
    "# 0.9326219985474677"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "D03CB6619D8B44B08E990ED3EA01C61D",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于GridSearchCV的LGBM参数调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "082D0D1C79D14C0BB07870203B74F5F8",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'colsample_bytree': 0.8, 'learning_rate': 0.01, 'max_depth': 30, 'n_estimators': 10000, 'num_leaves': 59, 'subsample': 0.7}\n"
     ]
    }
   ],
   "source": [
    "# param = {'max_depth': [30],\n",
    "#          'learning_rate': [0.01],\n",
    "#          'num_leaves': [59],\n",
    "#          'subsample': [0.7],\n",
    "#          'colsample_bytree': [0.8],\n",
    "#          'n_estimators': [10000]}\n",
    "# gs = GridSearchCV(estimator=LGBMRegressor(), param_grid=param, cv=5, scoring=\"neg_mean_squared_error\", n_jobs=-1) \n",
    "# gs.fit(X_resampled,y_resampled)\n",
    "# print(gs.best_params_) \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "23ADF0543B77452A8DCC799C0582D474",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于五折交叉验证的LGBM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "D2313DA33FCA4D7EA5F3C32DD98D6BBA",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# 5-fold CV for LightGBM; OOF predictions scored with AUC.\n",
    "n_fold = 5\n",
    "folds = KFold(n_splits=n_fold, shuffle=True, random_state=1314)\n",
    "params = {\n",
    "    'learning_rate': 0.01,\n",
    "    'subsample': 0.7,\n",
    "    'num_leaves': 59,\n",
    "    'n_estimators': 1500,\n",
    "    'max_depth': 30,\n",
    "    'colsample_bytree': 0.8,\n",
    "    'verbose': -1,\n",
    "    'seed': 2022,\n",
    "    'n_jobs': -1\n",
    "}\n",
    "\n",
    "oof_lgb = np.zeros(len(X))\n",
    "predictions_lgb = np.zeros(len(dummy_test))\n",
    "for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n",
    "    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]\n",
    "    y_train, y_valid = y[train_index], y[valid_index]\n",
    "    model = lgb.LGBMRegressor(**params)\n",
    "    # lightgbm >= 4.0: the verbose / early_stopping_rounds fit() kwargs were\n",
    "    # removed; logging and early stopping are configured via callbacks instead.\n",
    "    model.fit(X_train, y_train,\n",
    "              eval_set=[(X_train, y_train), (X_valid, y_valid)],\n",
    "              eval_metric='auc',\n",
    "              callbacks=[lgb.early_stopping(stopping_rounds=200),\n",
    "                         lgb.log_evaluation(period=50)])\n",
    "    y_pred_valid = model.predict(X_valid)\n",
    "    y_pred = model.predict(dummy_test, num_iteration=model.best_iteration_)\n",
    "    oof_lgb[valid_index] = y_pred_valid.reshape(-1, )\n",
    "    predictions_lgb += y_pred\n",
    "predictions_lgb /= n_fold\n",
    "print(roc_auc_score(y, oof_lgb))\n",
    "# 0.9342991211145983"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1D0CF99ED0F04505ADAF99D733B04257",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于GridSearchCV的catboost参数调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "id": "40143CE0A1F7461A9D5AAFEB614609B5",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "\n",
    "# param = {'depth': [7,9,11],\n",
    "#          'learning_rate': [0.01],\n",
    "#          'iterations':  [8000]}\n",
    "# gs = GridSearchCV(estimator=CatBoostRegressor(), param_grid=param, cv=3, scoring=\"neg_mean_squared_error\", n_jobs=-1) \n",
    "# gs.fit(X_resampled,y_resampled)\n",
    "# print(gs.best_params_) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "0E595CF0D95341E69AB1154B1849F14E",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于五折交叉验证的catboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "3A557EC583C0481488702DC367D10F53",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0:\tlearn: 0.3201355\ttest: 0.3215319\tbest: 0.3215319 (0)\ttotal: 9.15ms\tremaining: 3m 48s\n",
      "1000:\tlearn: 0.2210492\ttest: 0.2482059\tbest: 0.2482059 (1000)\ttotal: 10.3s\tremaining: 4m 7s\n",
      "Stopped by overfitting detector  (300 iterations wait)\n",
      "\n",
      "bestTest = 0.2468782072\n",
      "bestIteration = 1477\n",
      "\n",
      "Shrink model to first 1478 iterations.\n",
      "0:\tlearn: 0.3208935\ttest: 0.3185105\tbest: 0.3185105 (0)\ttotal: 15.1ms\tremaining: 6m 18s\n",
      "1000:\tlearn: 0.2206024\ttest: 0.2464808\tbest: 0.2464751 (998)\ttotal: 13.9s\tremaining: 5m 32s\n",
      "2000:\tlearn: 0.1971871\ttest: 0.2454924\tbest: 0.2454188 (1725)\ttotal: 25.1s\tremaining: 4m 48s\n",
      "Stopped by overfitting detector  (300 iterations wait)\n",
      "\n",
      "bestTest = 0.2454188002\n",
      "bestIteration = 1725\n",
      "\n",
      "Shrink model to first 1726 iterations.\n",
      "0:\tlearn: 0.3214860\ttest: 0.3161403\tbest: 0.3161403 (0)\ttotal: 10.4ms\tremaining: 4m 19s\n",
      "1000:\tlearn: 0.2210140\ttest: 0.2480678\tbest: 0.2480521 (995)\ttotal: 10.3s\tremaining: 4m 6s\n",
      "Stopped by overfitting detector  (300 iterations wait)\n",
      "\n",
      "bestTest = 0.24699407\n",
      "bestIteration = 1556\n",
      "\n",
      "Shrink model to first 1557 iterations.\n",
      "0:\tlearn: 0.3186885\ttest: 0.3274174\tbest: 0.3274174 (0)\ttotal: 11.9ms\tremaining: 4m 56s\n",
      "1000:\tlearn: 0.2202664\ttest: 0.2525361\tbest: 0.2525361 (1000)\ttotal: 12.8s\tremaining: 5m 6s\n",
      "Stopped by overfitting detector  (300 iterations wait)\n",
      "\n",
      "bestTest = 0.2515410448\n",
      "bestIteration = 1603\n",
      "\n",
      "Shrink model to first 1604 iterations.\n",
      "0:\tlearn: 0.3209030\ttest: 0.3185929\tbest: 0.3185929 (0)\ttotal: 14.9ms\tremaining: 6m 11s\n",
      "1000:\tlearn: 0.2212537\ttest: 0.2523067\tbest: 0.2523067 (1000)\ttotal: 10.6s\tremaining: 4m 15s\n",
      "Stopped by overfitting detector  (300 iterations wait)\n",
      "\n",
      "bestTest = 0.250998901\n",
      "bestIteration = 1618\n",
      "\n",
      "Shrink model to first 1619 iterations.\n"
     ]
    }
   ],
   "source": [
    "# Local 5-fold cross-validation for CatBoost.\n",
    "n_fold = 5\n",
    "folds = KFold(n_splits=n_fold, shuffle=True, random_state=1314)\n",
    "\n",
    "oof_cat = np.zeros(len(X))\n",
    "prediction_cat = np.zeros(len(dummy_test))\n",
    "for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n",
    "    X_train, X_valid = X.iloc[train_index], X.iloc[valid_index]\n",
    "    y_train, y_valid = y[train_index], y[valid_index]\n",
    "#     smote_tomek = SMOTETomek(random_state=2022)\n",
    "#     X_resampled, y_resampled = smote_tomek.fit_resample(X_train, y_train)\n",
    "    train_pool = Pool(X_train, y_train)\n",
    "    eval_pool = Pool(X_valid, y_valid)\n",
    "    cbt_model = CatBoostRegressor(iterations=25000, # NOTE: the baseline score was obtained with iterations=60000, but that takes quite long to run\n",
    "                           learning_rate=0.01, # NOTE: several targets converge very slowly at lr=0.1; consider increasing this later\n",
    "#                            eval_metric='SMAPE',\n",
    "                                  depth=9,\n",
    "                           use_best_model=True,\n",
    "                           random_seed=2022,\n",
    "                           logging_level='Verbose',\n",
    "                           #task_type='GPU',\n",
    "                           devices='0',\n",
    "                           gpu_ram_part=0.5,\n",
    "                           early_stopping_rounds=300)\n",
    "    \n",
    "    cbt_model.fit(train_pool,\n",
    "              eval_set=eval_pool,\n",
    "              verbose=1000)\n",
    "\n",
    "    y_pred_valid = cbt_model.predict(X_valid)\n",
    "    y_pred_c = cbt_model.predict(dummy_test)\n",
    "    oof_cat[valid_index] = y_pred_valid.reshape(-1, )\n",
    "    prediction_cat += y_pred_c\n",
    "prediction_cat /= n_fold \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "27A63B61DB094FCF95CEEB6A1E3ADC5A",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.935264298588153\n"
     ]
    }
   ],
   "source": [
    "print(roc_auc_score(y, oof_cat))\n",
    "# 0.935264298588153"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "9470A77D276E418F9CB2ACD8D18E6F68",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "### 基于stacking的模型融合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "4A24F209D61E47E7A27F4A26D6687B89",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [],
   "source": [
    "from sklearn.metrics import mean_squared_error, mean_absolute_error, make_scorer\n",
    "\n",
    "# Stack the OOF predictions of the four base models as meta-features.\n",
    "train_stack = np.vstack([oof_rf, oof_lgb, oof_cat, oof_xgb]).transpose()\n",
    "# fix: the LGBM CV cell defines `predictions_lgb`, not `prediction_lgb`\n",
    "test_stack = np.vstack([prediction_rf, predictions_lgb, prediction_cat, prediction_xgb]).transpose()\n",
    "# The meta-model also uses cross-validation: 5 folds, repeated twice.\n",
    "folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=2018)\n",
    "oof_stack = np.zeros(train_stack.shape[0])\n",
    "predictions = np.zeros(test_stack.shape[0])\n",
    "\n",
    "for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)):\n",
    "    print(\"fold {}\".format(fold_))\n",
    "    trn_data, trn_y = train_stack[trn_idx], y.iloc[trn_idx].values\n",
    "    val_data, val_y = train_stack[val_idx], y.iloc[val_idx].values\n",
    "\n",
    "    clf_3 = BayesianRidge()\n",
    "    clf_3.fit(trn_data, trn_y)\n",
    "\n",
    "    # OOF prediction on the held-out fold, used below to estimate meta-model error.\n",
    "    oof_stack[val_idx] = clf_3.predict(val_data)\n",
    "    # Divide by 10 because 5-fold CV repeated 2 times gives 10 fitted models.\n",
    "    predictions += clf_3.predict(test_stack) / 10\n",
    "\n",
    "# Report the meta-model's mean squared error on the training set.\n",
    "print(\"CV score: {:<8.8f}\".format(mean_squared_error(y.values, oof_stack)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "33AB2F850236462DB7F0DFE549A2C559",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'oof_stack' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[61]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28mprint\u001b[39m(roc_auc_score(y, \u001b[43moof_stack\u001b[49m))\n\u001b[32m      2\u001b[39m \u001b[38;5;66;03m# 0.9361018703876826\u001b[39;00m\n",
      "\u001b[31mNameError\u001b[39m: name 'oof_stack' is not defined"
     ]
    }
   ],
   "source": [
    "print(roc_auc_score(y, oof_stack))\n",
    "# 0.9361018703876826"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1D7C809913E14DD79D1E399F519EDC1C",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "source": [
    "# 保存结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "id": "BC37DA49289D45C2951D2FFDC91043BA",
    "jupyter": {},
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": [],
    "trusted": true
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'predictions' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[30]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m test[\u001b[33m'\u001b[39m\u001b[33mpred\u001b[39m\u001b[33m'\u001b[39m] = \u001b[43mpredictions\u001b[49m\n\u001b[32m      2\u001b[39m test[[\u001b[33m'\u001b[39m\u001b[33mID\u001b[39m\u001b[33m'\u001b[39m, \u001b[33m'\u001b[39m\u001b[33mpred\u001b[39m\u001b[33m'\u001b[39m]].to_csv(\u001b[33mr\u001b[39m\u001b[33m'\u001b[39m\u001b[33mG:/大三内容/数据分析可视化/实践课/sub.csv\u001b[39m\u001b[33m'\u001b[39m, index=\u001b[38;5;28;01mNone\u001b[39;00m, encoding=\u001b[33m\"\u001b[39m\u001b[33mutf-8\u001b[39m\u001b[33m\"\u001b[39m)\n",
      "\u001b[31mNameError\u001b[39m: name 'predictions' is not defined"
     ]
    }
   ],
   "source": [
    "# Write the stacked predictions to a submission file.\n",
    "# NOTE(review): hardcoded absolute local path — consider a configurable output directory.\n",
    "test['pred'] = predictions\n",
    "test[['ID', 'pred']].to_csv(r'G:/大三内容/数据分析可视化/实践课/sub.csv', index=None, encoding=\"utf-8\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
