{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# B 13-5 Logistic 逐步回归\n",
    "\n",
    "结果变量为二项分类变量，此时不适用线性回归，应使用 Logistic 回归。与多元线性回归相似，并不是每一个因素都对模型有贡献，因而需要对变量进行筛选。\n",
    "\n",
    "## 案例\n",
    "\n",
    "研究骨折疗效 (Y) 与 骨折类型 (X1)、治疗方法 (X2)、服药情况 (X3)、治疗周数 (X4) 之间的关系。\n",
    "\n",
    "### 数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div><style>\n",
       ".dataframe > thead > tr,\n",
       ".dataframe > tbody > tr {\n",
       "  text-align: right;\n",
       "  white-space: pre-wrap;\n",
       "}\n",
       "</style>\n",
       "<small>shape: (510, 6)</small><table border=\"1\" class=\"dataframe\"><thead><tr><th>level</th><th>X1</th><th>X2</th><th>X3</th><th>X4</th><th>Y</th></tr><tr><td>u8</td><td>u8</td><td>u8</td><td>u8</td><td>u8</td><td>u8</td></tr></thead><tbody><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>1</td><td>0</td></tr><tr><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>3</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>3</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>3</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>3</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>3</td><td>1</td></tr></tbody></table></div>"
      ],
      "text/plain": [
       "shape: (510, 6)\n",
       "┌───────┬─────┬─────┬─────┬─────┬─────┐\n",
       "│ level ┆ X1  ┆ X2  ┆ X3  ┆ X4  ┆ Y   │\n",
       "│ ---   ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n",
       "│ u8    ┆ u8  ┆ u8  ┆ u8  ┆ u8  ┆ u8  │\n",
       "╞═══════╪═════╪═════╪═════╪═════╪═════╡\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 1   ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 1   ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 1   ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 1   ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 1   ┆ 0   │\n",
       "│ …     ┆ …   ┆ …   ┆ …   ┆ …   ┆ …   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 3   ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 3   ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 3   ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 3   ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 3   ┆ 1   │\n",
       "└───────┴─────┴─────┴─────┴─────┴─────┘"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import polars as pl\n",
    "\n",
    "# Read the frequency-format CSV and expand it into one row per observation.\n",
    "# Each data line holds the covariate pattern, and its LAST column is the\n",
    "# number of identical observations with that pattern.\n",
    "rows = []\n",
    "with open(\"B_13_5-data.csv\", \"r\") as reader:\n",
    "    header = reader.readline().strip().split(\",\")\n",
    "\n",
    "    for line in reader:\n",
    "        fields = line.strip().split(\",\")\n",
    "        # Repeat the observation `count` times; collecting rows in a list and\n",
    "        # building the DataFrame once avoids the quadratic cost of vstacking\n",
    "        # one-row frames inside a loop.\n",
    "        rows.extend([fields[:-1]] * int(fields[-1]))\n",
    "\n",
    "df_raw = pl.DataFrame(data=rows, schema={h: pl.UInt8 for h in header[:-1]}, orient=\"row\")\n",
    "\n",
    "df_raw"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对 X4 进行哑变量变换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div><style>\n",
       ".dataframe > thead > tr,\n",
       ".dataframe > tbody > tr {\n",
       "  text-align: right;\n",
       "  white-space: pre-wrap;\n",
       "}\n",
       "</style>\n",
       "<small>shape: (510, 7)</small><table border=\"1\" class=\"dataframe\"><thead><tr><th>level</th><th>X1</th><th>X2</th><th>X3</th><th>X4_2</th><th>X4_3</th><th>Y</th></tr><tr><td>u8</td><td>u8</td><td>u8</td><td>u8</td><td>u8</td><td>u8</td><td>u8</td></tr></thead><tbody><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>1</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td><td>0</td></tr><tr><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td><td>&hellip;</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr><tr><td>24</td><td>1</td><td>1</td><td>1</td><td>0</td><td>1</td><td>1</td></tr></tbody></table></div>"
      ],
      "text/plain": [
       "shape: (510, 7)\n",
       "┌───────┬─────┬─────┬─────┬──────┬──────┬─────┐\n",
       "│ level ┆ X1  ┆ X2  ┆ X3  ┆ X4_2 ┆ X4_3 ┆ Y   │\n",
       "│ ---   ┆ --- ┆ --- ┆ --- ┆ ---  ┆ ---  ┆ --- │\n",
       "│ u8    ┆ u8  ┆ u8  ┆ u8  ┆ u8   ┆ u8   ┆ u8  │\n",
       "╞═══════╪═════╪═════╪═════╪══════╪══════╪═════╡\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 0    ┆ 0    ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 0    ┆ 0    ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 0    ┆ 0    ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 0    ┆ 0    ┆ 0   │\n",
       "│ 1     ┆ 0   ┆ 0   ┆ 0   ┆ 0    ┆ 0    ┆ 0   │\n",
       "│ …     ┆ …   ┆ …   ┆ …   ┆ …    ┆ …    ┆ …   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 0    ┆ 1    ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 0    ┆ 1    ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 0    ┆ 1    ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 0    ┆ 1    ┆ 1   │\n",
       "│ 24    ┆ 1   ┆ 1   ┆ 1   ┆ 0    ┆ 1    ┆ 1   │\n",
       "└───────┴─────┴─────┴─────┴──────┴──────┴─────┘"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# One-hot encode the 3-level factor X4, dropping the first level as the\n",
    "# reference category (yields indicator columns X4_2 and X4_3).\n",
    "df_dum = df_raw.to_dummies(columns=[\"X4\"], drop_first=True)\n",
    "\n",
    "df_dum"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Logistic 回归"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Optimization terminated successfully.\n",
      "         Current function value: 0.399089\n",
      "         Iterations 7\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<table class=\"simpletable\">\n",
       "<caption>Logit Regression Results</caption>\n",
       "<tr>\n",
       "  <th>Dep. Variable:</th>           <td>Y</td>        <th>  No. Observations:  </th>  <td>   510</td>  \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Model:</th>                 <td>Logit</td>      <th>  Df Residuals:      </th>  <td>   504</td>  \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Method:</th>                 <td>MLE</td>       <th>  Df Model:          </th>  <td>     5</td>  \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Date:</th>            <td>Tue, 10 Dec 2024</td> <th>  Pseudo R-squ.:     </th>  <td>0.3393</td>  \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Time:</th>                <td>02:47:29</td>     <th>  Log-Likelihood:    </th> <td> -203.54</td> \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>converged:</th>             <td>True</td>       <th>  LL-Null:           </th> <td> -308.08</td> \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Covariance Type:</th>     <td>nonrobust</td>    <th>  LLR p-value:       </th> <td>3.235e-43</td>\n",
       "</tr>\n",
       "</table>\n",
       "<table class=\"simpletable\">\n",
       "<tr>\n",
       "        <td></td>          <th>coef</th>     <th>std err</th>      <th>z</th>      <th>P>|z|</th>  <th>[0.025</th>    <th>0.975]</th>  \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Intercept</th>    <td>   -4.5025</td> <td>    0.449</td> <td>  -10.019</td> <td> 0.000</td> <td>   -5.383</td> <td>   -3.622</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>C(X1)[T.1]</th>   <td>   -0.1855</td> <td>    0.304</td> <td>   -0.611</td> <td> 0.541</td> <td>   -0.780</td> <td>    0.409</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>C(X2)[T.1]</th>   <td>   -0.1904</td> <td>    0.263</td> <td>   -0.723</td> <td> 0.470</td> <td>   -0.706</td> <td>    0.326</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>C(X3)[T.1]</th>   <td>    2.2978</td> <td>    0.356</td> <td>    6.458</td> <td> 0.000</td> <td>    1.600</td> <td>    2.995</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>C(X4_2)[T.1]</th> <td>    2.7051</td> <td>    0.312</td> <td>    8.671</td> <td> 0.000</td> <td>    2.094</td> <td>    3.316</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>C(X4_3)[T.1]</th> <td>    4.8170</td> <td>    0.450</td> <td>   10.702</td> <td> 0.000</td> <td>    3.935</td> <td>    5.699</td>\n",
       "</tr>\n",
       "</table>"
      ],
      "text/latex": [
       "\\begin{center}\n",
       "\\begin{tabular}{lclc}\n",
       "\\toprule\n",
       "\\textbf{Dep. Variable:}   &        Y         & \\textbf{  No. Observations:  } &      510    \\\\\n",
       "\\textbf{Model:}           &      Logit       & \\textbf{  Df Residuals:      } &      504    \\\\\n",
       "\\textbf{Method:}          &       MLE        & \\textbf{  Df Model:          } &        5    \\\\\n",
       "\\textbf{Date:}            & Tue, 10 Dec 2024 & \\textbf{  Pseudo R-squ.:     } &   0.3393    \\\\\n",
       "\\textbf{Time:}            &     02:47:29     & \\textbf{  Log-Likelihood:    } &   -203.54   \\\\\n",
       "\\textbf{converged:}       &       True       & \\textbf{  LL-Null:           } &   -308.08   \\\\\n",
       "\\textbf{Covariance Type:} &    nonrobust     & \\textbf{  LLR p-value:       } & 3.235e-43   \\\\\n",
       "\\bottomrule\n",
       "\\end{tabular}\n",
       "\\begin{tabular}{lcccccc}\n",
       "                       & \\textbf{coef} & \\textbf{std err} & \\textbf{z} & \\textbf{P$> |$z$|$} & \\textbf{[0.025} & \\textbf{0.975]}  \\\\\n",
       "\\midrule\n",
       "\\textbf{Intercept}     &      -4.5025  &        0.449     &   -10.019  &         0.000        &       -5.383    &       -3.622     \\\\\n",
       "\\textbf{C(X1)[T.1]}    &      -0.1855  &        0.304     &    -0.611  &         0.541        &       -0.780    &        0.409     \\\\\n",
       "\\textbf{C(X2)[T.1]}    &      -0.1904  &        0.263     &    -0.723  &         0.470        &       -0.706    &        0.326     \\\\\n",
       "\\textbf{C(X3)[T.1]}    &       2.2978  &        0.356     &     6.458  &         0.000        &        1.600    &        2.995     \\\\\n",
       "\\textbf{C(X4\\_2)[T.1]} &       2.7051  &        0.312     &     8.671  &         0.000        &        2.094    &        3.316     \\\\\n",
       "\\textbf{C(X4\\_3)[T.1]} &       4.8170  &        0.450     &    10.702  &         0.000        &        3.935    &        5.699     \\\\\n",
       "\\bottomrule\n",
       "\\end{tabular}\n",
       "%\\caption{Logit Regression Results}\n",
       "\\end{center}"
      ],
      "text/plain": [
       "<class 'statsmodels.iolib.summary.Summary'>\n",
       "\"\"\"\n",
       "                           Logit Regression Results                           \n",
       "==============================================================================\n",
       "Dep. Variable:                      Y   No. Observations:                  510\n",
       "Model:                          Logit   Df Residuals:                      504\n",
       "Method:                           MLE   Df Model:                            5\n",
       "Date:                Tue, 10 Dec 2024   Pseudo R-squ.:                  0.3393\n",
       "Time:                        02:47:29   Log-Likelihood:                -203.54\n",
       "converged:                       True   LL-Null:                       -308.08\n",
       "Covariance Type:            nonrobust   LLR p-value:                 3.235e-43\n",
       "================================================================================\n",
       "                   coef    std err          z      P>|z|      [0.025      0.975]\n",
       "--------------------------------------------------------------------------------\n",
       "Intercept       -4.5025      0.449    -10.019      0.000      -5.383      -3.622\n",
       "C(X1)[T.1]      -0.1855      0.304     -0.611      0.541      -0.780       0.409\n",
       "C(X2)[T.1]      -0.1904      0.263     -0.723      0.470      -0.706       0.326\n",
       "C(X3)[T.1]       2.2978      0.356      6.458      0.000       1.600       2.995\n",
       "C(X4_2)[T.1]     2.7051      0.312      8.671      0.000       2.094       3.316\n",
       "C(X4_3)[T.1]     4.8170      0.450     10.702      0.000       3.935       5.699\n",
       "================================================================================\n",
       "\"\"\""
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import statsmodels.api as sm\n",
    "from patsy import dmatrices\n",
    "\n",
    "df = df_dum.to_pandas()\n",
    "\n",
    "# Full-model Logistic regression with every candidate predictor.\n",
    "# Formula: response ~ predictors; C(...) requests categorical coding.\n",
    "formula = \"Y ~ C(X1) + C(X2) + C(X3) + C(X4_2) + C(X4_3)\"\n",
    "\n",
    "# Build the response vector and design matrix from the formula.\n",
    "y, X = dmatrices(formula, data=df, return_type='dataframe')\n",
    "\n",
    "# Fit by maximum likelihood.\n",
    "model = sm.Logit(y, X).fit()\n",
    "\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 逐步选择法的多元 Logistic 回归"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Optimization terminated successfully.\n",
      "         Current function value: 163.433075\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 230.051125\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 160.329063\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 171.147334\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 160.561829\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 230.051125\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 230.185567\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 230.341139\n",
      "         Iterations 5\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 318.488254\n",
      "         Iterations 6\n",
      "Optimization terminated successfully.\n",
      "         Current function value: inf\n",
      "         Iterations 6\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 318.488254\n",
      "         Iterations 6\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 319.065121\n",
      "         Iterations 6\n",
      "Optimization terminated successfully.\n",
      "         Current function value: 320.008872\n",
      "         Iterations 6\n",
      "Optimization terminated successfully.\n",
      "         Current function value: inf\n",
      "         Iterations 7\n",
      "Optimization terminated successfully.\n",
      "         Current function value: inf\n",
      "         Iterations 7\n",
      "Optimization terminated successfully.\n",
      "         Current function value: inf\n",
      "         Iterations 7\n",
      "Optimization terminated successfully.\n",
      "         Current function value: inf\n",
      "         Iterations 7\n",
      "Optimization terminated successfully.\n",
      "         Current function value: inf\n",
      "         Iterations 7\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2385: RuntimeWarning: overflow encountered in exp\n",
      "  return 1/(1+np.exp(-X))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/discrete/discrete_model.py:2443: RuntimeWarning: divide by zero encountered in log\n",
      "  return np.sum(np.log(self.cdf(q * linpred)))\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/base/model.py:595: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n",
      "  warnings.warn('Inverting hessian failed, no bse or cov_params '\n",
      "/home/jimmy/projects/medical_statistics-1/.venv/lib/python3.10/site-packages/statsmodels/base/model.py:595: HessianInversionWarning: Inverting hessian failed, no bse or cov_params available\n",
      "  warnings.warn('Inverting hessian failed, no bse or cov_params '\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<table class=\"simpletable\">\n",
       "<caption>Logit Regression Results</caption>\n",
       "<tr>\n",
       "  <th>Dep. Variable:</th>           <td>Y</td>        <th>  No. Observations:  </th>  <td>   510</td> \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Model:</th>                 <td>Logit</td>      <th>  Df Residuals:      </th>  <td>   506</td> \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Method:</th>                 <td>MLE</td>       <th>  Df Model:          </th>  <td>     3</td> \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Date:</th>            <td>Tue, 10 Dec 2024</td> <th>  Pseudo R-squ.:     </th>  <td>   inf</td> \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Time:</th>                <td>02:47:29</td>     <th>  Log-Likelihood:    </th> <td>    -inf</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>converged:</th>             <td>True</td>       <th>  LL-Null:           </th> <td>  0.0000</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Covariance Type:</th>     <td>nonrobust</td>    <th>  LLR p-value:       </th>  <td> 1.000</td> \n",
       "</tr>\n",
       "</table>\n",
       "<table class=\"simpletable\">\n",
       "<tr>\n",
       "      <td></td>         <th>coef</th>     <th>std err</th>      <th>z</th>      <th>P>|z|</th>  <th>[0.025</th>    <th>0.975]</th>  \n",
       "</tr>\n",
       "<tr>\n",
       "  <th>X4_3</th>      <td>    4.7610</td> <td>    0.444</td> <td>   10.723</td> <td> 0.000</td> <td>    3.891</td> <td>    5.631</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>X4_2</th>      <td>    2.6836</td> <td>    0.310</td> <td>    8.651</td> <td> 0.000</td> <td>    2.076</td> <td>    3.292</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>X3</th>        <td>    2.3085</td> <td>    0.355</td> <td>    6.494</td> <td> 0.000</td> <td>    1.612</td> <td>    3.005</td>\n",
       "</tr>\n",
       "<tr>\n",
       "  <th>Intercept</th> <td>   -4.6471</td> <td>    0.422</td> <td>  -11.023</td> <td> 0.000</td> <td>   -5.473</td> <td>   -3.821</td>\n",
       "</tr>\n",
       "</table>"
      ],
      "text/latex": [
       "\\begin{center}\n",
       "\\begin{tabular}{lclc}\n",
       "\\toprule\n",
       "\\textbf{Dep. Variable:}   &        Y         & \\textbf{  No. Observations:  } &      510    \\\\\n",
       "\\textbf{Model:}           &      Logit       & \\textbf{  Df Residuals:      } &      506    \\\\\n",
       "\\textbf{Method:}          &       MLE        & \\textbf{  Df Model:          } &        3    \\\\\n",
       "\\textbf{Date:}            & Tue, 10 Dec 2024 & \\textbf{  Pseudo R-squ.:     } &      inf    \\\\\n",
       "\\textbf{Time:}            &     02:47:29     & \\textbf{  Log-Likelihood:    } &      -inf   \\\\\n",
       "\\textbf{converged:}       &       True       & \\textbf{  LL-Null:           } &    0.0000   \\\\\n",
       "\\textbf{Covariance Type:} &    nonrobust     & \\textbf{  LLR p-value:       } &    1.000    \\\\\n",
       "\\bottomrule\n",
       "\\end{tabular}\n",
       "\\begin{tabular}{lcccccc}\n",
       "                   & \\textbf{coef} & \\textbf{std err} & \\textbf{z} & \\textbf{P$> |$z$|$} & \\textbf{[0.025} & \\textbf{0.975]}  \\\\\n",
       "\\midrule\n",
       "\\textbf{X4\\_3}     &       4.7610  &        0.444     &    10.723  &         0.000        &        3.891    &        5.631     \\\\\n",
       "\\textbf{X4\\_2}     &       2.6836  &        0.310     &     8.651  &         0.000        &        2.076    &        3.292     \\\\\n",
       "\\textbf{X3}        &       2.3085  &        0.355     &     6.494  &         0.000        &        1.612    &        3.005     \\\\\n",
       "\\textbf{Intercept} &      -4.6471  &        0.422     &   -11.023  &         0.000        &       -5.473    &       -3.821     \\\\\n",
       "\\bottomrule\n",
       "\\end{tabular}\n",
       "%\\caption{Logit Regression Results}\n",
       "\\end{center}"
      ],
      "text/plain": [
       "<class 'statsmodels.iolib.summary.Summary'>\n",
       "\"\"\"\n",
       "                           Logit Regression Results                           \n",
       "==============================================================================\n",
       "Dep. Variable:                      Y   No. Observations:                  510\n",
       "Model:                          Logit   Df Residuals:                      506\n",
       "Method:                           MLE   Df Model:                            3\n",
       "Date:                Tue, 10 Dec 2024   Pseudo R-squ.:                     inf\n",
       "Time:                        02:47:29   Log-Likelihood:                   -inf\n",
       "converged:                       True   LL-Null:                        0.0000\n",
       "Covariance Type:            nonrobust   LLR p-value:                     1.000\n",
       "==============================================================================\n",
       "                 coef    std err          z      P>|z|      [0.025      0.975]\n",
       "------------------------------------------------------------------------------\n",
       "X4_3           4.7610      0.444     10.723      0.000       3.891       5.631\n",
       "X4_2           2.6836      0.310      8.651      0.000       2.076       3.292\n",
       "X3             2.3085      0.355      6.494      0.000       1.612       3.005\n",
       "Intercept     -4.6471      0.422    -11.023      0.000      -5.473      -3.821\n",
       "==============================================================================\n",
       "\"\"\""
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from statsmodels.discrete.discrete_model import BinaryResultsWrapper\n",
    "\n",
    "def stepwise_selection(X: pl.DataFrame, y: pl.Series,\n",
    "                       initial_list: list | None = None,\n",
    "                       threshold_in: float = 0.10,\n",
    "                       threshold_out: float = 0.15,\n",
    "                       verbose: bool = True\n",
    ") -> BinaryResultsWrapper:\n",
    "    \"\"\"\n",
    "    Perform bidirectional (forward/backward) stepwise selection for a\n",
    "    Logistic regression model.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    X (pl.DataFrame):\n",
    "        Independent variables.\n",
    "    y (pl.Series):\n",
    "        Dependent variable.\n",
    "    initial_list (list, optional):\n",
    "        Initial variables to include in the model. Defaults to None (start\n",
    "        from an empty model). A copy is taken, so the caller's list is\n",
    "        never mutated.\n",
    "    threshold_in (float, optional):\n",
    "        A variable enters the model when its p-value is below this.\n",
    "        Defaults to 0.10.\n",
    "    threshold_out (float, optional):\n",
    "        A variable leaves the model when its p-value is above this.\n",
    "        Defaults to 0.15.\n",
    "        Warning: `threshold_out` should be greater than `threshold_in`!\n",
    "    verbose (bool, optional):\n",
    "        Whether to print information about the process. Defaults to `True`.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    BinaryResultsWrapper:\n",
    "        The final fitted statsmodels model.\n",
    "    \"\"\"\n",
    "\n",
    "    if threshold_in >= threshold_out:\n",
    "        raise ValueError(\"`threshold_in` should be smaller than `threshold_out`.\")\n",
    "\n",
    "    # Working copies, so the caller's `initial_list` is never mutated\n",
    "    # (a mutable default argument would also persist between calls).\n",
    "    included = list(set(initial_list or []))\n",
    "    excluded = list(set(X.columns) - set(included))\n",
    "\n",
    "    def fit_logit(columns: list):\n",
    "        # Fit a Logit model on `columns` plus an explicit intercept.\n",
    "        # `disp=verbose` silences the optimizer output when not verbose.\n",
    "        return sm.Logit(\n",
    "            y.to_pandas(),\n",
    "            X.select(columns)\n",
    "                .with_columns(pl.lit(1).alias(\"Intercept\"))\n",
    "                .to_pandas()\n",
    "        ).fit(disp=verbose)\n",
    "\n",
    "    while True:\n",
    "        # flag which indicates whether a variable was added or removed\n",
    "        changed = False\n",
    "\n",
    "        # forward step: try each excluded variable, take in the best one\n",
    "        if verbose:\n",
    "            print(\"forward step\")\n",
    "\n",
    "        if excluded:  # guard: nothing to try once every variable is included\n",
    "            candidate_pvalues = {}\n",
    "            for candidate in excluded:\n",
    "                candidate_model = fit_logit(included + [candidate])\n",
    "                candidate_pvalues[candidate] = candidate_model.pvalues[candidate]\n",
    "                if verbose:\n",
    "                    print(f\"p-value of {candidate}: {candidate_pvalues[candidate]}\")\n",
    "\n",
    "            best_variable = min(candidate_pvalues, key=candidate_pvalues.get)\n",
    "            best_pvalue = candidate_pvalues[best_variable]\n",
    "\n",
    "            if verbose:\n",
    "                print(f\"best_pvalue: {best_pvalue}, belonging to {best_variable}. Take in: {best_pvalue < threshold_in}\")\n",
    "\n",
    "            if best_pvalue < threshold_in:\n",
    "                changed = True\n",
    "                included.append(best_variable)\n",
    "                excluded.remove(best_variable)\n",
    "\n",
    "        # backward step: take out the worst variable currently included\n",
    "        if verbose:\n",
    "            print(\"backward step\")\n",
    "\n",
    "        # refit on the current selection; this is also the model returned\n",
    "        # once neither step changes the selection\n",
    "        model = fit_logit(included)\n",
    "\n",
    "        if included:  # guard: an intercept-only model has nothing to remove\n",
    "            pvalues = model.pvalues.drop(\"Intercept\")\n",
    "            worst_variable = pvalues.idxmax()\n",
    "            worst_pvalue = pvalues[worst_variable]\n",
    "\n",
    "            if verbose:\n",
    "                print(f\"{worst_pvalue=}, belonging to {worst_variable}. Take out: {worst_pvalue > threshold_out}\")\n",
    "\n",
    "            if worst_pvalue > threshold_out:\n",
    "                changed = True\n",
    "                excluded.append(worst_variable)\n",
    "                included.remove(worst_variable)\n",
    "\n",
    "        if not changed:\n",
    "            if verbose:\n",
    "                print(\"No change in the model. Break the loop.\")\n",
    "            break\n",
    "\n",
    "    return model\n",
    "\n",
    "model = stepwise_selection(\n",
    "    df_dum.select(set(df_dum.columns) - set([\"level\", \"Y\"])),\n",
    "    df_dum.get_column(\"Y\"),\n",
    "    verbose=False\n",
    ")\n",
    "\n",
    "model.summary()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "通过双向选择的逐步 Logistic 回归得知，服药情况 (X3) 和治疗周数 (X4) 对回归模型有统计学意义，即对骨折疗效有影响。骨折类型 (X1)、治疗方法 (X2) 对回归模型无统计学意义。\n",
    "\n",
    "各参数的优势比如下："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>coef</th>\n",
       "      <th>OR</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>X4_3</th>\n",
       "      <td>4.761002</td>\n",
       "      <td>116.862927</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>X4_2</th>\n",
       "      <td>2.683644</td>\n",
       "      <td>14.638342</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>X3</th>\n",
       "      <td>2.308543</td>\n",
       "      <td>10.059754</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Intercept</th>\n",
       "      <td>-4.647136</td>\n",
       "      <td>0.009589</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "               coef          OR\n",
       "X4_3       4.761002  116.862927\n",
       "X4_2       2.683644   14.638342\n",
       "X3         2.308543   10.059754\n",
       "Intercept -4.647136    0.009589"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "# Odds ratio of each retained term: OR = exp(coefficient).\n",
    "params = pd.DataFrame({\"coef\": model.params}).assign(OR=lambda t: np.exp(t[\"coef\"]))\n",
    "\n",
    "params"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
