\documentclass[t,12pt,aspectratio=169]{beamer} % 16:9 宽屏比例，适合现代投影
\usepackage{ctex} % 中文支持
\usepackage{amsmath, amssymb} % 数学公式与符号
\usepackage{graphicx}
\usepackage{pythonhighlight}
\usepackage{url}
\usepackage{hyperref}
\usepackage{verbatim}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 主题设置（推荐简洁风格）
\usetheme{Madrid}
\usecolortheme{default} % 可选：seahorse, beaver, dolphin 等

\title{应用回归分析第7章：岭回归 }
\author{HXQ ET AL}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}

\begin{frame}
  \titlepage
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{第7章书本目录 }

\begin{enumerate}

\item[7.1.] 岭回归估计的定义
\item[7.2.] 岭回归估计的性质
\item[7.3.] 岭迹分析
\item[7.4.] 岭参数的选择
\item[7.5.] 用岭回归选择变量

\end{enumerate}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{第7章PPT目录}

    \begin{enumerate}
    \item[7.1] 多重共线性产生的问题以及岭回归的思路
    \item[7.2] 岭迹图的概念
    \item[7.3] 选择岭参数的方法
    \item[7.4] 例子：Longley(1967) 数据
    \item[7.5] 例子：变量的含义与待解决的问题
    \item[7.6] 例子解答：相关系数矩阵
    \item[7.7] 例子解答：普通最小二乘的回归结果
    \item[7.8] 例子解答：方差扩大因子与条件数
    \item[7.9] 例子解答：岭迹图的分析
    \item[7.10] 例子解答：岭参数的选择(VIF标准)
    \item[7.11] 例子解答：岭参数的选择(GCV标准)
    %\item[7.a] 例子解答：Python程序
    \end{enumerate}


\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.1 多重共线性产生的问题以及岭回归的思路}  

\begin{itemize}
\item {\color{red}当设计矩阵 $X$ 的列向量之间近似线性相关时，$X^TX$ 接近奇异阵。}
\item 这时最小二乘法的参数估计及其方差都会增大，导致参数估计不准： 
\[ {\color{red}\boxed{ \hat{\beta}=(X^TX)^{-1}X^T{\bf y}, \hspace{0.4cm} 
\textrm{cov}(\hat{\beta},\hat{\beta})=\sigma^2(X^TX)^{-1} }} \] 
\item 岭回归的思路是用 $X^TX+kI (k>0)$ 来代替 $X^TX$, 以降低条件数：
\begin{eqnarray*}
{\color{red}\boxed{ \hat{\beta}(k) = (X^TX+kI)^{-1}X^T{\bf y}  }}
\end{eqnarray*}

\vspace{-0.6cm}

\begin{eqnarray*}
 \textrm{cov}(\hat{\beta}(k), \hat{\beta}(k)) = \sigma^2(X^TX+kI)^{-1} (X^TX)  (X^TX+kI)^{-1} =\sigma^2\cdot C(k)
\end{eqnarray*}

\item 岭参数 $k>0$, 需要合适选取。

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.2 岭迹图的概念}  

\begin{itemize}
\item 岭迹图的横坐标是岭参数 $k$ 的不同取值。
\item 岭迹图的纵坐标是各参数 $\hat{\beta}_1(k), \hat{\beta}_2(k), \cdots, \hat{\beta}_p(k)$ 的值。
\item 岭迹图的例子。注意到参数的符号变化。
%\item 

\begin{center}
\includegraphics[height=0.5\textheight, width=0.9\textwidth]{longley-ridge.png}
\end{center}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.3 选择岭参数的方法}  

\begin{enumerate}
%\item {\color{blue}固定点方法}：Hoerl, Kennard 和 Baldwin 在1975年提出下述公式：
%\[ k_0 = \frac{p\hat{\sigma}^2(0)}{\hat{\beta}_1(0)^2+\cdots+\hat{\beta}_p(0)^2} \]
%\item 迭代方法：Hoerl 和 Kennard 在1976年提出下述迭代公式：
%\[ k_{i+1} = \frac{p\hat{\sigma}^2(0)}{\hat{\beta}_1(k_i)^2+\cdots+\hat{\beta}_p(k_i)^2} \]

\item {\color{blue}观察岭迹图}：选取使得参数稳定的最小的$k$. 

\item {\color{blue}方差扩大因子}：选取使所有的 $VIF_j(k)$ 小于10 的最小的 $k$. 
%$VIF(k)$ 即下述矩阵的对角线元素：
%岭回归的参数估计的方差扩大因子为 
%将数据中心化和标准化之后，自变量之间的相关系数矩阵为 $r=(X^TX)/(n-1)$, 其逆阵 $C=r^{-1}=(n-1)(X^TX)^{-1}$ 的 
\[  {\color{red}\boxed{ VIF_j(k)=C(k)_{jj} }},\hspace{0.4cm} C(k)=(X^TX+kI)^{-1}(X^TX)(X^TX+kI)^{-1} \]

\item {\color{blue}残差平方和}：给定 $c>1$, 选取使得 $SSE(k)<c\cdot SSE$ 的最大的 $k$. \\
这样可以把残差平方和的增加控制在一定范围内。

\item {\color{blue}CV方法}：使得交叉验证的误差最小。

\end{enumerate}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.4 例子：Longley(1967) 数据}  

{\tiny 
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|}\hline
Y 	   & X1     & X2               & X3      &	X4      &	X5         &	X6 \\ \hline  
60323 & 	830 & 	234289 & 	2356  &	1590  &	107608 &  1947 \\ \hline  
61122 & 	885 &	259426 & 	2325  &	1456  &	108632 & 	1948 \\ \hline  
60171 & 	882 &	258054 & 	3682  &	1616  &	109773 &	1949 \\ \hline  
61187 & 	895 & 	284599 & 	3351  &	1650  &	110929 & 	1950 \\ \hline  
63221 & 	962 & 	328975 & 	2099  &	3099  &	112075 & 	1951 \\ \hline  
63639 & 	981 & 	346999 & 	1932  &	3594  &	113270 & 	1952 \\ \hline  
64989 & 	990 & 	365385 & 	1870  &	3547  &	115094 & 	1953 \\ \hline  
63761 & 	1000 & 	363112 & 	3578  &	3350  &	116219 & 	1954 \\ \hline  
66019 & 	1012 & 	397469 & 	2904  &	3048  &	117388 & 	1955 \\ \hline  
67857 & 	1046 & 	419180 & 	2822  &	2857  &	118734 & 	1956 \\ \hline  
68169 & 	1084 & 	442769 & 	2936  &	2798  &	120445 & 	1957 \\ \hline  
66513 & 	1108 & 	444546 & 	4681  &	2637  &	121950 & 	1958 \\ \hline  
68655 & 	1126 & 	482704 & 	3813  &	2552  &	123366 & 	1959 \\ \hline  
69564 & 	1142 & 	502601 & 	3931  &	2514  &	125368 & 	1960 \\ \hline  
69331 & 	1157 & 	518173 & 	4806  &	2572  &	127852 & 	1961 \\ \hline  
70551  &	1169 & 	554894 & 	4007  &	2827  &	130081 & 	1962 \\ \hline  
\end{tabular}
\end{center}
}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.5 例子：变量的含义与待解决的问题}  

\begin{itemize}
\item 变量的含义：
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|}\hline 
X1 & GNP.deflator 	& X4 & Armed Forces & Y & Employed \\ \hline 
X2 & GNP 		& X5 & Population &&  \\ \hline 
X3 & Unemployed 	& X6 & Year && \\ \hline 
\end{tabular}
\end{center}

\item 待解决的问题：
    \begin{enumerate}
    \item 检验多重共线性。
%    \item 说明普通最小二乘的参数符号号不符合变量之间的相关性。
    \item 使用岭回归方法。
    \end{enumerate}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.6 例子解答：相关系数矩阵}  

\begin{itemize}
\item 从相关系数矩阵可以看出，变量之间就有近似线性关系。
\item 因变量与所有自变量的相关系数都是正的。 

%{\footnotesize
%\begin{center}
%\begin{tabular}{|c|c|c|c|c|c|c|c|}\hline
%    &   Y  &  X1  &  X2  &  X3  &  X4  &  X5  &  X6 \\  \hline 
%Y  & 1.00 & 0.97 & 0.98 & 0.50 & 0.46 & 0.96 & 0.97 \\  \hline 
%X1 & 0.97 & 1.00 & 0.99 & 0.62 & 0.46 & 0.98 & 0.99 \\  \hline 
%X2 & 0.98 & 0.99 & 1.00 & 0.60 & 0.45 & 0.99 & 1.00 \\  \hline 
%X3 & 0.50 & 0.62 & 0.60 & 1.00 & -0.18 & 0.69 & 0.67 \\  \hline 
%X4 & 0.46 & 0.46 & 0.45 & -0.18 & 1.00 & 0.36 & 0.42 \\  \hline 
%X5 & 0.96 & 0.98 & 0.99 & 0.69 & 0.36 & 1.00 & 0.99 \\  \hline 
%X6 & 0.97 & 0.99 & 1.00 & 0.67 & 0.42 & 0.99 & 1.00 \\  \hline 
%\end{tabular}
%\end{center}
%}

{\footnotesize\color{blue}
\begin{verbatim}
           Y    X1    X2    X3    X4    X5    X6
    Y   1.00  0.97  0.98  0.50  0.46  0.96  0.97
    X1  0.97  1.00  0.99  0.62  0.46  0.98  0.99
    X2  0.98  0.99  1.00  0.60  0.45  0.99  1.00
    X3  0.50  0.62  0.60  1.00 -0.18  0.69  0.67
    X4  0.46  0.46  0.45 -0.18  1.00  0.36  0.42
    X5  0.96  0.98  0.99  0.69  0.36  1.00  0.99
    X6  0.97  0.99  1.00  0.67  0.42  0.99  1.00
\end{verbatim}
}


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.7 例子解答：普通最小二乘的回归结果}  

\begin{itemize}
\item 将自变量和因变量的数据标准化，做普通线性回归。
\item 自变量 X2 的系数为负，不符合经济含义。\\%与相关系数的正负号不符合。\\另外
\item 自变量 X1,X2,X5 的 t 检验不显著。

%\begin{center}
{\scriptsize\color{blue}
\begin{verbatim}
==============================================================================
Dep. Variable:                      Y   R-squared:                       0.995
Model:                            OLS   Adj. R-squared:                  0.993                              
==============================================================================
                 coef    std err          t      P>|t|      [0.025      0.975]
------------------------------------------------------------------------------
X1             0.0463      0.248      0.187      0.855      -0.505       0.598
X2            -1.0137      0.899     -1.127      0.286      -3.017       0.990
X3            -0.5375      0.123     -4.360      0.001      -0.812      -0.263
X4            -0.2047      0.040     -5.083      0.000      -0.294      -0.115
X5            -0.1012      0.425     -0.238      0.816      -1.048       0.845
X6             2.4797      0.586      4.233      0.002       1.174       3.785
==============================================================================
\end{verbatim}
}
%\end{center}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.8 例子解答：方差扩大因子与条件数}  

\begin{itemize}
\item 方差扩大因子与特征值如下表。条件数为 12220. 
{\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|}\hline
自变量 & X1 & X2 & X3 & X4 & X5 & X6 \\ \hline 
VIF & 135.53 & 1788.51 &   33.62 & 3.59 &  399.15 &  758.98 \\ \hline 
特征值 & 69.05   & 17.63 &  3.05      &  0.22   &  0.04  &  0.01 \\ \hline 
\end{tabular}
\end{center}
}

\item 特征向量：
{\footnotesize
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|}\hline
特征值 & 69.05 & 17.63 &  3.05 &  0.22   &  0.04  &  0.01 \\ \hline 
&0.46 &  0.06 & -0.15 &  0.79 &  0.34 &  0.14 \\ \hline 
&0.46 &  0.05 & -0.28 & -0.12 & -0.15 & -0.82 \\ \hline 
&0.32 & -0.6 &  0.73 &  0.01 &  0.01 & -0.11 \\ \hline 
&0.2 &  0.8 &  0.56 & -0.08 &  0.02 & -0.02 \\ \hline 
&0.46 & -0.05 & -0.2 & -0.59 &  0.55 &  0.31 \\ \hline 
&0.46 &  0  & -0.13 & -0.05 & -0.75 &  0.45 \\ \hline 
\end{tabular}
\end{center}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{7.9 例子解答：岭迹图的分析}  

\begin{itemize}
\item 从岭迹图看出，X2的系数从负号变成正号。%\\ 以下是程序5的图形。

\begin{center}
\includegraphics[height=0.5\textheight, width=0.9\textwidth]{longley-ridge.png}
\end{center}


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.10.1 例子解答：岭参数的选择(VIF标准)}  

\begin{itemize}
\item 当岭参数 $k= 0.016$ 时，$VIF_j(k)$ 都小于10. 以下是程序(5)的结果。
%此时变量 X2的系数为正，与经济意义相符合。

{\scriptsize\color{blue}
\begin{verbatim}
In [37]: VIF.round(2)
Out[37]: 
array([[  9.04, 119.23,   2.24,   0.24,  26.61,  50.6 ],
       [  7.23,  65.29,   1.31,   0.21,  18.01,  32.86],
       [  6.27,  41.22,   0.9 ,   0.2 ,  13.85,  24.36],
       [  5.66,  28.42,   0.68,   0.19,  11.41,  19.43],
       [  5.22,  20.8 ,   0.55,   0.18,   9.79,  16.21],
       [  4.88,  15.91,   0.46,   0.18,   8.62,  13.93],
       [  4.6 ,  12.58,   0.4 ,   0.18,   7.73,  12.21],
       [  4.36,  10.2 ,   0.36,   0.18,   7.03,  10.86],
       [  4.16,   8.45,   0.33,   0.17,   6.45,   9.76],  <<<---
       [  3.98,   7.12,   0.31,   0.17,   5.96,   8.86],

In [38]: k
Out[38]: 
array([0.   , 0.002, 0.004, 0.006, 0.008, 0.01 , 0.012, 0.014, 0.016,  <<<---
       0.018, 0.02 , 0.022, 0.024, 0.026, 0.028, 0.03 , 0.032, 0.034,       
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.10.2 例子解答：岭参数的选择(VIF标准)}  

\begin{itemize}
\item 当岭参数 $k= 0.016$ 时，变量 X2的系数为正，与经济意义相符合。\\ 以下是程序(5)的计算结果。

{\footnotesize\color{blue}
\begin{verbatim}
In [41] beta.round(3)
Out[41]: 
array([[ 0.046, -1.014, -0.538, -0.205, -0.101,  2.48 ],
       [ 0.005, -0.603, -0.481, -0.193, -0.214,  2.181],
       [-0.01 , -0.366, -0.447, -0.185, -0.265,  1.985],
       [-0.014, -0.214, -0.425, -0.18 , -0.287,  1.841],
       [-0.011, -0.108, -0.409, -0.175, -0.295,  1.729],
       [-0.006, -0.031, -0.397, -0.172, -0.294,  1.636],
       [ 0.001,  0.027, -0.387, -0.169, -0.289,  1.558],
       [ 0.009,  0.072, -0.38 , -0.166, -0.28 ,  1.491],
       [ 0.017,  0.108, -0.373, -0.164, -0.27 ,  1.432],  <<<---
       [ 0.026,  0.137, -0.368, -0.162, -0.259,  1.379],
       
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.11 例子解答：岭参数的选择(CV标准)}  

\begin{itemize}
\item 通过 sklearn 模块的 RidgeCV 函数选择岭参数。结果为 $k=0.0026$. 
\item CV的思路是交叉验证，使预测误差为最小。以下是程序(1-4)的输出。

{\footnotesize\color{blue}
\begin{verbatim}
OLS score: 0.9955
OLS coefficients: [[ 0.046 -1.014 -0.538 -0.205 -0.101  2.48 ]] 

Ridge(alpha=0.016) score: 0.9955
Ridge(alpha=0.016) coefficients: 
        [[ 0.017  0.108 -0.373 -0.164 -0.27   1.432]] 

RidgeCV score: 0.9953
RidgeCV alpha: 0.0026  <<<---
RidgeCV coefficients: [[-0.001 -0.519 -0.469 -0.19  -0.234  2.114]] 
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.a.1 例子解答：Python程序(1)}  

\begin{itemize}
\item 载入 sklearn 模块中的线性回归与岭回归的函数，将数据标准化。

{\footnotesize\color{blue}
\begin{verbatim}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import RidgeCV

mydata=pd.read_csv('longley.csv')
#将自变量和因变量中心化和标准化
mydata_normd=(mydata - mydata.mean())/mydata.std()
A=np.asmatrix(mydata_normd)
X=A[:,1:]
y=A[:,0]
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.a.2 例子解答：Python程序(2)}  

\begin{itemize}
\item 测试普通线性回归和岭回归。

{\footnotesize\color{blue}
\begin{verbatim}
#最小二乘法，参数估计
reg01=LinearRegression()
reg01.fit(X,y)
print('OLS score:',reg01.score(X,y).round(4))
print('OLS coefficients:',reg01.coef_.round(3),'\n')

#岭回归，参数估计，固定岭参数
reg02=Ridge(alpha=0.016)
reg02.fit(X,y)
print('Ridge(alpha=0.016) score:',reg01.score(X,y).round(4))
print('Ridge(alpha=0.016) coefficients:',reg02.coef_.round(3),'\n')
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.a.3 例子解答：Python程序(3)}  

\begin{itemize}
\item 岭回归，给定一些岭参数，画出岭迹图。

{\footnotesize\color{blue}
\begin{verbatim}
alphas=np.linspace(0,0.3,51)
betas=np.zeros((51,6))
for i in range(51):
    reg03=Ridge(alpha=alphas[i])
    reg03.fit(X,y)
    betas[i]=reg03.coef_

ax=plt.gca()
ax.plot(alphas,betas)
plt.xlabel('k')
plt.ylabel('beta(k)')
plt.title('Ridge coefficients as a function of k')
plt.grid(True)
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.a.4 例子解答：Python程序(4)}  

\begin{itemize}
\item 岭回归，按CV标准自动选择岭参数。

{\footnotesize\color{blue}
\begin{verbatim}
alphas=np.linspace(0.0001,0.1,1000)
reg04=RidgeCV(alphas)
reg04.fit(X,y)
print('RidgeCV score:',reg04.score(X,y).round(4))
print('RidgeCV alpha:',reg04.alpha_)
print('RidgeCV coefficients:',reg04.coef_.round(3),'\n')
\end{verbatim}
}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile=singleslide]{7.a.5 例子解答：Python程序(5)}  

\begin{itemize}
\item 列举岭参数的值，计算回归参数，画出岭迹图，计算 VIF.

{\scriptsize\color{blue}
\begin{verbatim}
B=np.dot(X.T,X); E6=np.diag(np.ones(6))
Nk=101; k=np.linspace(0,0.2,Nk)

beta=np.zeros((Nk,6))
for i in range(Nk):
    Binv=np.linalg.inv(B+k[i]*E6)
    beta[i]=np.dot(np.dot(Binv,X.T),y).T

for i in range(6):
    plt.plot(k,beta[:,i],'-',label='beta_{}'.format(i+1))
plt.legend(); plt.grid(True); plt.xlabel('k') 
plt.ylabel('beta hat (k)')

VIF=np.zeros((Nk,6))
for i in range(Nk):
    Binv=np.linalg.inv(B+k[i]*E6)
    C=np.dot(np.dot(Binv,B),Binv)
    VIF[i]=np.diag(C)
\end{verbatim}
}

\end{itemize}

\end{frame}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}


