\documentclass[oneside]{homework} %%Change `twoside' to `oneside' if you are printing only on the one side of each sheet.

\studname{Ran Yu}
\studmail{ry2239@columbia.edu}
\coursename{Machine Learning}
\hwNo{2}
\uni{ry2239}


\usepackage{graphicx}
\usepackage{subfigure}
\begin{document}
\maketitle

\section*{Problem 1}
For the hypothesis space given by the problem, the VC dimension is 4.
\begin{figure}[!h]
     \centering  
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p1.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p2.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p3.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p4.jpg}}\\
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p5.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p6.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p7.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p8.jpg}}\\
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p9.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p10.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p11.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p12.jpg}}\\
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p13.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p14.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p15.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/p16.jpg}}\\
 \end{figure}
 For $h=5$, consider any 5 points and take the smallest axis-aligned rectangle that contains all of them, so that each edge of the rectangle is tangent to at least one of the points. This minimal rectangle is determined by at most 4 of the points, so at least one of the 5 points is not needed to define the boundary. If that point is labeled negative, it cannot be classified correctly by any rectangle that contains the remaining positive points; hence no set of 5 points can be shattered.
 See Figure~1 for an illustration of the problem.
 \begin{figure}[!h] 
     \centering   
     \includegraphics[width=9cm, height=7.5cm]{prob1/h5.jpg} 
     \caption{\label{fig:h5}$h=5$} 
\end{figure}
BONUS:\\
If we have the freedom to decide whether the inside or the outside of the rectangle is labeled positive, the VC dimension can be 5.\\
\begin{figure}[!h]
     \centering  
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/h5-1.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/h5-2.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/h5-3.jpg}}
     \subfigure[]{ \includegraphics[width=3cm, height=2.5cm]{prob1/h5-4.jpg}}
 \end{figure}
 We can see that after changing the hypothesis space to allow this freedom, we can classify all 5 points correctly, so the VC dimension can be 5.
\section*{Problem 2}
'$\Rightarrow$':\\
$\mathbf{K}=\left(\begin{array}{cccc}
k_{11}&k_{12}&\ldots &k_{1n}\\
k_{21}&k_{22}&\ldots&k_{2n}\\
\vdots&\vdots&\ddots&\vdots\\
k_{n1}&k_{n2}&\ldots&k_{nn}\end{array}\right)$\\\\
where each entry $\mathbf{K}_{i,j}=k(x_i,x_j)$, and $k(x_i,x_j)=\phi(x_i)^T\phi(x_j)$\\\\
thus,
$\mathbf{K}=\left(\begin{array}{cccc}
\phi(x_1)^T\phi(x_1)&\phi(x_1)^T\phi(x_2)&\ldots&\phi(x_1)^T\phi(x_n)\\
\phi(x_2)^T\phi(x_1)&\phi(x_2)^T\phi(x_2)&\ldots&\phi(x_2)^T\phi(x_n)\\
\vdots&\vdots&\ddots &\vdots\\
\phi(x_n)^T\phi(x_1)&\phi(x_n)^T\phi(x_2)&\ldots&\phi(x_n)^T\phi(x_n)\end{array}\right)\\=\left(\begin{array}{cccc}\phi(x_1)^T\\\phi(x_2)^T\\\vdots\\\phi(x_n)^T\end{array}\right)\left(\begin{array}{cccc}\phi(x_1)&\phi(x_2)&\ldots&\phi(x_n)\end{array}\right)$\\
Therefore, $c^TKc=c^T\left(\begin{array}{cccc}\phi(x_1)^T\\\phi(x_2)^T\\\vdots\\\phi(x_n)^T\end{array}\right)\left(\begin{array}{cccc}\phi(x_1)&\phi(x_2)&\ldots&\phi(x_n)\end{array}\right)c$\\
Let $m=\left(\begin{array}{cccc}\phi(x_1)&\phi(x_2)&\ldots&\phi(x_n)\end{array}\right)c$; then $c^T\left(\begin{array}{cccc}\phi(x_1)^T\\\phi(x_2)^T\\\vdots\\\phi(x_n)^T\end{array}\right)=m^T$, so $c^TKc=m^Tm=\|m\|^2\geq 0$, and the kernel matrix $\mathbf{K}$ is positive semi-definite.\\
'$\Leftarrow$':\\
Since $\mathbf{K}$ is symmetric and positive semi-definite, it admits a decomposition $\mathbf{K}=L^TL$ (e.g.\ a Cholesky-type factorization). Defining $\phi(x_i)$ as the $i$-th column of $L$ gives $\mathbf{K}_{i,j}=\phi(x_i)^T\phi(x_j)$, so $k(\cdot,\cdot)$ is a Mercer kernel.\\
a) $k(x,\tilde{x})=\alpha k_1(x,\tilde{x})+\beta k_2(x,\tilde{x})$\\
$~~~~~k(x,\tilde{x})=\alpha \phi_1(x)^T\phi_1(\tilde{x})+\beta \phi_2(x)^T\phi_2(\tilde{x})$\\
$~~~~~\mathbf{K}=\alpha \mathbf{K_1}+\beta \mathbf{K_2}$, so $c^TKc=c^T(\alpha K_1+\beta K_2)c=\alpha c^TK_1c+\beta c^TK_2c$. Since $\mathbf{K_1}$ and $\mathbf{K_2}$ are both positive semi-definite, $c^TK_1c\geq0$ and $c^TK_2c\geq0$, and since $\alpha, \beta \geq 0$, we get $c^TKc \geq 0$, so $k$ is a valid Mercer kernel.\\
b) $k(x,\tilde{x})=k_1(x,\tilde{x})\times k_2(x,\tilde{x})$\\
$~~~~~$Write $k_1(x,\tilde{x})=\sum_{i=1}^m \phi^{(1)}_i(x) \phi^{(1)}_i(\tilde{x})$, and $k_2(x,\tilde{x})=\sum_{j=1}^n \phi^{(2)}_j(x) \phi^{(2)}_j(\tilde{x})$; thus\\
$~~~~~k(x,\tilde{x})=\sum_{i=1}^{m}\sum_{j=1}^{n}\phi^{(1)}_i(x)\phi^{(1)}_i(\tilde{x})\phi^{(2)}_j(x)\phi^{(2)}_j(\tilde{x})=\sum_{i=1}^{m}\sum_{j=1}^{n}\bigl(\phi^{(1)}_i(x)\phi^{(2)}_j(x)\bigr)\bigl(\phi^{(1)}_i(\tilde{x})\phi^{(2)}_j(\tilde{x})\bigr)$
\\$~~~~~$Let's define: $\phi(x)=\left(\begin{array}{c}\phi^{(1)}_1(x)\phi^{(2)}_1(x)\\\phi^{(1)}_1(x)\phi^{(2)}_2(x)\\\vdots\\\phi^{(1)}_m(x)\phi^{(2)}_n(x)\end{array}\right)$\\
$~~~~~$Then $k(x,\tilde{x})=\phi(x)^T\phi(\tilde{x})$, and as we have proven before, the corresponding kernel matrix is positive semi-definite, so $k(x,\tilde{x})=k_1(x,\tilde{x})\times k_2(x,\tilde{x})$ is also a Mercer kernel.\\
c) $k(x,\tilde{x})=f(k_1(x,\tilde{x}))$\\
$~~~~~$Since $f$ is a polynomial with positive coefficients, $k(x,\tilde{x})=a\,k_1(x,\tilde{x})+b\,k_1(x,\tilde{x})^2+\dots$; as proven in a) and b), the sum of valid kernels is a valid kernel and the product of valid kernels is a valid kernel. Hence, $k(x,\tilde{x})$ is also a Mercer kernel.\\
d) $k(x,\tilde{x})=\exp(k_1(x,\tilde{x}))$\\
$~~~~~$Let's first justify that $k_1(x,\tilde{x})+n$, where $n$ is a constant and $n>0$, is also valid. The corresponding matrix $N$ with all entries equal to $n$ satisfies $c^TNc=n\left(\sum_i c_i\right)^2\geq0$, so adding a positive constant preserves validity.\\
$~~~~~$We expand $k(x,\tilde{x})$ as a Taylor series, $k(x,\tilde{x})=1+k_1(x,\tilde{x})+\frac{1}{2!}k_1(x,\tilde{x})^2+\frac{1}{3!}k_1(x,\tilde{x})^3+\dots$; in this way, $k(x,\tilde{x})$ can be considered as $k(x,\tilde{x})=f(k_1(x,\tilde{x}))$, where $f$ is a power series with positive coefficients (whose constant term $1$ is valid by the argument above). By the result of c), $k(x,\tilde{x})$ is also valid.\\

\section*{Problem 3}
 If we set the gradient of $L(w)$ with respect to $w$ equal to $0$, we see that the solution for $w$ takes the form of a linear combination of the vectors $\phi(x_i)$, with coefficients that are functions of $w$:\\
 $L'(w)=2\sum_{i=1}^n(w^T\phi(x_i)-y_i)\phi(x_i)+2\lambda w=0$\\ thus, $w=-\frac{1}{\lambda}\sum_{i=1}^n(w^T\phi(x_i)-y_i)\phi(x_i)=\sum_{i=1}^{n}\alpha_i\phi(x_i)$, where we define $\alpha_i = -\frac{1}{\lambda}\left(w^T\phi(x_i)-y_i\right)$, i.e.\ $w=\mathbf{\Phi}^T\mathbf{\alpha}$, with $\mathbf{\Phi}$ the design matrix whose $i$-th row is $\phi(x_i)^T$.\\
$L(\alpha)=\alpha^T\Phi\Phi^T\Phi\Phi^T\alpha-2\alpha^T\Phi\Phi^Ty+y^Ty+\lambda\alpha^T\Phi\Phi^T\alpha$, where $y=[y_1,y_2\dots y_n]^T$\\
Define $K=\Phi \Phi^T$; $K$ is a Gram matrix with $K_{i,j}=\phi(x_i)^T\phi(x_j)$. $L(\alpha)$ can be rewritten as $L(\alpha)=\alpha^TKK\alpha-2\alpha^TKy+y^Ty+\lambda\alpha^TK\alpha$.\\ Setting the gradient of $L(\alpha)$ with respect to $\alpha$ equal to $0$, we get $\alpha=(K+\lambda I_N)^{-1}y$
\section*{Problem 4}
Running SVM with a linear kernel and varying $C$, we get the result in Figure 2. We can see that when $C>0.01$, the error becomes 0.
 \begin{figure}[!h] 
     \centering   
     \includegraphics[width=9cm, height=7.5cm]{prob4/LC.pdf} 
     \caption{\label{fig:linC}Linear kernel--$C$} 
\end{figure}
When we run SVM with a polynomial kernel and vary the degree, we get the result in Figure 3. We set $C=0.001$, and we can see that when the degree is $\geq 2$, the error becomes 0.
 \begin{figure}[!h] 
     \centering   
     \includegraphics[width=9cm, height=7.5cm]{prob4/PolyD.pdf} 
     \caption{\label{fig:polyD}Polynomial kernel--degree} 
\end{figure}
We set the degree to 3 while changing the value of $C$ and run SVM with the polynomial kernel; we get the result in Figure 4. We can see the error is 0 no matter how we change the value of $C$.
 \begin{figure}[!h] 
     \centering   
     \includegraphics[width=9cm, height=7.5cm]{prob4/PolyC.pdf} 
     \caption{\label{fig:polyC}Polynomial kernel--$C$} 
\end{figure}
When we run SVM with an RBF kernel, we first fix $C=0.001$ and change sigma. We get the result in Figure 5. We can see that while sigma is smaller than $2^{-3}$ or bigger than $2$, the error $\neq 0$ because of under-fitting and over-fitting respectively; in other conditions, the error $=0$.
 \begin{figure}[!h] 
     \centering   
     \includegraphics[width=9cm, height=7.5cm]{prob4/RbfS.pdf} 
     \caption{\label{fig:rbfS}RBF kernel--sigma} 
\end{figure}
We then fix sigma $=2^{-2}$ and change $C$; we get the result in Figure 6. The error stays 0 while changing the value of $C$.
 \begin{figure}[!h] 
     \centering   
     \includegraphics[width=9cm, height=7.5cm]{prob4/RbfC.pdf} 
     \caption{\label{fig:rbfC}RBF kernel--$C$} 
\end{figure}
\end{document}