\documentclass[paper=a4, fontsize=11pt]{scrartcl} % A4 paper and 11pt font size

\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\usepackage{fourier} % Use the Adobe Utopia font for the document - comment this line to return to the LaTeX default
\usepackage[english]{babel} % English language/hyphenation
\usepackage{amsmath,amsfonts,amsthm} % Math packages
\usepackage[UTF8]{ctex}
\usepackage{graphicx}
\usepackage{float}
\usepackage[top=2cm, bottom=2cm, left=2cm, right=2cm]{geometry}
\usepackage[linesnumbered,boxed,lined,ruled]{algorithm2e}
% NOTE(review): algorithmicx/algpseudocode conflict with algorithm2e and are
% not used anywhere in this document, so they are disabled here. If pseudocode
% is added later, pick ONE family (algorithm2e OR algorithmicx+algpseudocode).
% \usepackage{algorithmicx}
% \usepackage{algpseudocode}

\usepackage{xcolor}
\usepackage[framed,numbered,autolinebreaks,useliterate]{mcode}
\usepackage{listings}

\usepackage{sectsty} % Allows customizing section commands
\allsectionsfont{\centering \normalfont\scshape} % Make all sections centered, the default font and small caps

\usepackage{fancyhdr} % Custom headers and footers
\pagestyle{fancyplain} % Makes all pages in the document conform to the custom headers and footers
\fancyhead{} % No page header - if you want one, create it in the same way as the footers below
\fancyfoot[L]{} % Empty left footer
\fancyfoot[C]{} % Empty center footer
\fancyfoot[R]{\thepage} % Page numbering for right footer
\renewcommand{\headrulewidth}{0pt} % Remove header underlines
\renewcommand{\footrulewidth}{0pt} % Remove footer underlines
\setlength{\headheight}{13.6pt} % Customize the height of the header

\numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)

\setlength\parindent{0pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text

%----------------------------------------------------------------------------------------
%	TITLE SECTION
%----------------------------------------------------------------------------------------

\newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height

\title{
\normalfont \normalsize
\textsc{中国科学院大学\ 计算机与控制学院} \\ [25pt] % Your university, school and/or department name(s)
\horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule
\huge 模式识别第二次作业 \\ % The assignment title
\horrule{2pt} \\[0.5cm] % Thick bottom horizontal rule
}

\author{黎吉国 \quad 201618013229046} % Your name (a bare & here is a "Misplaced alignment tab" error)

\date{\normalsize\today} % Today's date or a custom date

\begin{document}

\maketitle % Print the title
\newpage
\section{1st}
\begin{enumerate}
  \item 设一维特征空间中的窗函数
  \[\varphi (u)=
  \begin{cases}
    1 & \text{if } |u|<1/2 \\
    0 & \text{otherwise}
  \end{cases}
  \]
  有$n$个样本$x_i,\ i=1,2,\ldots,n$，采用宽度为$h_n$的窗函数，请写出概率密度函数$\rho(x)$的Parzen窗估计$\rho_n(x)$。
  \item 给定一维空间三个样本点，请写出概率密度函数$\rho(x)$的最近邻(1-NN)估计并画出概率密度函数曲线图。
\end{enumerate}
\textbf{Solution:}
\begin{enumerate}
  \item 使用矩形窗的概率密度函数的Parzen窗估计为
  \[ \rho_n(x)=\frac{k_n/n}{V_n}=\frac{1}{n}\sum_{i=1}^n \frac{1}{V_n}\varphi\left(\frac{x-x_i}{h_n}\right),\quad V_n=\frac{1}{\sqrt{n}} \]
  \item $k=1,n=3$，使用矩形窗核函数，则可得
  \[
  \rho(x)=
  \begin{cases}
    \frac{1}{6(-1-x)} & \text{if } x<-1\\
    \frac{1}{6(x+1)}  & \text{if } -1<x<-1/2\\
    \frac{1}{-6x}     & \text{if } -1/2<x<0 \\
    \frac{1}{6x}      & \text{if } 0<x<1 \\
    \frac{1}{6(2-x)}  & \text{if } 1<x<2 \\
    \frac{1}{6(x-2)}  & \text{if } x>2
  \end{cases}
  \]
  其函数图像如下：
  \begin{figure}[H]
  \centering
  \includegraphics[width=4in,height=3in]{rho.jpg}
  \caption{1-NN得到的概率密度函数}
  \label{fig:graph}
  \end{figure}
\end{enumerate}

\newpage
\section{2nd}
consider data $D=\left\{\begin{pmatrix} 1\\ 1 \end{pmatrix} ,\begin{pmatrix} 3\\ 3 \end{pmatrix},\begin{pmatrix} 2\\ * \end{pmatrix} \right\}$,
samples from a two-dimensional (separable) distribution $p(x_1,x_2)=p(x_1)p(x_2)$, with
\[ p(x_1)\sim
\begin{cases}
  \frac{1}{\theta_1}e^{-\theta_1 x_1} & \text{if } x_1\ge 0\\
  0 & \text{otherwise}
\end{cases}
 \]
 \[ p(x_2)\sim U(0,\theta_2)=
 \begin{cases}
   \frac{1}{\theta_2} & \text{if } 0\le x_2 \le \theta_2\\
   0 & \text{otherwise}
 \end{cases}
  \]
  as usual, * represents a missing feature value.
  \begin{enumerate}
  \item Start with an initial estimate $\theta^0=(2\ 4)^T$ and analytically calculate $Q(\theta,\theta^0)$ --- the \textbf{E step} in the EM algorithm. Be sure to consider the normalization of your distribution.
  \item Find the $\theta$ that maximizes your $Q(\theta,\theta^0)$ --- the \textbf{M step}.
  \item Plot your data on a two-dimensional graph and indicate the new parameter estimates.
\end{enumerate}
\textbf{基础知识：}\\
E step:求解一个条件期望
\[
Q(\theta,\theta^i)=E_{D_g}\{ \ln P(D_g,D_b;\theta)|D_g;\theta^i \}
\]
M step:最大化得到下一次迭代的$\theta$:
\[ \theta^{i+1}=\arg\max_{\theta} Q(\theta,\theta^{i}) \]
\textbf{Solution:}
\begin{enumerate}
  \item  \textbf{E step:}\\
  \[
  \begin{split}
    Q(\theta,\theta^i) &=E_{D_g}\{ \ln P(D_g,D_b;\theta)|D_g;\theta^i \} \\
    &= \int_{-\infty}^{+\infty}\Bigl[ \sum_{k=1}^{2}\ln p(x_k|\theta)+ \ln p\bigl( (2\ x_{32})^T |\theta \bigr) \Bigr] P(x_{32}|\theta^0,x_{31}=2) \,dx_{32}\\
    &= \sum_{k=1}^{2} \ln p(x_k|\theta) +\int_{-\infty}^{+\infty}\ln p\bigl((2\ x_{32})^T|\theta\bigr)\frac{p( (2\ x_{32})^T|\theta^0 )}{\int_{-\infty}^{+\infty} p( (2\ x_{32}')^T |\theta^0)\,dx_{32}'}\,dx_{32} \\
    &= \sum_{k=1}^{2} \ln p(x_k|\theta) +\int_{-\infty}^{+\infty}\ln p\bigl((2\ x_{32})^T|\theta\bigr)\frac{e^{-4}/8}{e^{-4}/2}\,dx_{32}\\
    &= \frac{1}{\theta_1 \theta_2}(e^{-2\theta_1}+e^{-\theta_1}+e^{-3\theta_1})
    % NOTE(review): several integration steps are compressed before the last
    % line; verify the closed form of Q against the E-step integral above.
  \end{split}
  \]
  \item
  \item
\end{enumerate}

\newpage
\section{3rd}
用最大似然估计法估计类别$\omega_i$的先验概率$P(\omega_i)$。随机、独立地抽取$n$个样本，如果第$k$个样本属于$\omega_i$，则$z_{ik}=1$，否则$z_{ik}=0$。
\begin{enumerate}
  \item 写出$P(z_{i1},\ldots,z_{in}|P(\omega_i))$的表达式。
  \item 给出$P(\omega_i)$的最大似然估计。
\end{enumerate}
\begin{enumerate}
  \item
  易知$z_{ik},k=1,2,\ldots,n$是一个伯努利序列，有$P(z_{ik}|P(\omega_i))=P(\omega_i)^{z_{ik}}(1-P(\omega_i))^{1-z_{ik}}$，由独立性可得
  \[ P(z_{i1},z_{i2}\ldots,z_{in}|P(\omega_i))= P(\omega_i)^{\sum_{k=1}^{n}z_{ik}}(1-P(\omega_i))^{n-\sum_{k=1}^{n}z_{ik}}\]
  \item 这里要求$P(z_{i1},\ldots,z_{in}|P(\omega_i))$的极大值。我们令似然函数$l(z)=\ln(P(z_{i1},\ldots,z_{in}|P(\omega_i)))$
  \[
  \begin{split}
  \hat{P}(\omega_i)&=\arg\max_{P(\omega_i)} l(z)\\
  % The RHS below is \nabla l(z) multiplied by P(\omega_i)(1-P(\omega_i));
  % setting it to zero gives the same stationary point.
  P(\omega_i)(1-P(\omega_i))\,\nabla_{P(\omega_i)} l(z)&=\sum_{k=1}^{n}z_{ik}(1-P(\omega_i)) + \Bigl(\sum_{k=1}^{n}z_{ik}-n\Bigr)P(\omega_i)\\
  &=0\\
  \hat{P}(\omega_i)&=\frac{\sum_{k=1}^{n}z_{ik}}{n}
  \end{split}
  \]
\end{enumerate}

\newpage
\section{4th}
现在有两类二维样本如下：
\[
w_1:(-1,0),(-2,0),(-2,1),(-2,-1),(-3,-1),(-2,0.5),(-2,-0.5),(0,0)
\]
\[
w_2:(-1,0),(0,0),(1,1),(2,1),(2,-1)
\]
\begin{enumerate}
  \item 请分别采用1近邻和3近邻设计分类器
  \item 可能出现不同类的样本都是某个点近邻的情形。针对此情形，请采用拒绝分类规则重新设计分类器。
  \item K近邻算法的优缺点是什么。
\end{enumerate}
我们设定
\[w_1:x_{11},x_{12},\ldots,x_{18}\]
\[w_2:x_{21},x_{22},\ldots,x_{25}\]
\begin{enumerate}
  \item
  则最近邻分类器为
  \[ g_1(x)= \min{\| x-x_{1i} \|},\quad i=1,2,\ldots,8  \]
  \[ g_2(x)= \min{\| x-x_{2i} \|},\quad i=1,2,\ldots,5  \]
  \[x\in
  \begin{cases}
    w_1 & \text{if } g_1(x)\le g_2(x)\\
    w_2 & \text{if } g_2(x)<g_1(x)
  \end{cases}
  \]
  3 近邻分类器为
  \[ g_1(x)= \min_3{\| x-x_{1i} \|},\quad i=1,2,\ldots,8  \]
  \[ g_2(x)= \min_3{\| x-x_{2i} \|},\quad i=1,2,\ldots,5  \]
  $\min_3$表示求第三小。
  \[x\in
  \begin{cases}
    w_1 & \text{if } g_1(x)\le g_2(x)\\
    w_2 & \text{if } g_2(x)<g_1(x)
  \end{cases}
  \]
  \item 若不同类样本都是某个点的近邻，则拒绝分类，我们用$w_j$表示拒绝分类，则新的分类器是：\\
  最近邻分类器为
  \[ g_1(x)= \min{\| x-x_{1i} \|},\quad i=1,2,\ldots,8  \]
  \[ g_2(x)= \min{\| x-x_{2i} \|},\quad i=1,2,\ldots,5  \]
  \[x\in
  \begin{cases}
    w_1 & \text{if } g_1(x)<g_2(x)\\
    w_2 & \text{if } g_2(x)<g_1(x)\\
    w_j & \text{if } g_2(x)=g_1(x)
  \end{cases}
  \]
  3 近邻分类器为
  \[ g_1(x)= \min_3{\| x-x_{1i} \|},\quad i=1,2,\ldots,8  \]
  \[ g_2(x)= \min_3{\| x-x_{2i} \|},\quad i=1,2,\ldots,5  \]
  $\min_3$表示求第三小。
  \[x\in
  \begin{cases}
    w_1 & \text{if } g_1(x)<g_2(x)\\
    w_2 & \text{if } g_2(x)<g_1(x)\\
    w_j & \text{if } g_2(x)=g_1(x)
  \end{cases}
  \]
  \item k近邻算法思想简单，容易实现，而且样本增减容易实现，容易实现在线学习。但是它的计算复杂度比较高，为$O(dn^2)$，$n$为样本个数，$d$为特征向量的维数。
\end{enumerate}

\newpage
\section{实验题}
请使用KNN(k-nearest neighbor)对MNIST数据集进行分类，比较不同参数下的结果进行讨论，请把实验结果和相应的讨论写在提交的pdf中。

\textbf{不同的k对应的测试结果如下：}(核心代码及测试，见附录)
\begin{table}[!htb]
\centering % use \centering inside the float, not a centering environment around it
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|}
\hline
k & 1&2&3&4&5&6&7&8&9&10\\
\hline
accuracy& 0.9660 & 0.9560 & 0.9660 & 0.9680 &0.9660 & 0.9640& 0.9680 & 0.9660 & 0.9640 & 0.9560\\
\hline
\end{tabular}
\end{table}
从上表可以看出，不同的$k$值的选取对$k$近邻算法的识别率是有影响的，不过整体来说，识别率还算稳定。

\end{document}
