\documentclass[paper=a4, fontsize=11pt]{scrartcl} % A4 paper and 11pt font size

\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\usepackage{fourier} % Use the Adobe Utopia font for the document - comment this line to return to the LaTeX default
\usepackage[english]{babel} % English language/hyphenation
\usepackage{amsmath,amsfonts,amsthm} % Math packages
\usepackage[UTF8]{ctex}
\usepackage{graphicx}
\usepackage{float}
\usepackage[top=2cm, bottom=2cm, left=2cm, right=2cm]{geometry}
\usepackage[linesnumbered,boxed,lined,ruled]{algorithm2e}
\usepackage{algorithmicx}
\usepackage{algpseudocode}

\usepackage{xcolor}
\usepackage[framed,numbered,autolinebreaks,useliterate]{mcode}
\usepackage{listings}
\usepackage{multirow}
\usepackage{sectsty} % Allows customizing section commands
\allsectionsfont{\centering \normalfont\scshape} % Make all sections centered, the default font and small caps
\usepackage{multirow}
\usepackage{fancyhdr} % Custom headers and footers
\pagestyle{fancyplain} % Makes all pages in the document conform to the custom headers and footers
\fancyhead{} % No page header - if you want one, create it in the same way as the footers below
\fancyfoot[L]{} % Empty left footer
\fancyfoot[C]{} % Empty center footer
\fancyfoot[R]{\thepage} % Page numbering for right footer
\renewcommand{\headrulewidth}{0pt} % Remove header underlines
\renewcommand{\footrulewidth}{0pt} % Remove footer underlines
\setlength{\headheight}{13.6pt} % Customize the height of the header

\numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)
\numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4)

\setlength\parindent{0pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text

%----------------------------------------------------------------------------------------
%	TITLE SECTION
%----------------------------------------------------------------------------------------

\newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height

\title{
\normalfont \normalsize
\textsc{中国科学院大学\ 计算机与控制学院} \\ [25pt] % Your university, school and/or department name(s)
\horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule
\huge 模式识别第三次作业 \\ % The assignment title
\horrule{2pt} \\[0.5cm] % Thick bottom horizontal rule
}

\author{黎吉国\quad 201618013229046} % Your name (& is a special character in LaTeX and must not appear unescaped)

\date{\normalsize\today} % Today's date or a custom date

\begin{document}

\maketitle % Print the title
\newpage
\section{answer for 1st}
将样本规范化，增广化，则可得
\[ X=\{ x_1=(1,1,4)^T,x_2=(1,2,3)^T,x_3=(-1,-4,-1)^T,x_4=(-1,-3,-2)^T \}\]
下面是训练过程,$X_K$表示$X$中使得$a^T x<0$的样本的集合
\[
\begin{split}
a_0&=(0,1,0)^T \\
X_K&=\{ x_3,x_4\}\\
a_1&=a_0+\sum_{x\in X_K}x= a_0+x_3+x_4 = (-2,-6,-3)^T\\
X_K&=\{ x_1,x_2 \}\\
a_2&=a_1+x_1+x_2=(0,-3,4)^T\\
X_K&=\{ \}
\end{split}
\]
此时对所有样本均有$a_2^T x>0$（如$a_2^T x_4=9-8=1>0$），即$X_K$为空，算法收敛。
则线性判别函数为$ g(y)=a^T y=-3y_1+4y_2 $.

\newpage
\section{answer for 2nd}
决策面是三条直线$g_1(x)=g_2(x),g_1(x)=g_3(x),g_2(x)=g_3(x)$的一部分。
\[
\begin{split}
g_1(x)&=g_2(x) \to x_1=\frac{1}{2}\\
g_1(x)&=g_3(x) \to x_1+2x_2-1=0 \\
g_2(x)&=g_3(x) \to x_1+2x_2=0 \\
\end{split}
\]
画图如下：
\begin{figure}[H]
  \centering
  \includegraphics[width=4in,height=3in]{ld.jpg}
  \caption{判别区域示意图}
  \label{fig:regions}
\end{figure}
为什么不存在不确定区域：\\
任取不在判别面上的点$x=(x_1',x_2')$，则有$g_1(x),g_2(x),g_3(x)$两两不相等，此时取$g_i(x)=\max{ \{ g_1(x),g_2(x),g_3(x)\} }$，
则可知其满足判决条件，有$x\in \omega_i$，所以不存在不确定区域。
\newpage
\section{answer for 3rd}
\renewcommand{\theenumi}{(\alph{enumi})}
\begin{enumerate}
  \item 先证$x_a$到超平面$g(x)=0$的距离是$|g(x_a)|/\|w\|$\\
  \begin{figure}[H]
    \centering
    \includegraphics[width=2.5in,height=3in]{distance.jpg}
    \caption{$x$到$g(x)=0$的距离}
    \label{fig:distance}
  \end{figure}
  如上图所示，$r=\|x_a-x_p\|$便是$x_a$到判决面$g(x)=0$的距离。
  \[
  \begin{split}
    x_a&=x_p + r\frac{w}{\|w\|}\\
    g(x_a)&=w^T x_a+w_0\\
    &=w^T(x_p + r \frac{w}{\| w\|})+w_0\\
    &=w^Tx_p+w_0+r\frac{w^T w}{\|w \|}\\
    &=r\|w \| \\
    \text{we have } r&=\frac{g(x_a)}{\| w \|}
  \end{split}
  \]
  再证超平面$g(x)=0$上使得$\| x-x_a\|^2$最小的点恰为$x_p$，即$x_a$到$x_p$的距离就是$x_a$到超平面的距离。\\
  令$f(x)=\|x-x_a\|^2,x\in \{ x| g(x)=0 \}$\\
  \[
  \begin{split}
    f(x)&=\| x-x_a \|^2\\
    &=(x-x_a)^2\\
    &=(x-(x_p+r\frac{w}{\| w \|}))^2\\
    &=(x-x_p)^2-2r(x-x_p)^Tw/\|w\|+r^2\\
    &=(x-x_p)^2+r^2\\
    \text{when } x&=x_p,f(x)=\min{f(x)}=r^2
  \end{split}
  \]
  可知$x_p$便是使得$f(x)$最小的点，同时该点到$x_a$的距离便是$x_a$到平面$g(x)=0$的距离。
\item 由上图易知
\[
x_p=x_a-r\frac{w}{\|w\|}=x_a-\frac{g(x_a)}{\|w\|^2}w
\]
\end{enumerate}
\newpage
\section{answer for 4th}
\renewcommand{\theenumi}{\arabic{enumi}}
\begin{enumerate}
  \item batch perception\\
  $\omega_1$和$\omega_2$训练过程如下:(过程比较繁琐，没有全部列出)
\begin{table}[H]
  \centering
  \begin{tabular}{|c|c|p{3.8cm}|}
    \hline
    k& a& X\_k(一行代表一个样本) \\
    \hline
    1& ( 0  -17.9000   33.3000)&
    1.0000    0.1000    1.1000
    1.0000    6.8000    7.1000
   .........     .........    .........
   -1.0000   -8.4000   -3.7000
   -1.0000   -4.1000    2.2000\\
   \hline
   2& 1.0000  -29.3000   23.7000 &
   1.0000   -3.5000   -4.1000
    1.0000   -0.8000   -1.3000
   -1.0000   -7.1000   -4.2000\\
   \hline
   ...&...&...\\
   \hline
   23&34.0000  -30.4000   34.1000&1.0000    4.1000    2.8000\\
   \hline
   24&34.0000  -30.4000   34.1000& \\
   \hline
  \end{tabular}
  \caption{$\omega_1$和$\omega_2$的训练过程}
\end{table}
\begin{table}[H]
  \centering
  \begin{tabular}{|c|c|p{3.8cm}|}
    \hline
    k& a& X\_k(一行代表一个样本) \\
    \hline
    1& 0   55.2000  -43.7000&
    1.0000    7.1000    4.2000
    1.0000   -1.4000   -4.3000
   .........     .........    .........
   -1.0000    5.1000   -1.6000
   -1.0000   -1.9000   -5.1000\\
   \hline
   2& -1.0000   52.3000  -45.8000 &
   -1.0000   -2.9000   -2.1000\\
   \hline
   ...&...&...\\
   \hline
   16&-19.0000   41.4000  -48.6000&-1.0000   -2.9000   -2.1000\\
   \hline
   17&-19.0000   41.4000  -48.6000& \\
   \hline
  \end{tabular}
  \caption{$\omega_3$和$\omega_2$的训练过程}
\end{table}
\textbf{比较结果：}\\
前者需要24次迭代，后者只需要17次迭代。至于原因，则是$\omega_1$和$\omega_2$之间的分离度没有$\omega_3$和$\omega_2$的分离度高，三类样本的分布如下：
\begin{figure}[H]
  \centering
  \includegraphics[width=3in,height=2.5in]{w1w2w3.jpg}
  \caption{$\omega_1,\omega_2,\omega_3$的样本分布}
  \label{fig:samples}
\end{figure}
\item 使用Ho-Kashyap算法对$\omega_2$和$\omega_4$进行分类的过程如下：\\
$b_{min}=1,a_0=(0,0,0)^T,b_0=(1,1,\ldots,1)^T$
\begin{table}[H]
  \centering
\begin{tabular}{|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|}
\hline
k& k=0& k=1& k=2& k=3& k=4\\
\hline
e&
-1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   -1
   &
   0.4045
  -1.0833
  -0.1129
   0.1944
  -0.0797
  -0.6594
  -0.5545
  -0.6226
   0.5619
  -0.2530
  -0.6754
  -0.0714
  -0.4033
   0.0066
  -0.2072
  -0.8484
  -0.2925
   0.1584
   0.0692
   0.0593
   &
   0.1572
  -1.0385
   0.1831
   0.2264
   0.2716
  -0.5256
  -0.4272
  -0.5609
   0.0401
  -0.0353
  -0.5770
  -0.1168
  -0.2851
   0.0509
  -0.1599
  -0.7530
  -0.1854
   0.0360
   0.1826
   0.0987
   &
   0.1938
  -1.0153
   0.0473
   0.0710
   0.0144
  -0.4421
  -0.3484
  -0.5269
   0.2933
   0.1050
  -0.5024
  -0.1422
  -0.1976
   0.0034
  -0.1214
  -0.6802
  -0.1056
   0.0882
   0.0228
   0.0269
   &
   0.1111
  -0.9796
   0.1336
   0.1605
   0.1851
  -0.3562
  -0.2626
  -0.4718
   0.0396
   0.0494
  -0.4688
  -0.1562
  -0.1482
   0.0299
  -0.0982
  -0.6524
  -0.0583
   0.0129
   0.0961
   0.0521\\
   \hline
\end{tabular}
\caption{使用Ho-kashyap算法对$\omega_2$和$\omega_4$分类的训练过程}
\end{table}
由上表可以看出负的error的绝对值是在慢慢减小的。

\end{enumerate}

\end{document}
