\documentclass[twoside,a4paper]{ctexart}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

% useful packages.
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{fancyhdr}
\usepackage{layout}

% some common command
\newcommand{\dif}{\mathrm{d}}
\newcommand{\avg}[1]{\left\langle #1 \right\rangle}
\newcommand{\difFrac}[2]{\frac{\dif #1}{\dif #2}}
\newcommand{\pdfFrac}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\OFL}{\mathrm{OFL}}
\newcommand{\UFL}{\mathrm{UFL}}
\newcommand{\fl}{\mathrm{fl}}
\newcommand{\op}{\odot}
\newcommand{\Eabs}{E_{\mathrm{abs}}}
\newcommand{\Erel}{E_{\mathrm{rel}}}

\begin{document}

\pagestyle{fancy}
\fancyhead{}
\lhead{邵柯欣 (3200103310)}
\chead{Data Modeling homework 3}
\rhead{\today}

\section*{I. （4.1）用极大似然估计法推出朴素贝叶斯法中的概率估计公式(4.8)及公式(4.9).}

\subsection*{解：}
公式(4.8)
\begin{equation}
  P(Y = c_k) = \dfrac{\sum_{i = 1}^{N}I(y_i = c_k)}{N}, \quad k = 1,2,\dots,K
  \label{eq::01}
\end{equation}
设$P(Y = c_k) = \theta_k \Longrightarrow P(Y \ne c_k) = 1 - \theta_k$。
设训练集$T$中$c_k$的数量是$n_k$，则极大似然估计的似然函数为
\[P(y_1,y_2,\dots,y_N \mid \theta_k) = \prod^N_{i = 1}P(y_i \mid \theta_k) = (\theta_k)^{n_k}(1 - \theta_k)^{N - n_k}\]
取对数得
\[\ln P(y_1,y_2,\dots,y_N \mid \theta_k) = \ln\bigl((\theta_k)^{n_k}(1 - \theta_k)^{N - n_k}\bigr) = n_k\ln\theta_k + (N - n_k)\ln(1 - \theta_k)\]
关于$\theta_k$求导并令导数为零
\[\dfrac{\partial \ln P(y_1,y_2,\dots,y_N \mid \theta_k)}{\partial \theta_k} = \dfrac{n_k}{\theta_k} - \dfrac{N - n_k}{1 - \theta_k} = 0\]
解得$\theta_k = \dfrac{n_k}{N} = \dfrac{\sum^N_{i = 1}I(y_i = c_k)}{N}$.

公式(4.9)
\begin{equation}
  P(X^j = a_{jl}|Y = c_k) = \dfrac{\sum_{i = 1}^{N}I(x^j_i = a_{jl}, y_i = c_k)}{\sum^N_{i = 1}I(y_i = c_k)}, \quad j = 1,2,\dots,n; \ l = 1,2,\dots,S_j; \ k = 1,2,\dots,K
  \label{eq::02}
\end{equation}
\[P(X^j = a_{jl} \mid Y = c_k) = \dfrac{P(X^j = a_{jl},Y = c_k)}{P(Y = c_k)}\]
设$P(X^j = a_{jl},Y = c_k) = \theta$，并设样本中满足$x_i^j = a_{jl}, y_i = c_k$的数目为$n$，则似然函数为
\[P((x^j_1,y_1), (x^j_2,y_2), \dots, (x^j_N,y_N)) = \prod^N_{i = 1}P(x_i^j,y_i) = \theta^n(1 - \theta)^{N - n}\]
同理极大化似然函数，解得$\theta = \dfrac{n}{N}$，其中
\[n = \sum_{i = 1}^N I(x_i^j = a_{jl}, y_i = c_k) \Longrightarrow \theta = \dfrac{\sum_{i=1}^{N}I(x_{i}^{j} = a_{jl}, y_{i} = c_{k})}{N}\]

\section*{II. （4.2）用贝叶斯估计法推出朴素贝叶斯法中的概率估计公式(4.10)及公式(4.11).}

\subsection*{解：}
公式(4.11)
\begin{equation}
  P_{\lambda}(Y = c_k) = \dfrac{\sum^N_{i = 1}I(y_i = c_k) + \lambda}{N + K\lambda}
  \label{eq::03}
\end{equation}
设$P_{\lambda}(Y = c_k) = \theta$，$\theta$的先验分布的概率密度函数是均匀分布$p(\theta) = 1$。
由贝叶斯估计算法，
\[P(\theta \mid T) = \dfrac{P(\theta,T)}{P(T)} = \dfrac{P(\theta)P(T \mid \theta)}{P(T)}\]
对贝叶斯估计作类似极大似然的处理，其中$P(Y \ne c_k) = 1 - \theta$，得
\[P(y_1,y_2,\dots,y_N \mid \theta) = \theta^{n_k + \lambda}(1 - \theta)^{N - n_k + K\lambda}\]
使用极大化后验概率
\[\theta' = \mathop{\arg\max}_{\theta} P(y_1,y_2,\dots,y_N \mid \theta) = \mathop{\arg\max}_{\theta} \theta^{n_k + \lambda}(1 - \theta)^{N - n_k + K\lambda}\]
对最后一部分取对数，然后极大化，得到
\[\theta = \dfrac{n_k + \lambda}{N + K\lambda} = \dfrac{\sum^N_{i = 1}I(y_i = c_k) + \lambda}{N + K\lambda}\]

公式(4.10)\\
设训练集$T$中类标记$c_{k}$的数量为$n_{c_{k}}$，我们将数据集$T$中类标记是$c_{k}$的数据挑选出来构造新的数据集$T_{c_k}$，则
\[P_{\lambda}(X^j = a_{jl} \mid Y = c_k, T) = P_{\lambda}(X^j = a_{jl} \mid T_{c_k})\]
设$P_{\lambda}(X^j = a_{jl} \mid T_{c_k}) = \theta$，$\theta$的先验分布是均匀分布，概率密度函数是$p(\theta) = 1$，同理
\[\theta' = \mathop{\arg\max}_{\theta} \theta^{n + \lambda}(1 - \theta)^{n_{c_k} + S_j\lambda - n - \lambda}\]
\[n = \sum_{i=1}^N I(x_i^j = a_{jl}, y_i = c_k),\quad n_{c_k} = \sum_{i=1}^N I(y_i = c_k) \Longrightarrow \theta' = \dfrac{n + \lambda}{n_{c_k} + S_j\lambda}\]
\[P_{\lambda}(X^j = a_{jl} \mid Y = c_k, T) = \dfrac{\sum_{i=1}^N I(x_{i}^j = a_{jl}, y_i = c_k) + \lambda}{\sum_{i=1}^N I(y_i = c_k) + S_j\lambda}\]
\end{document}
