\section{Expectation Maximization (EM)}
The Expectation Maximization (EM) algorithm is one approach to unsupervised, semi-supervised, or lightly supervised learning. Given a model $p$ (parameterized by $\theta$) of observable variables $\bfx_1, \cdots, \bfx_n$ and latent variables $\bfz_1, \cdots, \bfz_m$, EM is an efficient method that \textbf{approximately} solves the following optimization problem,
\begin{equation}
	\theta^* = \argmax_{\theta} \max_{\bfz_1, \cdots, \bfz_m} p(\bfx_1, \cdots, \bfx_n, \bfz_1, \cdots, \bfz_m \mid \theta)
\end{equation}

\begin{definition}[EM Principle]
	Let $\cX = \{\bfx_1, \cdots, \bfx_n\}$ be the observable variables, $\cX_L = \{\bfz_1, \cdots, \bfz_m\}$ be the latent variables. The EM method is to repeat the following procedure until the convergence of $\theta$. \marginpar{\footnotesize Intuitively, EM optimizes two groups of coordinates alternately. When optimizing one group, the other group is fixed.
	
	This is called ``coordinate descent''.}
	\begin{enumerate}
		\item \textbf{Expectation step}: Calculate a function $Q$ of $\theta$, given  previous-estimated $\theta^{(j)}$,
		$$
Q\left({\theta}; {\theta}^{(j)}\right) := \mathbb{E}_{\mathcal{X}_{L}}\left[L\left(\mathcal{X}, \mathcal{X}_{L} \mid {\theta}\right) \lvert\  \mathcal{X}, {\theta}^{(j)}\right]
$$
Here, $L(\mathcal{X}, \mathcal{X}_{L} \mid {\theta}) = \log p(\mathcal{X}, \mathcal{X}_{L} \mid {\theta})$ is the log-likelihood function.
	\item \textbf{Maximization step}: Estimate new parameter ${\theta}^{(j + 1)}$ by maximizing the function $Q$,
	$$
	\theta^{(j + 1)} = \argmax_\theta Q\left({\theta}; {\theta}^{(j)}\right).
	$$
	\end{enumerate}
\end{definition}
\begin{theorem}[EM's Correctness]
	 The EM method is guaranteed to converge to a stationary point, i.e., a point where the gradient of the likelihood is $0$.
\end{theorem}
\textit{Proof Sketch.}

\subsection{K-Means Clustering}
\begin{definition}[K-Means Problem]
	Given $d$-dimensional sample vectors $\cX = \{ \bfx_1, \cdots, \bfx_n\}$ and an assignment function $c(\bfx): \R^d \rightarrow \{1, \cdots, k\}$ with prototypes $\bfmu_{c(\cdot)} \in \cY \subset \R^d$. The k-means finds the $c(\cdot)$ and $\cY$ that minimize 
	$$
	R(c, \cY) = \sum_{\bfx \in \cX} \norm{\bfx - \bfmu_{c(\bfx)}}^2_2.
	$$
\end{definition}
The k-means problem is a mixture problem of combinatorial and continuous optimization, which is hard to optimize directly. Practically, K-Means relies on the \textbf{Hard-EM} technique, as shown in Alg.~\ref{alg:kmeans}.

\begin{algorithm}[H]
\setstretch{1.3}
 \caption{K-Means, an example of Hard-EM \label{alg:kmeans}}
\SetAlgoLined
 
 \While{$c(\bfx)$ and $\bfmu_{c}$ keep changing}{
  $c(\bfx) = \argmin_{c \in \{1, \cdots, k\}} \norm{\bfx - \bfmu_c}^2_2$\Comment*[r]{E-step: assign $\bfx$ to the nearest prototypes}
  $\displaystyle \bfmu_\alpha = \frac{1}{|N_\alpha|} \sum_{\bfx \in N_\alpha} \bfx$, where $N_\alpha = \{\bfx\mid c(\bfx) = \alpha\}$
  \Comment*[r]{M-step: update prototypes}
 }
\KwOut{ $c(\bfx), \forall \bfx \in \cX$}
\end{algorithm}

\subsection{Gaussian Mixture Models (GMM)}
In mixture models, data are assumed to be generated by a mixture of distributions $p(\bfx \mid \theta)$. Mixture models are generative: they try to describe all the data. 
\begin{definition}[GMM]
	A Gaussian mixture is a convex combination of $k$ Gaussian distributions,
	$$
p\left(\mathbf{x} \mid \pi_{1}, \ldots, \pi_{k}, {\theta}_{1}, \ldots, {\theta}_{k}\right)=\sum_{c \leq k} \pi_{c} p\left(\mathbf{x} \mid {\theta}_{c}\right), \where p(\bfx \mid \theta_c) = \cN(\bfx; \mu_c, \Sigma_c).
$$
Here, $\pi_c > 0$ is the mixture weight, denoting the prior probability that a sample is generated by the mixture Gaussian component $c$ with parameters $\theta_c = \{\mu_c, \Sigma_c \}$.
\end{definition}

To estimate parameters $\theta_c$, we can maximize its likelihood of sample feature vectors $\cX = \{ \bfx_1, \cdots, \bfx_n\}$. The log-likelihood is often computationally preferable, as
\begin{equation}
	L(\cX; \pi_{1}, \ldots, \pi_{k}, {\theta}_{1}, \ldots, {\theta}_{k}) = \sum_{\bfx \in \cX} \log \sum_{c\leq k}{\pi_c p(\bfx \mid \theta_c)}.
\end{equation}
However, to directly maximize it is still intractable, because the logarithm within the sum makes it a non-convex problem. Alg.~\ref{alg:gmm} shows the Soft-EM method that solves the GMM problem.

\begin{algorithm}[H]
\setstretch{1.4}
 \caption{GMM, an example of Soft-EM \label{alg:gmm}}
\SetAlgoLined
 
 \While{$\pi_c$, $\bfmu_{c}$, and $\Sigma_c$ keep changing}{
  $\gamma_{\bfx, c} = \displaystyle \frac{p(\bfx \mid c, \theta^{(j)})\ p(c \mid \theta^{(j)})}{p(\bfx \mid \theta^{(j)})}$\Comment*[r]{E-step: soft-assign sample $\bfx$ to clusters}
  \vspace{1mm}
  $\displaystyle \bfmu_c^{(j + 1)} = \frac{\sum_{\bfx} \gamma_{\bfx, c}\, \bfx}{\sum_\bfx \gamma_{\bfx, c}}$,\ \ $\displaystyle \Sigma_c^{(j + 1)} = \frac{\sum_{\bfx} \gamma_{\bfx, c} (\bfx - \bfmu_c^{(j+1)})(\bfx - \bfmu_c^{(j+1)})^\top}{\sum_\bfx \gamma_{\bfx, c}}$\Comment*[r]{M-step: update parameters}
  \vspace{2mm}
  $\displaystyle \pi_c^{(j + 1)} = \frac{1}{|\cX|}\sum_{\bfx} \gamma_{\bfx, c}$\;
 }
\KwOut{ $\gamma_{\bfx, c}, \forall \bfx \in \cX$, and parameters $\{\pi_c, \bfmu_c, \Sigma_c\}_{c \leq k}$}
\end{algorithm}
Here, $\gamma_{\bfx, c}$ is the ``responsibility'', denoting the probability that $\bfx$ belongs to component $c$, i.e. $\gamma_{\bfx, c} = p(y = c \mid \bfx, \theta)$. 

Next, we will prove the correctness of the EM method.
\begin{proof}
	
\end{proof}
\section{Dirichlet Process}

\begin{definition}[Beta Distribution]
	$$
	\mathrm{Beta}(\theta; a, b) = \frac{1}{\beta(a, b)} \theta^{a-1} (1-\theta)^{b - 1}, \where \beta(a, b) = \frac{\Gamma(a)\Gamma(b)}{\Gamma(a + b)}
	$$
	Here, $\Gamma(z)$ is the Gamma function, $\Gamma(z) = \int_0^\infty x^{z-1} e^{-x} \dx$.
\end{definition}
\remark The Beta distribution models the posterior of the success probability $\theta$ of a Bernoulli process after observing $a - 1$ successes and $b - 1$ failures. 

Please note that the Binomial distribution $\mathrm{Bin}(k; n, p) = C(n, k) p^k (1-p)^{n-k}$ is a function of number of success trials $k \in \mathbb{N}$, while the Beta distribution $\mathrm{Beta}(\theta; a, b)$ is a function of the success probability $\theta \in [0, 1]$.

\begin{definition}[Dirichlet Distribution]
	Dirichlet distribution is the multivariate generalization of the beta distribution. Given $\bfx = \{ x_1, \cdots, x_n \}\ (x_i \in [0, 1],\ \sum_{i \leq n} x_i = 1)$ and $\boldsymbol{\alpha} = \{ \alpha_1, \cdots, \alpha_n \}\ (\alpha_i > 0)$,
	$$
	\mathrm{Dir}(\bfx; \boldsymbol{\alpha}) = \frac{1}{\beta(\boldsymbol{\alpha})} \prod_{k \leq n} x_k^{\alpha_k - 1}, \where \beta(\boldsymbol{\alpha}) = \frac{\prod_{k\leq n} \Gamma(\alpha_k)}{\Gamma(\sum_{k\leq n} \alpha_k)}
	$$
\end{definition}

\begin{definition}[Dirichlet Process]
	A Dirichlet Process $DP(\alpha, H)$ is a stochastic process whose sample paths are probability distributions on a space $\Theta$. Here, $\alpha > 0$ is the ``concentration parameter'', and $H$ is the base measure on $\Theta$. For any measurable finite partition of $\Theta$, denoted by $\{B_1, \cdots, B_n\}$, if $G\sim DP(\alpha, H)$, then 
	$$
	(G(B_1), \cdots, G(B_n)) \sim \mathrm{Dir}(\alpha H(B_1), \cdots, \alpha H(B_n))
	$$
\end{definition}
\remark Note that when $H$ is continuous, the probability that any two samples from $H$ are equal is precisely zero. However, $G$ is a discrete distribution, made up of a countably infinite number of point masses, and thus there is a non-zero probability of two samples colliding.

\begin{theorem}[De Finetti's Theorem]
Let $(X_1, X_2, \cdots)$ be an infinitely exchangeable sequence of random variables. Then, $\forall n$:
$$
p(X_1, \cdots, X_n) = \int \Bigl(\prod_{i \leq n} p(X_i \mid G)\Bigr)\ \mathrm{d} P(G),
$$
for some random variable $G$.	
\end{theorem}
\remark An infinitely exchangeable sequence can be represented by a product of conditionally independent random variables.

\subsection{Stick-Breaking Process}
Stick-breaking process provides a constructive way to draw samples from $DP(\alpha, H)$.

\begin{algorithm}[H]
\setstretch{1.3}
 \caption{Stick-Breaking Process}
\SetAlgoLined
 
 \For{$k = 1, 2 \cdots$}{
  $\beta_k \sim \mathrm{Beta}(1, \alpha)$\;
  $\rho_k = \beta_k \prod_{i=1}^{k - 1} (1 - \beta_i) $
  \Comment*[r]{alternatively, $\rho_k = \beta_k (1 - \sum_{i=1}^{k-1}\rho_i) $}
  $\theta_k \sim H$\;
 }
\KwOut{ $G(\theta) = \sum_{k=1}^\infty \rho_k \delta_{\theta_k}(\theta) $
}
\end{algorithm}

Here, $\delta_t(\theta)$ is the Dirac delta, i.e., a point mass located at $t$.

\begin{definition}[GEM Distribution]
	The distribution of $\{\rho_1, \rho_2, \cdots\}$ in the Stick-breaking process is called the GEM distribution, named after Griffiths-Engen-McCloskey.
	$$
	\{\rho_1, \rho_2, \cdots\} \sim \mathrm{GEM}(\alpha)\quad \Longrightarrow \quad \forall k, \rho_k = \beta_k (1 - \sum_{i=1}^{k-1}\rho_i), \where \beta_k \sim \mathrm{Beta}(1, \alpha).
	$$
\end{definition}

\subsection{Chinese Restaurant Process (CRP)}
The Chinese restaurant process (CRP) is a metaphor for the Dirichlet process.
\begin{definition}[CRP]
Let $\cP = \{\tau_1, \cdots, \tau_k \}$ denote a $k$-partition over the integers $\{1, \cdots, n\} $. The partition $\cP$ represents the table assignment, i.e., $|\tau_i|$ is the number of people sitting at table $i$.

	When a new person arrives, he can either join an existing table $i$ $(1 \leq i \leq k)$ with probability proportional to $|\tau_i|$, or start a new table with probability proportional to $\alpha$.
	More formally,
$$
p(n + 1 \text{ joins table } \tau \mid \cP) = \left\{ \begin{aligned}
	\frac{|\tau|}{\alpha + n} & \quad \tau \in \cP, &\quad (\text{to join existing table } \tau )\\
	\frac{\alpha}{\alpha + n} & \quad \tau \notin \cP. &\quad (\text{to start a new table})
\end{aligned}\right.
$$
\end{definition}
\remark The larger the $\alpha$ is, the greater the number of clusters (tables) is.

\begin{property}[CRP is Exchangeable]
	No matter in which order people come, the probability of a given partition $\cP$ is the same, i.e.,  
	\begin{equation}
		p(\cP) = \frac{\alpha^k}{\alpha (\alpha+1)\cdots (\alpha + n - 1)} \prod_{1 \leq i \leq k} (|\tau_i| - 1)!
	\end{equation}
\end{property}

\begin{property}[Number of Occupied Tables in CRP]
	The number of occupied tables in CRP after $N$ customers is
	\begin{equation}
		S(N) = \sum_{1 \leq i \leq N} \frac{\alpha}{\alpha + i - 1} \sim \mathcal{O}(\alpha\log N).
	\end{equation}
\end{property}
\begin{proof}
Let $\cP_i$ be the partition after $i$ customers.
The number of occupied tables after $N$ customers can be written as, 
\begin{align}
	S(N) & = \E{}{\sum_{1 \leq i \leq N} \Ind{\tau_i \notin \cP_{i-1}} } \\
	& = \sum_{1 \leq i \leq N} \E{}{\Ind{\tau_i \notin \cP_{i-1}}} \\
	& = \sum_{1 \leq i \leq N} p(\mathrm{start\ a\ new\ table} \mid n = i - 1) \\
	& = \sum_{1 \leq i \leq N} \frac{\alpha}{\alpha + i - 1}
\end{align}
\end{proof}

\begin{exercise}[Ex.8-2: Dirichlet Process]
	Consider the following algorithm for sampling from the Dirichlet process with base distribution $F_0$ and concentration parameter $\alpha$. 
	\begin{enumerate}
		\item Draw the first sample $X_1 \sim F_0$.
		\item For $i = 2, 3, \cdots $, draw
\begin{equation}
X_{i} \mid X_{1}, \ldots, X_{i-1}=\left\{\begin{array}{ll}
X \sim \hat{F}_{i-1}, & \text { with probability } p=\frac{i-1}{\alpha+i-1} \\
X \sim F_{0}, & \text { with probability } p=\frac{\alpha}{\alpha+i-1}
\end{array}\right.
\end{equation}
where $\widehat{F}_{i - 1}$ is the empirical distribution of $X_1, \cdots , X_{i - 1}$.
	\end{enumerate}
Find the asymptotics of the expected number of distinct samples drawn, as a function of the total number of
samples drawn: $X_1, \cdots , X_n$. Or equivalently, the number of occupied tables in the Chinese restaurant process
metaphor.
\end{exercise}
\begin{sol}
Note that the base distribution $F_0$ is continuous, so the probability that a new sample $X'$ equals any of the finitely many previous samples $X_1, \cdots, X_i$ is $0$. The expected number of distinct samples after $n$ samples are drawn is, 
\begin{align}
	S(n) & = \E{}{\sum_{i \leq n} \Ind{X_i \notin \{X_1, \cdots, X_{i-1} \}} } \\
	& = \sum_{i \leq n} p(X_i \sim F_0)\ p(X_i \notin \{X_1, \cdots, X_{i-1} \} \mid X_i \sim F_0) \\
	& = \sum_{i \leq n} \frac{\alpha}{\alpha + i - 1}
\end{align}

Then, it is easy to prove that $S(n) \sim O(\alpha \log n)$.

%\begin{align}
%\lim_{n \rightarrow +\infty} \frac{S(n)}{\alpha \log n} = 
%\end{align}
\end{sol}

\subsection{DP Mixture Model}
\begin{definition}
Let $\Theta$ be a set that parameterizes a set of probability distributions, and fix a base measure $H$ on $\Theta$. Here, we assume that $\Theta = \R $ and $H = \cN(\mu_0, \sigma_0)$ for some fixed $\mu_0 \in \R, \sigma_0 \in \R_+$. The DP Mixture model is defined as
	\begin{itemize}
		\item Probabilities of clusters (``mixture weights''): $\boldsymbol{\rho} = (\rho_1, \rho_2, \cdots) \sim GEM(\alpha)$,
		\item Centers of the clusters: $\mu_k \sim \cN(\mu_0, \sigma_0), \ k = 1,2,\cdots $,
		\item Assignments of data points to clusters: $z_i \sim \mathrm{Categorical}(\rho), \ i = 1,2,\cdots $
	\end{itemize}
\end{definition}

\subsection{Latent Dirichlet Allocation (LDA)}
Latent Dirichlet Allocation (LDA) is one of the most popular probabilistic topic models.
\begin{definition}
	Given $K$ topics and $V$ words in the vocabulary, for $M$ documents with $N$ words each.
	\begin{itemize}
		\item Distribution of topics in document $d$: $ \boldsymbol{\theta}_d \sim \mathrm{Dir}(\boldsymbol{\alpha});$
		\item What topic the word $w$ belongs to in document $d$: $z_{d, w} \sim \mathrm{Categorical}(\boldsymbol{\theta}_d); $
		\item Distribution of words in topic $k$: $ \boldsymbol{\psi}_k \sim \mathrm{Dir}(\boldsymbol{\beta}); $
		\item The word at position $w$ in document $d$: $w_{d, w} \sim \mathrm{Categorical}(\boldsymbol{\psi}_{z_{d, w}}) $
	\end{itemize} 
\end{definition}

\section{Sampling Methods}
\subsection{Markov-chain Monte Carlo (MCMC)}
Markov-chain Monte Carlo (MCMC) is a powerful framework, allowing sampling from a large class of distributions.

\begin{algorithm}[h]
\setstretch{1.25}
 \caption{MCMC Sampling}
\SetAlgoLined
 
 \For{$\tau = 1, 2 \cdots$}{
  $\bfz^* \sim q(\bfz \mid \bfz^{(\tau)})$\Comment*[r]{proposal distribution}
  $\alpha \sim \mathrm{Uniform}(0,1) $\Comment*[r]{acceptance threshold}
  \eIf{$\displaystyle A(\bfz^*, \bfz^{(\tau)}) = \min\left\{1, \frac{p(\bfz^*)\ q(\bfz^{(\tau)} \mid \bfz^*)}{p(\bfz^{(\tau)})\ q(\bfz^* \mid \bfz^{(\tau)})}\right\} > \alpha$}{$\bfz^{(\tau + 1)} = \bfz^*$}{$\bfz^{(\tau + 1)} = \bfz^{(\tau)}$}
 }
\KwOut{ $ \{\bfz^{(1)}, \bfz^{(2)}, \cdots \} $
}
\end{algorithm}

\subsection{Gibbs Sampling}
However, the convergence of MCMC is typically slow. Gibbs sampling is a simple and faster method that samples one random variable at a time from its conditional distribution, while keeping the remaining variables fixed to their current values.
The theory of MCMC guarantees that the stationary distribution of the samples generated is the target joint posterior. 

\begin{algorithm}[H]
\setstretch{1.25}
 \caption{Gibbs Sampling}
\SetAlgoLined
 Initialize: $ \bfz^{(0)} \sim q(\bfz)$\;
 \For{$\tau = 1, 2 \cdots$}{
	$\bfz_{1}^{(\tau)} \sim p\left(Z_{1}=\bfz_{1} \mid Z_{2}=\bfz_{2}^{(\tau-1)}, Z_{3}=\bfz_{3}^{(\tau-1)}, \cdots, Z_{D}=\bfz_{D}^{(\tau-1)}\right)$\;
	$\bfz_{2}^{(\tau)} \sim p\left(Z_{2}=\bfz_{2} \mid Z_{1}=\bfz_{1}^{(\tau)}, Z_{3}=\bfz_{3}^{(\tau-1)}, \cdots, Z_{D}=\bfz_{D}^{(\tau-1)}\right)$\;
	$\vdots$\\
	$\bfz_{D}^{(\tau)} \sim p\left(Z_{D}=\bfz_{D} \mid Z_{1}=\bfz_{1}^{(\tau)}, Z_{2}=\bfz_{2}^{(\tau)}, \cdots, Z_{D-1}=\bfz_{D-1}^{(\tau)}\right)$\;

 }
\KwOut{ $ \{\bfz^{(1)}, \bfz^{(2)}, \cdots \} $
}
\end{algorithm}

\begin{property}[Gibbs Sampling is a Special Case of Metropolis-Hastings]
	We can view the Gibbs sampling as a special case of Metropolis-Hastings, where the acceptance of Gibbs' samples is always $1$.
\end{property}
\begin{proof}
Let $\bfz_{k}$ denote the sample's projection at the $k$-th dimension and $\bfz_{\backslash k}$ denote the sample's projection at dimensions other than $k$. In Gibbs sampling, we have $ q_k(\bfz^* \mid \bfz) = p(\bfz_k^* \mid \bfz_{\backslash k})$ (every time we sample one variable, and fix the others), which implies $\bfz^*_{\backslash k} = \bfz_{\backslash k}$. Therefore,
	\begin{align}
		A\left(\bfz^{*}, \bfz\right)=\frac{p\left(\bfz^{*}\right) q_{k}\left(\bfz \mid \bfz^{*}\right)}{p(\bfz) q_{k}\left(\bfz^{*} \mid \bfz\right)}=\frac{p\left(z_{k}^{*} \mid \bfz_{\backslash k}^{*}\right) p\left(\bfz_{\backslash k}^{*}\right) p\left(z_{k} \mid \bfz_{\backslash k}^{*}\right)}{p\left(z_{k} \mid \bfz_{\backslash k}\right) p\left(\bfz_{\backslash k}\right) p\left(z_{k}^{*} \mid \bfz_{\backslash k}\right)}=1
	\end{align}
\end{proof}

\newpage

\begin{table}[hbp]
\center
\caption{Summary of some useful sampling methods}
\begin{tabular}{l l l l}
\toprule
\textbf{Method} & \textbf{Metropolis} & \textbf{Metropolis-Hastings} & \textbf{Gibbs} \\
\midrule
Proposal dist. & $q(\bfz \mid \bfz^{(\tau)})$ & $q(\bfz \mid \bfz^{(\tau)})$ & $p(\bfz_k \mid \bfz_{\backslash k})$ \\
Assumption & $q(\bfz_A \mid \bfz_B) = q(\bfz_B \mid \bfz_A)$ & - & -  \\
Accept prob.  & $
\min \left\{1, \frac{\displaystyle \tilde{p}\left(\mathbf{z}^{\star}\right)}{\displaystyle \tilde{p}\left(\mathbf{z}^{(\tau)}\right)}\right\}
$ & $
\min \left\{1, \frac{\displaystyle \tilde{p}\left(\mathbf{z}^{\star}\right) q_{k}\left(\mathbf{z}^{(\tau)} \mid \mathbf{z}^{\star}\right)}{\displaystyle \tilde{p}\left(\mathbf{z}^{(\tau)}\right) q_{k}\left(\mathbf{z}^{\star} \mid \mathbf{z}^{(\tau)}\right)}\right\} \ 
$ & $1$ \\
$A_{k}\left(\mathbf{z}^{\star}, \mathbf{z}^{(\tau)}\right)$ & & & \\
\bottomrule
\end{tabular}
\end{table}