\documentclass[oneside]{homework} %%Change `twoside' to `oneside' if you are printing only on the one side of each sheet.

\studname{Ran~Yu}
\studmail{ry2239@columbia.edu}
\coursename{Natural Language Processing}
\hwNo{1}
\uni{ry2239}

\begin{document}
\maketitle 

\section*{Problem 1}
As presented in the Lecture~2 slides,
\begin{align*}
\text{perplexity}=2^{-l}, \quad \text{where } l=\frac{1}{M} \sum_{i=1}^{m}\log_2 p(s_{i}),
\end{align*}
and $M$ is the total number of words in the test data.
Hence, if we are trying to \emph{minimize the perplexity} of the language model on the validation data, we need to \emph{maximize} $l$ in the equation above.

As the \emph{trigram language model} was defined before,
\begin{align*}
p(x_{1}\dots x_{n})=\prod_{i=1}^{n}q(x_{i}\mid x_{i-2},x_{i-1}),
\end{align*}
where we define $x_{-1} = x_{0} = *$.
We can expand the equation for $l$. Writing $x_{1}^{(i)},\dots,x_{n_i}^{(i)}$ for the words of sentence $s_i$,
\begin{align*}
l&=\frac{1}{M} \sum_{i=1}^{m}\log_2\prod_{j=1}^{n_i}q(x_{j}^{(i)}\mid x_{j-2}^{(i)},x_{j-1}^{(i)})\\
&=\frac{1}{M} \sum_{i=1}^{m}\sum_{j=1}^{n_i}\log_2 q(x_{j}^{(i)}\mid x_{j-2}^{(i)},x_{j-1}^{(i)}).
\end{align*}
As $c'(w_1,w_2,w_3)$ is defined as the number of times the trigram $w_1, w_2, w_3$ is seen in the validation sentences, we can group the identical terms of this double sum and get
\begin{align*}
l=\frac{1}{M}\sum_{w_1,w_2,w_3}c'(w_1,w_2,w_3)\log_2 q(w_{3}\mid w_{1},w_{2})=\frac{1}{M}\,L(\lambda_1,\lambda_2,\lambda_3).
\end{align*}
Since $M$ is a positive constant that does not depend on the $\lambda$ values, choosing $\lambda$ values that maximize $L(\lambda_1,\lambda_2,\lambda_3)$ is equivalent to maximizing $l$, that is, to \emph{minimizing the perplexity} of the language model on the validation data.

\section*{Problem 2}
The problem with this estimation method is that it cannot be shown to define a valid probability distribution, and enforcing that it does leads to a system of constraints that may have no solution.
\begin{align*}
\sum_{w\in \nu}q(w|w_{i-2},w_{i-1})  =&\sum_{w\in\nu}[\lambda_1^{\Phi(w_{i-2},w_{i-1},w_i)}\times q_{ML}(w|w_{i-2},w_{i-1})\\
											&~~+\lambda_2^{\Phi(w_{i-2},w_{i-1},w_i)}\times q_{ML}(w|w_{i-1})\\
											&~~+\lambda_3^{\Phi(w_{i-2},w_{i-1},w_i)}\times q_{ML}(w)]\\
=&\sum_{w\in \nu}[\lambda_1^{\Phi(w_{i-2},w_{i-1},w_i)}\times q_{ML}(w|w_{i-2},w_{i-1})]\\
&+\sum_{w\in \nu}[\lambda_2^{\Phi(w_{i-2},w_{i-1},w_i)}\times q_{ML}(w|w_{i-1})]\\
&+\sum_{w\in \nu}[\lambda_3^{\Phi(w_{i-2},w_{i-1},w_i)}\times q_{ML}(w)]
\end{align*}
Because the value of $\lambda_j^{\Phi(w_{i-2},w_{i-1},w_i)}$ depends on $w_i$ itself, these factors cannot be pulled out of the sums, and the expression cannot be simplified further.\\
Hence the derivation above does not show that $\sum_{w\in \nu}q(w|w_{i-2},w_{i-1})$ equals $1$, so the estimate is not guaranteed to be a valid probability distribution.\\
On the other hand, every time we try to enforce the constraint $\sum_{w\in \nu}q(w|w_{i-2},w_{i-1}) =1$, the equation involves the 12 smoothing parameters. Collecting these constraints thus yields a system of linear equations in 12 parameters, and as the number of sentences and words increases, the probability that this system has no solution increases.\\
Therefore, this estimation method has a problem that should be fixed.
\section*{Problem 3}
The pseudo-code for the modified Viterbi algorithm is given below.\\
\textbf{Input:} a sentence $x_1, x_2, \dots , x_n$, $T(x_i)$, parameters $q(s|u, v)$ and $e(x|s)$.\\
\textbf{Initialization:} Set $\pi(0,*,*) = 1$, and $\pi(0,u,v) = 0$ for all $(u,v)$ such that $u \neq *$ or $v \neq *$. Define $T(x_{-1}) = T(x_{0}) = \{*\}$.\\
\textbf{Algorithm:}
\begin{flalign*}
&\text{For } k=1, \dots, n,&&\\
&\qquad \text{For } u \in T(x_{k-1}),\ v \in T(x_k),&&\\
&\qquad\qquad \pi(k, u, v) =\max_{w \in T(x_{k-2})} \bigl(\pi(k-1, w, u) \times q(v\mid w, u) \times e(x_k\mid v)\bigr) &&\\
&\text{Return } \max_{u\in T(x_{n-1}),\, v\in T(x_n)} \bigl(\pi (n, u, v) \times q(\text{STOP}\mid u, v)\bigr)&&
\end{flalign*}
Since $|T(x)| \le K$, the inner loop over the tag pairs $(u,v)$ takes $O(K^2)$ iterations, computing each $\pi(k,u,v)$ requires maximizing over at most $K$ choices of $w$, i.e.\ $O(K)$ work, and the outer loop over $k$ runs $O(n)$ times. Hence the total runtime of the algorithm is $O(n \times K^2 \times K) = O(nK^3)$.


\end{document}