\documentclass[oneside]{homework} %%Change `twoside' to `oneside' if you are printing only on the one side of each sheet.

\studname{Ran Yu}
\studmail{ry2239@columbia.edu}
\coursename{Natural Language Processing}
\hwNo{1}
\uni{ry2239}

\begin{document}
\maketitle
Programming Problem

\section*{Question 4}
\begin{itemize}
\item Compute $e(x|y)=\frac{Count(y\leadsto x)}{Count(y)}$.\\
	 First of all, run \texttt{count\_freq.py} to get the counts from \texttt{ner\_train.dat}, and store them in \texttt{ner.counts}.\\
	 To get $Count(y \leadsto x)$ and $Count(y)$, I iterated over all the lines in \texttt{ner.counts}. If the second word in a line is `WORDTAG', I stored the first word as $Count(y \leadsto x)$, the number of times the word was tagged with that tag. If the second word in a line is `1-GRAM', I stored the first word as $Count(y)$, the number of times any instance was tagged with this tag.\\
	 $Count(y \leadsto x) / Count(y)$ gives the emission parameter we want.\\
	 The script is in \texttt{compute\_em\_par.py}, and the result is in \texttt{emission.dat}.\\
	 The command line is: \texttt{python compute\_em\_par.py}
\item Replace\\
	I iterated over all the lines in \texttt{ner\_train.dat}, using a dictionary mapping each word to its frequency. After the iteration, if a word's frequency was at most 5, I replaced the word with \texttt{\_RARE\_} and wrote the result into a new file.\\
	The code is in \texttt{replace\_rare.py}; the result of the replacement is in \texttt{ner\_train\_rep.dat}, and the result of re-running \texttt{count\_freq.py} is in \texttt{emission\_rep.dat}.\\
	The command line is: \texttt{python replace\_rare.py}
\item Baseline Approach\\
	For every word $x$ in \texttt{ner\_dev.dat}, I searched \texttt{emission\_rep.dat} for the tag $y$ that maximizes $e(x|y)$ and tagged the word with that tag.
	Code is in \texttt{tag\_base.py}; the result is in \texttt{ner\_dev.tag}.\\
	The command line is: \texttt{python tag\_base.py}\\
	The performance of the model:\\
\begin{center}
\begin{tabular}{p{3cm}p{3cm}p{3cm}p{3cm}}
~ & Precision & Recall & F1-Score \\
\hline
Total & 0.206 & 0.505 & 0.293\\
PER & 0.407 & 0.193 & 0.261\\
ORG & 0.464 & 0.371 & 0.413\\
LOC & 0.139 & 0.868 & 0.239\\
MISC & 0.491 & 0.603 & 0.541\\
\end{tabular}
\end{center}
	The performance is not very satisfactory: the total precision is only around 20\%. The precision for `LOC' is especially low. The performance needs further improvement.
\end{itemize}
\section*{Question 5}
\begin{itemize}
\item Compute $q(y_i|y_{i-2},y_{i-1})=\frac{Count(y_{i-2},y_{i-1},y_i)}{Count(y_{i-2},y_{i-1})}$\\
Read in the lines of \texttt{ner\_rep.counts}, the file generated by running \texttt{count\_freq.py} on the replaced \texttt{ner\_train.dat}, and collect all the 3-GRAM and 2-GRAM counts. The final result is ${Count(y_{i-2},y_{i-1},y_i)}/{Count(y_{i-2},y_{i-1})}$.
Code is in \texttt{tag\_count.py} and the result is in \texttt{trigram\_prob.dat}.
The command line is: \texttt{python tag\_count.py}\\
\item Implement the Viterbi algorithm and tag the words\\
Using the data in \texttt{emission\_rep.dat} as the input $e(x|y)$, the data in \texttt{trigram\_prob.dat} as the input $q(v|w,u)$, and the lines in \texttt{ner\_dev.dat} as the input sentences, I implemented the Viterbi algorithm.
Code is in \texttt{tag.py} and the result is in \texttt{ner\_dev\_viterbi.tag}.\\
The command line is: \texttt{python tag.py}\\
The performance of the model:\\
\begin{center}
\begin{tabular}{p{3cm}p{3cm}p{3cm}p{3cm}}
~ & Precision & Recall & F1-Score \\
\hline
Total & 0.763 & 0.589 & 0.665\\
PER & 0.734 & 0.557 & 0.637\\
ORG & 0.596 & 0.451 & 0.513\\
LOC & 0.866 & 0.676 & 0.759\\
MISC & 0.826 & 0.681 & 0.746\\
\end{tabular}
\end{center}
The table above illustrates that the HMM model significantly improves the tagging performance.
\end{itemize}
\section*{Question 6}
I introduced 3 classes into the replacement: all numbers are replaced with \texttt{\_NUM\_}, words starting with a capital letter are replaced with \texttt{\_FIRST\_CAPITAL\_}, and words consisting entirely of capital letters are replaced with \texttt{\_ALLCAP\_}.\\
The performance of the model:\\
\begin{center}
\begin{tabular}{p{3cm}p{3cm}p{3cm}p{3cm}}
~ & Precision & Recall & F1-Score \\
\hline
Total & 0.763 & 0.591 & 0.666\\
PER & 0.707 & 0.567 & 0.630\\
ORG & 0.629 & 0.440 & 0.518\\
LOC & 0.874 & 0.679 & 0.764\\
MISC & 0.827 & 0.682 & 0.748\\
\end{tabular}
\end{center}
We can see that the performance was improved.\\
The code for the replacement is in \texttt{replace\_class.py}.\\
The commands are:\\
\texttt{python replace\_class.py;}\\
\texttt{python count\_freqs.py ner\_train\_rep.dat > ner.counts;}\\
\texttt{python compute\_em\_rep.py;}\\
\texttt{python tag\_count.py;}\\
\texttt{python tag.py}
\end{document}