\documentclass[11pt]{article}
\usepackage{fullpage} 
\usepackage{times} 
\usepackage{graphicx} % graphicx supersedes the obsolete graphics package
\usepackage{amssymb}
\usepackage{url}
\usepackage{color}
\usepackage{fancyvrb}
\usepackage{textcomp}


\graphicspath{{/home/vineetm/cse250/proj/pr3/}} % NOTE(review): absolute path; prefer a path relative to the document for portability
\begin{document}

\input{lib/tex/macros.tex}

\title{ \Large CSE 291 - Assignment 1}
\author{\small Soumyarupa De,
\small Chris Louie,
\small and Sanjukta Mitra \\
Department of Computer Science and Engineering \\ 
University of California, San Diego \\ 
\{\small sde,cmlouie,samitra\}@cs.ucsd.edu}

\maketitle

\input{abstract.tex}
\input{intro.tex}
\input{flow.tex}
\input{methodology.tex}
\input{results.tex}
\input{conclusion.tex}

% \begin{abstract}
% \noindent
% TODO: write abstract.
% \end{abstract}
% 
% \section {Introduction}
% TODO: write
% 
% \section {Description of Datasets}
% \subsection {Datasets}
% 
% {\bf Classic400 Dataset:}
% Classic400[1] consists of 400 documents which are divided as follows : 100 documents from MEDLINE (from medical journals), 100 documents from CISI (from information retrieval papers) and 200 documents from CRANFIELD (from the aeronautical system papers). It has a vocabulary length of 6205 words.\\
% 
% {\bf BBC Dataset:}
% The second dataset that we use in this project is the BBC dataset. It consists of 2225 documents which are divided as follows : 510 documents from category of business, 386 from entertainment, 417 from politics, 511 from sport and 401 from technology. It has a vocabulary of 9635 words\\
% 
% \subsection {Data Structures}
% 
% We represent each document as a bag of words using the following data structures:
% \begin {itemize}
% \item Word List   - A vector containing all the words in the corpus.
% \item Word Count  - A sparse matrix which stores the frequency of each word in a document.
% \item True Labels - A vector containing the true label for each document.
% \end {itemize}
% 
% \section {Algorithms}
% \subsection {Algorithm description and update rule}
% We use collapsed Gibbs sampling algorithm as explained in class notes[3] to learn the parameters of the model. The parameters are
% \begin{itemize}
% \item $\theta$ which represents the topic distribution for a given document
% \item $\phi$ which represents the word distributions for a given topic 
% \item $\alpha$, a fixed hyper-parameter which is used to draw $\theta$ according to Dirichlet distribution
% \item $\beta$, a fixed hyper-parameter which is used to draw $\phi$ according to Dirichlet distribution
% \end{itemize}
% 
% The algorithm works by estimating the hidden topic $z_{i}$ for each word in the corpus, where $i$ represents the position of the word. We denote $\bar{w}$ as the sequence of words making up the entire corpus and $\bar{z}$ as the corresponding sequence of topics. Update rule for topic distributions for a given word is given by:
% \begin{equation}
% p(z_{i} = j | \bar{z'}, \bar{w}) \propto \frac{q_{jw_{i}}' + \beta_{w_{i}}} {\sum_{t}q_{jt}' + \beta_{t}} \frac{n_{mj}' + \alpha_{j}} {\sum_{k}n_{mk}' + \alpha_{k}}
% \end{equation}
% 
% where 
% \begin{itemize}
% \item $q_{jw_{i}}'$ is number of times topic $j$ appears in the entire corpus other than position $i$
% \item $n_{mj}'$ is the number of times topic $j$ appears in the document $m$ other than position $i$
% \item $\bar{z'}$ is the sequence of topics words other than position $i$
% \end{itemize}
% 
% The update rule is run for every word for all the documents present in the corpus.
% 
% \subsection{Initialization and Optimizations}
% In the above algorithm we initialise multinomial $\theta$ as $\frac{1}{K}$ for every document. We sample the initial value of $z$ using $\theta$, where $K$ represents the number of topics.
% 
% From equation(1) the expression $\sum_{k}n_{mk}' + \alpha_{k}$ is independent of topic $j$. Thus our update rule becomes:
% \begin{equation}
% p(z_{i} = j | \bar{z'}, \bar{w}) = \frac{q_{jw_{i}}' + \beta_{w_{i}}} {\sum_{t}q_{jt}' + \beta_{t}} \frac{n_{mj}' + \alpha_{j}} {Z}
% \end{equation}
% 
% where $Z$ is the normalization constant.
% 
% \subsection{Computing $\theta$ and $\phi$}
% Once Gibbs sampling reaches convergence, we compute $\theta$ and $\phi$ using the following equations:
% \begin{equation}
% \theta_{mj} = \frac{n_{mj} + \alpha_{j}} {\sum_{k}n_{mk} + \alpha_{k}}
% \end{equation}
% 
% \begin{equation}
% \phi_{jw_{i}} = \frac{q_{jw_{i}} + \beta_{w_{i}}} {\sum_{t}q_{jt} + \beta_{t}}
% \end{equation}
% 
% \subsection{Time and Space Complexity}
% The time complexity of the Gibbs sampling algorithm is $O(NK)$ since equation (2) needs to be computed for every topic $j$, where $N$ is the length of the corpus. We need to store the topic counts for a given document ($N_{mk}$), which takes $O(MK)$ space, where $M$ represents the number of documents. We also need to store the topics assigned for each word in the vocabulary ($Q_{kt}$), which takes $O(KV)$ space, where $V$ is the length of the vocabulary. To store the topic assigned for each word we need $O(N)$ space. Thus our space complexity becomes $O(KV)$. It takes 6s for one epoch to run for the Classic400 dataset.
% 
% \section {Likelihood}
% Likelihood for the corpus can be obtained by following equation:
% \begin{equation}
% p(W | \alpha, \beta) = \prod_{m=1}^{M} p(\bar{w_{m}} | \alpha, \beta)
% \end{equation}
% 
% where,
% \begin{equation}
% p(\bar{w_{m}} | \alpha, \beta) = \sum_{z_{m,n}} \sum_{\theta_{m}} \sum_{\phi} p(\bar{w_{m}}, \bar{z_{m}}, \theta_{m}, \phi | \alpha, \beta)
% \end{equation}
% 
% and
% \begin{equation}
% p(\bar{w_{m}}, \bar{z_{m}}, \theta_{m}, \phi | \alpha, \beta) =  \prod_{n=1}^{N_{m}} p(w_{m,n} | \phi_{z_{m,n}}) p(z_{m,n} | \theta_{m}) p(\theta_{m} | \alpha) p(\phi | \beta)
% \end{equation}
% where $N_m$ is the number of words in the $m^{th}$ document.\\
% 
% By the definition of Dirichlet distribution, we have the following relations:
% \begin{equation}
% p(\theta_{m} | \alpha) = \frac{\prod_{k=1}^{K} \theta_{m,k}^{\alpha_{k}-1}}{D(\alpha)}
% \end{equation}
% \begin{equation}
% p(\phi | \beta) = \prod_{k=1}^{K} \frac{1}{D(\beta)} \prod_{t=1}^{V} \phi_{kt}^{\beta_t-1}
% \end{equation}
% 
% We know that $p(\bar{z_{m}} | \theta_{m})$ = $\prod_{n=1}^{N_{m}} p(z_{m,n} | \theta_{m})$ \\
% 
% We can write $p(\bar{z_{m}} | \theta_{m})$ in terms of the parameter $\theta_{m}$ as
% \begin{equation}
% p(\bar{z_{m}} | \theta_{m}) = \prod_{k=1}^{K} \theta_{m,k}^{n_{mk}}
% \end{equation}
% where $n_{mk}$ is the number of times topic $k$ occurs in document $m$. \\
% 
% $p(w_{m,n} | \phi_{z_{m,n}})$ can be written in terms of the parameter $\phi_{z_{m,n}}$ as
% \begin{equation}
% p(w_{m,n} | \phi_{z_{m,n}}) = \prod_{t=1}^{V} \phi_{z_{m,n},t}^{r_{z_{m,n}t}}
% \end{equation}
% where $r_{z_{m,n}t}$ is the number of times topic $z_{m,n}$ occurs with the word $t$ in the document $m$
% 
% Both the values $n_{mk}$ and $r_{z_{m,n}t}$ can be obtained from matrices $N_{mk}$ and $R_{kt}$, which can be populated in each iteration of the Gibbs sampling algorithm. Note that we have not used $R_{kt}$ in our algorithm. To compute the log likelihood we need to populate this matrix as well.\\
% 
% Hence, we can compute the log likelihood by substituting equations(6) to equations(11) in equation(5).
% 
% \section {Correctness of Algorithms}
% 
% \subsection {Generative Process}
% To prove the correctness of the Gibbs sampling algorithm, we first generate a document as follows:
% \begin{enumerate}
% \item Start with an all-zero wordcount data structure (described in section 2.2)
% \item Fix hyper-parameters $\alpha$ of length $K$ (number of topics) and $\beta$ of length $V$ (vocabulary size)
% \item For every topic $k$, sample word-topic distribution $\phi_k$ using a Dirichlet distribution over the hyper-parameter $\beta$
% \item For every document $m$, sample topic-document distribution $\theta_m$ using a Dirichlet distribution over the hyper-parameter $\alpha$
% \item For each word $n$ in the document $m$, sample topic index $z_{m,n}$ using a multinomial drawn randomly from $\theta_m$, and sample word $w_{m,n}$ using a multinomial $w$ drawn randomly from $\phi_{z_{m,n}}$
% \item For every word $w_{m,n}$ drawn using step 5, increment the wordcount for the $m^{th}$ document and $w^{th}$ word in wordcount data structure.
% \end{enumerate}
% 
% \subsection {Proof of Correctness}
% At the end of the above algorithm, we would have generated a document represented by our wordcount data structure. We also would have the distributions $\theta$ and $\phi$ with which we generated the document.\\
% 
% We now apply our Gibbs sampling algorithm on the document we just generated and learn the word-topic and topic-document distributions $\phi'$ and $\theta'$ respectively. We next compare the values of $\phi$ and $\phi'$, and of $\theta$ and $\theta'$. If they match, it means that our Gibbs sampling algorithm is successful in learning the distributions.
% 
% \subsection {Observations}
% We made the following observations after using the above process to validate the correctness of our algorithm:
% \begin{itemize}
% \item The values of the $\theta$ and $\theta'$ matrices matched up to the second decimal place.
% \item Most of the values of $\phi$ and $\phi'$ were zeroes or were close to zero (significant digit in the fourth decimal place). However, all the zero values and most of the non zero values concurred with the highest difference being 0.0003.
% \end{itemize}
% 
% \section{Overfitting}
% We can determine whether an LDA model overfits its training data by evaluating perplexity for a given model. Perplexity tells us whether a given model generalises well to unseen data. We can do this by holding out test data and then evaluating the model on the held-out data.
% 
% \section {Design of Experiments}
% \subsection {Tuning the hyper-parameters}
% We first fixed our hyper-parameters $\alpha$ and $\beta$ to $50/K$ ($K$ is the number of topics) and 0.01 respectively using the science topics paper[1] as a guideline. We made a series of experiments by increasing and decreasing the value of $\alpha$ and $\beta$. Here are our observations:
% \begin{itemize}
% \item As we increased the value of $\alpha$, the values $\theta_k$ for each topic $k$ were getting closer to each other. At $\alpha = 500/K$, we found that the largest value in the $\theta$ matrix was 0.6302. This means the model was predicting that most documents belong to more than one topic and resulted in a bad clustering.
% \item As we decreased the value of $\alpha$, the values $\theta_k$ for each topic $k$ were moving farther apart. At $\alpha = 5/K$, we found that the largest value in the $\theta$ matrix was 0.9. This means the model was predicting that the document belongs to a particular topic and hence resulted in a good clustering.
% \item As we increased the value of $\beta$, we found that there was a high degree of overlap for the top ten words for each topic. This was especially the case with CRANFIELD and CISI topics in the classic400 dataset since each of these topics had only 100 words. This means that the model had effectively reduced the number of topics 
% 
% \item Decreasing the value of $\beta$ however did not give any improvement as we were already seeing good clustering with $\beta = 0.01$.
% \end{itemize}
% 
% \subsection {Measures of convergence and accuracy}
% We checked for convergence and accuracy based on the following two measures:
% \begin{itemize}
% \item Manual validation - By looking at the values of $\theta$ and the top ten words for each topics, and ensuring that there is a good clustering of words-topics and documents-topics. We did this by running the Gibb's sampling algorithm for a huge (500) number of epochs and storing these values after the end of every epoch so that we could compare these values from epoch to epoch to check for accuracy and convergence. Accuracy is decided based on how good the clustering is for both documents and words. Convergence is decided by looking at the closeness in the values of $\theta$ and the top ten words from epoch to epoch.
% \item Purity - This is an objective measure of accuracy and convergence. We calculate purity as follows:
% \begin{enumerate}
% \item Assign a topic $k$ for each document $m$ such that $k = $ argmax $(\theta_{m,k})$
% \item Using the results for step 1 and the true labels, populate a confusion matrix of the size $K \times K$ 
% \item Calculate purity by finding the accuracy from the confusion matrix (by counting the number of correctly labeled documents and dividing it by the total number of documents).
% \end{enumerate}
% Purity values closer to 1 indicate a higher accuracy. Convergence is measured by looking at the difference between purity values upto the third decimal place from one epoch to another.
% \end{itemize}
% 
% \subsection {Number of topics}
% After fixing the values of $\alpha$ and $\beta$, we experimented by increasing the number of topics. We made the following observations:
% \begin{itemize}
% \item We observed faster convergence with higher number of topics. For the classic400 dataset, we observed convergence at $152^{nd}$ epoch with $4$ topics as opposed to $263^{rd}$ epoch with $3$ topics. We attribute this to the fact that hyper-parameter $\alpha$ was reduced to $50/4$ from $50/3$ in case of 4 topics.
% \item One of the topics manifested itself as two and the documents belonging to that topic got divided amongst its two manifestations. For the classic400 dataset, we observed that the CRANFIELD topic was divided into two and 193 documents classified correctly, 112 were classified as CRANFIELD-1 and the rest as CRANFIELD-2. This can be observed by the confusion matrix in Table 4 and the top 10 words for each topic in Table 3.
% \end{itemize}
% 
% \section {Results of Experiments}
% 
% % \graphicspath{{/home/vineetm/cse240/proj/pr4/}}
% % \begin{figure}[!ht]
% % %\begin{minipage}[b]{0.45\linewidth}
% % \includegraphics[width=7.0in]{classic400.jpg}
% % \caption {Classic400 Dataset}
% % \label{fig:Fig1}
% % %\end{minipage}
% % \hspace{1cm}
% % %\begin{minipage}[b]{0.45\linewidth}
% % \includegraphics[width=7.0in]{bbc.jpg}
% % \caption {BBC Dataset}
% % \label{fig:Fig2}
% % %\end{minipage}
% % \end{figure}
% 
% We achieve purity levels of 0.9675 for the Classic400 dataset and 0.9285 for the BBC dataset. Detailed confusion matrices for purity are shown in Table 4 and Table 6 for the Classic400 and BBC datasets respectively.
% The following tables show the results we obtained after training the Classic400 and BBC datasets. The purity of each dataset is also shown in the tables.
% % Here it is: the code that adjusts justification and spacing around caption.
% \makeatletter
% % http://www.texnik.de/floats/caption.phtml
% % This does spacing around caption.
% \setlength{\abovecaptionskip}{6pt}   % 0.5cm as an example
% \setlength{\belowcaptionskip}{6pt}   % 0.5cm as an example
% % This does justification (left) of caption.
% \long\def\@makecaption#1#2{%
%   \vskip\abovecaptionskip
%   \sbox\@tempboxa{#1: #2}%
%   \ifdim \wd\@tempboxa >\hsize
%     #1: #2\par
%   \else
%     \global \@minipagefalse
%     \hb@xt@\hsize{\box\@tempboxa\hfil}%
%   \fi
%   \vskip\belowcaptionskip}
% \makeatother
% 
% \begin{table}[t]
% \caption{Classic400 top 10 words for 3 topics}
% \begin{tabular}{| l | l | l |}
% \hline
% CRANFIELD & CISI & MEDLINE \\ \hline
% boundary & system & patients \\
% layer & problems & ventricular \\
% wing & research & cases \\
% mach & methods & fatty \\
% supersonic & scientific & left \\
% ratio & retrieval & nickel \\
% wings & general & acids \\
% velocity & data & aortic \\
% shock & language & blood \\
% effects & development & time \\ \hline
%  \end{tabular}
% \label{Table1}
% \end {table}
% 
% \begin{table}[t]
% \caption{Classic400 purity matrix for 3 topics}
% \begin{tabular} {| l | l | l |}
% \hline
%      1  &   2  &  97 \\ \hline
%      0   & 99  &   1 \\ \hline
%    191   &  9 &    0 \\ \hline
% \end{tabular}
% \label{Table2}
% \end{table}
% 
% \begin{table}
% \caption{Classic400 top 10 words for 4 topics}
% \begin{tabular} {| l | l | l | l |}
% \hline
% CRANFIELD1 & CRANFIELD2 & CISI & MEDLINE \\ \hline
% layer  & solution & system & patients \\
% boundary & plate & research & ventricular\\
% wing & cylinder &scientific &fatty\\
% mach & temperature &retrieval&cases\\
% supersonic & distribution &methods&left\\
% ratio   & problem &development&nickel\\
% wings   & case &language&acids\\
% shock   & small &science&aortic\\
% surface & order &subject&blood\\
% effects & bodies &data&glucose\\ \hline
% \end{tabular}
% \label{Table3}
% \end{table}
% 
% \begin{table}
% \caption{Classic400 purity matrix for 4 topics}
% \begin{tabular} {| l | l | l | l|}
% \hline
%      3   & 96  &   0 &    1 \\ \hline
%     98    & 1  &   0  &   1 \\ \hline
%      7   &  0 &  112  &  81 \\ \hline
% \end{tabular}
% \label{Table4}
% \end{table}
% 
% \begin{table}
% \caption{BBC Dataset top 10 words for 5 topics}
% \begin{tabular} {| l | l | l | l | l |}
% \hline
% Tech & Politics & Entertainment &Business & Sports \\ \hline
% peopl  & govern & film & year & game\\
% game & peopl & best & compani & plai\\
% technolog & labour &year &market & win\\
% mobil & parti &award &firm & england\\
% phone & elect &music &bank & player\\
% servic   & minist &star &sale & against\\
% user   & plan &show &price & first\\
% comput   & told &includ &share & time\\
% on & blair &on &growth &world\\
% get & sai &top &rate&club\\ \hline
% \end{tabular}
% \label{Table5}
% \end{table}
% 
% \begin{table}
% \caption{BBC dataset purity matrix for 5 topics}
% \begin{tabular} {| l | l | l | l| l |}
% \hline
%     9  &  39  &   1 &  461    & 0 \\ \hline
%      7  &  27 &  349 &    3   &  0\\ \hline
%      1 &  412  &   1 &    3   &  0\\ \hline
%      0  &  12  &  18 &    2  & 479\\ \hline
%    365 &   22  &   6 &    6 &    2\\ \hline
% \end{tabular}
% \label{Table6}
% \end{table}
% 
% \section{References}
% [1] Reference: Banerjee A, Dhillon IS, Ghosh J, Sra S. Clustering on the unit hypersphere using von Mises-Fisher distributions. J. Mach. Learn. Res. 2005;6:1345–1382
% 
% [2] Greene, D. and Cunningham, P. (2006), Practical solutions to the problem of diagonal dominance in kernel document clustering, Proc. 23rd International Conference on Machine learning (ICML 2006)
% 
% [3] Charles Elkan, Text mining and topic models. March 2011
% 
% 
\bibliographystyle{alpha}
\bibliography{../bib}

\end{document}
