% please textwrap! It helps svn not have conflicts across a multitude of
% lines.
%
% vim:set textwidth=78:

\section{Results and Evaluation}
\label{sec:results}

% Example of a table
\ignore {
\begin{figure*}[!t]
\begin{center}
\begin{tabular}{l l}
$- 0.099 *$ RFA\_2\_1 = 4 \\
$+ 0.110 *$ RFA\_2\_1 = 1 \\
$- 0.044 *$ RFA\_2\_1 = 3 \\
$- 0.015 *$ RFA\_2\_1 = 3 \\
$- 0.066 *$ RFA\_2\_2 = E \\
$+ 0.105 *$ RFA\_2\_2 = G \\
$+ 0.027 *$ RFA\_2\_2 = F \\
$- 0.105 *$ RFA\_2\_2 = D \\
$- 0.058 *$ RFA\_3\_0 = S \\
$+ 0.009 *$ RFA\_3\_1 = 4 \\
$+ 0.001 *$ RFA\_3\_1 = 3 \\
$+ 0.012 *$ ADATE\_10 \\
$- 0.019 *$ ADATE\_11 \\
$+ 0.034 *$ ADATE\_12 \\
$- 0.064 *$ NGIFTALL \\
$+ 0.002 *$ LASTGIFT \\
$- 0.110 *$ LASTDATE \\
$- 0.020 *$ AVGGIFT \\
$+ 0.688$ \\
\end{tabular}
\end{center}
\caption{\figtitle{Fast Large Margin Logistic Regression Model.}
The model produced by the linear SVM using the optimal cost value C found.}
\label{fig:logistic_model}
\end{figure*}
}

\subsection{Evaluation Methodology}
In order to validate whether a particular model is effective at predicting
ratings, we use the given 5-fold cross validation on the learning data. We
calculate MSE and MAE for each of the folds and average the error across each
fold. Lower errors indicate a better model; however, extremely low errors
may be a sign of overfitting and should be viewed with suspicion.

\subsection{MSE and MAE}
After determining how to tune our parameters, our final model produced an MSE
of \optmse{} for a rank of \optk{}. In addition to evaluating MSE, we also report the MAE obtained
on the predictions. Figure~\ref{fig:k_mse_mae} shows the MSE and MAE obtained
for different ranks of factorizations. The values of MAE are lower than those
of the corresponding MSE: the mean absolute error is at most the square root
of the corresponding mean squared error, so whenever the MSE exceeds $1.0$,
the MAE is strictly smaller than the MSE.

\begin{figure}[!t]
\centering
\includegraphics[width=0.5\textwidth]{img/k_mse_mae.eps}
\caption{\figtitle{The Effect of Rank Approximation on MSE and MAE.}
We observe a decay of MSE as we increase $k$ which corroborates our
expectation that with residual fitting, we successively diminish the MSE.
}
\label{fig:k_mse_mae}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% XXX: From his dm.pdf:
%
% In general the mean and the median are different, so in general predictions
% that minimize MAE are different from predictions that minimize MSE.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{Limitations}
Since we only validated our model using cross validation, we do not know how
it will perform on real data.

\subsection{Scalability}
Our model is unlikely to scale to handling something as large as the Netflix
dataset \cite{netflixprize} containing on the order of $10^8$ ratings. Just
our sparse representation would require about 11\,GB assuming four bytes for
each of the user and movie id fields and the rating. Since our implementation
uses a single sparse
matrix to hold this data, it must all live on one node with sufficient memory.
None of the machines we have access to contain this much main memory, so we
will be unable to build a model for the Netflix dataset.

Although most operating systems support paging to disk or other slower
higher capacity devices, our program would not finish in reasonable time. As
soon as a program begins to thrash and continually page in and out to disk,
the access time for these pages becomes $4$ orders of magnitude slower. Even
programs that took a minute to execute now take about a week with thrashing.
Since our model runs in $1306$ seconds, our model while thrashing would run in
about $150$ days.

If we have a machine with sufficient memory available, running our
algorithm on the Netflix dataset would incur a computational cost of
$O(k \cdot x \cdot t)$ where $k$ is the rank of approximation, $x$ is the
total number of ratings, and $t$ is the number of epochs until convergence.
Thus, with $10^8$ ratings, the computational cost is at least on the order of
$10^8$ operations.
