\documentclass[conference]{IEEEtran}

\IEEEoverridecommandlockouts
% Some very useful LaTeX packages include:
% (uncomment the ones you want to load)


% *** MISC UTILITY PACKAGES ***
%
%\usepackage{ifpdf}
% Heiko Oberdiek's ifpdf.sty is very useful if you need conditional
% compilation based on whether the output is pdf or dvi.
% usage:
% \ifpdf
%   % pdf code
% \else
%   % dvi code
% \fi
% The latest version of ifpdf.sty can be obtained from:
% http://www.ctan.org/tex-archive/macros/latex/contrib/oberdiek/
% Also, note that IEEEtran.cls V1.7 and later provides a builtin
% \ifCLASSINFOpdf conditional that works the same way.
% When switching from latex to pdflatex and vice-versa, the compiler may
% have to be run twice to clear warning/error messages.






% *** CITATION PACKAGES ***
%
\usepackage{cite}
% cite.sty was written by Donald Arseneau
% V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
% \cite{} output to follow that of IEEE. Loading the cite package will
% result in citation numbers being automatically sorted and properly
% "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
% cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
% \cite will automatically add leading space, if needed. Use cite.sty's
% noadjust option (cite.sty V3.8 and later) if you want to turn this off.
% cite.sty is already installed on most LaTeX systems. Be sure and use
% version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
% not currently provide for hyperlinked citations.
% The latest version can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
% The documentation is contained in the cite.sty file itself.






% *** GRAPHICS RELATED PACKAGES ***
%
\ifCLASSINFOpdf
   \usepackage[pdftex]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../pdf/}{../jpeg/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
\else
  % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
  % will default to the driver specified in the system graphics.cfg if no
  % driver is specified.
  % \usepackage[dvips]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../eps/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.eps}
\fi
% graphicx was written by David Carlisle and Sebastian Rahtz. It is
% required if you want graphics, photos, etc. graphicx.sty is already
% installed on most LaTeX systems. The latest version and documentation can
% be obtained at: 
% http://www.ctan.org/tex-archive/macros/latex/required/graphics/
% Another good source of documentation is "Using Imported Graphics in
% LaTeX2e" by Keith Reckdahl which can be found as epslatex.ps or
% epslatex.pdf at: http://www.ctan.org/tex-archive/info/
%
% latex, and pdflatex in dvi mode, support graphics in encapsulated
% postscript (.eps) format. pdflatex in pdf mode supports graphics
% in .pdf, .jpeg, .png and .mps (metapost) formats. Users should ensure
% that all non-photo figures use a vector format (.eps, .pdf, .mps) and
% not a bitmapped formats (.jpeg, .png). IEEE frowns on bitmapped formats
% which can result in "jaggedy"/blurry rendering of lines and letters as
% well as large increases in file sizes.
%
% You can find documentation about the pdfTeX application at:
% http://www.tug.org/applications/pdftex





% *** MATH PACKAGES ***
%
\usepackage[cmex10]{amsmath}
% A popular package from the American Mathematical Society that provides
% many useful and powerful commands for dealing with mathematics. If using
% it, be sure to load this package with the cmex10 option to ensure that
% only type 1 fonts will utilized at all point sizes. Without this option,
% it is possible that some math symbols, particularly those within
% footnotes, will be rendered in bitmap form which will result in a
% document that can not be IEEE Xplore compliant!
%
% Also, note that the amsmath package sets \interdisplaylinepenalty to 10000
% thus preventing page breaks from occurring within multiline equations. Use:
%\interdisplaylinepenalty=2500
% after loading amsmath to restore such page breaks as IEEEtran.cls normally
% does. amsmath.sty is already installed on most LaTeX systems. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/amslatex/math/





% *** SPECIALIZED LIST PACKAGES ***
%
%\usepackage{algorithmic}
% algorithmic.sty was written by Peter Williams and Rogerio Brito.
% This package provides an algorithmic environment for describing algorithms.
% You can use the algorithmic environment in-text or within a figure
% environment to provide for a floating algorithm. Do NOT use the algorithm
% floating environment provided by algorithm.sty (by the same authors) or
% algorithm2e.sty (by Christophe Fiorio) as IEEE does not use dedicated
% algorithm float types and packages that provide these will not provide
% correct IEEE style captions. The latest version and documentation of
% algorithmic.sty can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
% There is also a support site at:
% http://algorithms.berlios.de/index.html
% Also of interest may be the (relatively newer and more customizable)
% algorithmicx.sty package by Szasz Janos:
% http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/




% *** ALIGNMENT PACKAGES ***
%
%\usepackage{array}
% Frank Mittelbach's and David Carlisle's array.sty patches and improves
% the standard LaTeX2e array and tabular environments to provide better
% appearance and additional user controls. As the default LaTeX2e table
% generation code is lacking to the point of almost being broken with
% respect to the quality of the end results, all users are strongly
% advised to use an enhanced (at the very least that provided by array.sty)
% set of table tools. array.sty is already installed on most systems. The
% latest version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/tools/


%\usepackage{mdwmath}
%\usepackage{mdwtab}
% Also highly recommended is Mark Wooding's extremely powerful MDW tools,
% especially mdwmath.sty and mdwtab.sty which are used to format equations
% and tables, respectively. The MDWtools set is already installed on most
% LaTeX systems. The latest version and documentation is available at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/


% IEEEtran contains the IEEEeqnarray family of commands that can be used to
% generate multiline equations as well as matrices, tables, etc., of high
% quality.


%\usepackage{eqparbox}
% Also of notable interest is Scott Pakin's eqparbox package for creating
% (automatically sized) equal width boxes - aka "natural width parboxes".
% Available at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/





% *** SUBFIGURE PACKAGES ***
\usepackage[tight,footnotesize]{subfigure}
% subfigure.sty was written by Steven Douglas Cochran. This package makes it
% easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
% work, it is a good idea to load it with the tight package option to reduce
% the amount of white space around the subfigures. subfigure.sty is already
% installed on most LaTeX systems. The latest version and documentation can
% be obtained at:
% http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
% subfigure.sty has been superseded by subfig.sty.



%\usepackage[caption=false]{caption}
%\usepackage[font=footnotesize]{subfig}
% subfig.sty, also written by Steven Douglas Cochran, is the modern
% replacement for subfigure.sty. However, subfig.sty requires and
% automatically loads Axel Sommerfeldt's caption.sty which will override
% IEEEtran.cls handling of captions and this will result in nonIEEE style
% figure/table captions. To prevent this problem, be sure and preload
% caption.sty with its "caption=false" package option. This will preserve
% IEEEtran.cls handling of captions. Version 1.3 (2005/06/28) and later 
% (recommended due to many improvements over 1.2) of subfig.sty supports
% the caption=false option directly:
%\usepackage[caption=false,font=footnotesize]{subfig}
%
% The latest version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
% The latest version and documentation of caption.sty can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/caption/




% *** FLOAT PACKAGES ***
%
%\usepackage{fixltx2e}
% fixltx2e, the successor to the earlier fix2col.sty, was written by
% Frank Mittelbach and David Carlisle. This package corrects a few problems
% in the LaTeX2e kernel, the most notable of which is that in current
% LaTeX2e releases, the ordering of single and double column floats is not
% guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
% single column figure to be placed prior to an earlier double column
% figure. The latest version and documentation can be found at:
% http://www.ctan.org/tex-archive/macros/latex/base/



%\usepackage{stfloats}
% stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
% the ability to do double column floats at the bottom of the page as well
% as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
% LaTeX2e). It also provides a command:
%\fnbelowfloat
% to enable the placement of footnotes below bottom floats (the standard
% LaTeX2e kernel puts them above bottom floats). This is an invasive package
% which rewrites many portions of the LaTeX2e float routines. It may not work
% with other packages that modify the LaTeX2e float routines. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
% Documentation is contained in the stfloats.sty comments as well as in the
% presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
% does not allow \baselineskip to stretch. Authors submitting work to the
% IEEE should note that IEEE rarely uses double column equations and
% that authors should try to avoid such use. Do not be tempted to use the
% cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
% not format its papers in such ways.





% *** PDF, URL AND HYPERLINK PACKAGES ***
%
\usepackage{url}
% url.sty was written by Donald Arseneau. It provides better support for
% handling and breaking URLs. url.sty is already installed on most LaTeX
% systems. The latest version can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
% Read the url.sty source comments for usage information. Basically,
% \url{my_url_here}.





% *** Do not adjust lengths that control margins, column widths, etc. ***
% *** Do not use packages that alter fonts (such as pslatex).         ***
% There should be no need to do such things with IEEEtran.cls V1.6 and later.
% (Unless specifically asked to do so by the journal or conference you plan
% to submit to, of course. )


% correct bad hyphenation here
%\hyphenation{op-tical net-works semi-conduc-tor}




\begin{document}

\title{Implementing a High-Performance Recommendation System Using Phoenix++}
% conference papers do not typically use \thanks and this command
% is locked out in conference mode. If really needed, such as for
% the acknowledgment of grants, issue a \IEEEoverridecommandlockouts
% after \documentclass


\author{\IEEEauthorblockN{Chongxiao Cao*
%\thanks{*This is work done during an internship with Samsung Research America-Silicon Valley}
\thanks{*The work outlined in this paper was accomplished during an internship with Samsung Research America - Silicon Valley}
}
\IEEEauthorblockA{Innovative Computing Lab\\
University of Tennessee at Knoxville\\
ccao1@utk.edu}
\and
\IEEEauthorblockN{Fengguang Song}
\IEEEauthorblockA{Computer Science Lab\\
Samsung Research America\\
f.song@samsung.com}
\and
\IEEEauthorblockN{Daniel G. Waddington}
\IEEEauthorblockA{Computer Science Lab\\
Samsung Research America\\
d.waddington@samsung.com
}}


\maketitle


\begin{abstract}
Recommendation systems are important big data applications that are used in many business sectors of the global economy.
While many users utilize Hadoop-like MapReduce systems to implement recommendation systems,
we utilize the high-performance shared-memory MapReduce system Phoenix++ \cite{phoenix++} 
%to design a faster version of recommendation engine.
to design a faster recommendation engine.
In this paper, we design a distributed out-of-core recommendation algorithm to maximize the usage
of main memory, and devise a framework that invokes Phoenix++ as a sub-module to achieve high performance.
The design of the framework can be extended to support different types of big data applications.
The experiments on Amazon Elastic Compute Cloud (Amazon EC2) demonstrate that our new recommendation system
can be faster than its Hadoop counterpart by up to 225\% 
%meanwhile keeping the same good recommendation quality. 
without losing recommendation quality. 


\end{abstract}

% A category with the (minimum) three required fields
%\category{H.4}{Information Systems Applications}{Miscellaneous}
%A category including the fourth, optional field follows...
%\category{D.2.8}{Software Engineering}{Metrics}[complexity measures, performance measures]

%\terms{Big Data}
%\keywords{MapReduce, Recommendation System, Hadoop} % NOT required for Proceedings

\section{Introduction}
% 1 page
%-the emerging big data area
%-high performance recommendation system is an important application.
%-Currently, most people use Hadoop based solutions (easy but slow!)
%-We tried to use Phoenix++ and found it delivers much better performance than hadoop.
%   Figure: 1 to 64 core Phoenix VS 8-node Hadoop (word count).
%   That's why we plan to use Phoenix++ to design our new HPC recommendation system!!
%-Phoenix++ based (faster but support one-node only!)
%  -So we modified the existing algorithm to support not only out-of-core computation but also distributed systems.
%  -Also tried a few optimizations.
%-Compared our program with Hadoop data-analytics library. Show better performance.
Big data is sweeping into more sectors of the global economy due to innovation in computer systems, storage devices, 
sensor networks, mobile devices, as well as sophisticated capabilities to analyze data. 
%For instance, as of June 2011, a user may spend $600$ dollars to buy a disk drive that can store all of the world's music \cite{mckinsey}.
%online users share 30 billion pieces of content on Facebook every month \cite{mckinsey}.
%There are various ways to use big data to create value, especially in the domains of
%retail, health-care, public section, manufacturing, and telecommunication.
A few important big data applications are: data mining, recommendation systems, enterprise analytics,  visualization, and so on
\cite{mckinsey}.
In this paper, we focus on designing a high-performance recommendation system.


%Paragraph: Importance of recommendation system.
Recommendation systems are typically used to predict a user's response given a list of options.
Many companies such as Google, Amazon, Netflix, and the New York Times, have developed their own
recommendation engines to offer customers options of what products, books, or articles they might like.
Recommendation systems can be divided into two broad categories: 
{\it content-based systems} and {\it collaborative filtering systems} \cite{data-mining-book}.
Content-based systems recommend items that contain elements (e.g., text, image, movie star) similar to those the user liked in the past.
Collaborative filtering systems, on the other hand, recommend items based on ``closeness'' between users or between items.
Although many other recommendation systems exist, 
the collaborative filtering systems so far have been thought of as the best recommendation systems \cite{recommend-sys-survey, IBCF-alg}.
%It requires users (or customers) to rate a subset of items. 
%Based on the relationship between a subset of items, it can recommend an item that
%a user has not purchased, but is similar to what he or she liked in the past.
Our work builds upon ``item-based collaborative filtering recommendation algorithms''
and supports both shared-memory multicore systems and distributed-memory clusters.


%Paragraph: How recommendation system is implemented so far?  %We quickly do experiment to compare Phoenix++ vs Hadoop.
Recommendation systems are often implemented with the MapReduce programming model due to the model's
simplicity and robustness. 
Users can simply write some MapReduce jobs that contain a {\it map} function and a {\it reduce} function, 
then combine those 
jobs together and implement a recommendation system quickly \cite{ibcf-hadoop-2011, ibcf-sim-hadoop-2012}. 
The most widely used MapReduce framework is 
Apache Hadoop \cite{hadoop}, which supports distributed-memory clusters. 
On the other hand, researchers have designed MapReduce systems on shared-memory multicore systems 
(e.g., Phoenix++ from Stanford University \cite{phoenix++}).

In order to realize a high performance recommendation system, we first need to choose a faster MapReduce framework.
%We compared the shared-memory framework of Phoenix++ 
%with the distributed-memory framework of Hadoop.
Figure \ref{fig:wc} compares the performance of Phoenix++ and Hadoop for 1 GB {\tt word count} running on Amazon Elastic Compute Cloud (Amazon EC2). Both experiments are executed on one compute
node (i.e., one worker node in Hadoop) with a different number of virtual CPUs.
%For Hadoop experiment, we made used of Amazon Elastic MapReduce (Amazon EMR), which provided Hadoop version 1.0.3, and set {\tt m1.small} instance  as the master node. 
%We increased the number of virtual CPUs (vCPUs) to compare the performance 
%between Phoenix++ and Hadoop. 
The result demonstrates that Phoenix++ can outperform Hadoop greatly 
(note the log-scale in the y-axis). 
For instance, Phoenix++ is faster than Hadoop by $28.5\times$ on four virtual CPUs,
i.e., $7.4$ seconds versus $211$ seconds.

\begin{figure}[t]
\centering
%\includegraphics[width=\linewidth]{wc}
\includegraphics[width=0.75\linewidth, trim=0.1cm 0.2cm 0.1cm 0.1cm, clip=true]{wc}
\caption{Experiment of 1 GB {\tt word count} using Phoenix++ and Hadoop. The y-axis is in a logarithmic scale.}
\label{fig:wc}
\end{figure}

%Paragraph: Overview and Good Introduction to our solution!!
Phoenix++ provides much better performance than Hadoop; however, it does not support clusters and assumes 
the input size is less than the main memory size.
The goal of our work is to build a high performance recommendation system for both shared-memory and distributed-memory systems.
%The new recommendation system utilizes Phoenix++'s in-memory processing capability to outperform Hadoop greatly.
In our approach, we design a distributed out-of-core item-based collaborative filtering recommendation algorithm 
and build a framework that views and invokes Phoenix++ as a black box.
%Our new system can process big data recommendations in four types of scenarios:
%i) single node and in-core, ii) single node and out-of-core,
%iii) multiple nodes and in-core, and iv) multiple nodes and out-of-core.
%Here ``in-core'' and ``out-of-core'' mean whether the entire input can be stored in memory or not.
%Depending on a computing platform's memory size, our recommendation system is able to adapt
%to one of the four scenarios.
Our new recommendation system can adapt to the following four types of scenarios:
i) single node and in-core, ii) single node and out-of-core,
iii) multiple nodes and in-core, and iv) multiple nodes and out-of-core.
Here ``in-core'' and ``out-of-core'' mean whether the entire input can be stored in memory or not.
%Depending on a computing platform's memory size, our recommendation system is able to adapt
%to one of the four scenarios.
%We have done experiments on Amazon EC2.
The experimental results on an Amazon EC2 cluster show that 
our new recommendation system can achieve up to 225\% speedup over
the de facto standard machine-learning library on Hadoop (i.e., Apache Mahout \cite{Mahout}).
%on an Amazon EC2 cluster consisting of eight compute nodes.

This paper outlines the following contributions:
\begin{itemize}
\item A distributed out-of-core recommendation algorithm to maximize the usage of main memory. %(at the same time to minimize the overhead of disk I/O).
% I commented this out because our method copies input matrix to every node to minimize I/O communication in NFS, this is caused by file system architecture. The 
% algorithm itself doesn't have advantages to minimize disk I/O. Actually the out-of-core matrix multiplication increase the disk I\O compared with one time load.
\item A new high-performance recommendation system that works on both shared-memory manycore systems and distributed-memory clusters. It outperforms Apache Mahout by up to 225\%.
\item A general methodology to extend shared-memory MapReduce frameworks (e.g., Phoenix++) to distributed memory clusters regardless
of memory sizes.
\end{itemize} 
 
The rest of this paper is structured as follows. Section \ref{background} explains the background for our work in more detail. Section \ref{design} 
presents the design of our recommendation system. Section \ref{experiments} shows the experimental results. Section \ref{related} presents related work.
%Section \ref{conclusion} summarizes and concludes our work.
Section \ref{conclusion} concludes our work and Section \ref{future} outlines future work.

\section{Background}\label{background}
%Many big data applications are implemented using the MapReduce programming model.
This section briefly introduces the MapReduce programming model with two of its popular implementations,
and also introduces the item-based collaborative filtering (CF) algorithm.  

\subsection{MapReduce}
The MapReduce programming model inherits the concept of {\it map} and {\it reduce} from functional
programming languages to support parallel and distributed computing.
Given a ``daunting'' task to develop large-scale parallel applications, 
a user can simply provide a {\it map} and a {\it reduce} function to fulfill his or her goal quickly.
The two functions are as follows:

%\noindent $\textbf{Map}: (\emph{key1}, \emph{value1}) \rightarrow list(\emph{key2}, \emph{value2})$
$\textbf{Map}: (\emph{key1}, \emph{value1}) \rightarrow list(\emph{key2}, \emph{value2})$

%\noindent $\textbf{Reduce}: (\textit{key2}, list(\textit{value2})) \rightarrow list(\textit{value2})$
$\textbf{Reduce}: (\textit{key2}, list(\textit{value2})) \rightarrow list(\textit{value2})$

A {\it map} function consumes an input pair of $<$\textit{key1}, \textit{value1}$>$ and produces a new set of intermediate $<$\textit{key2}, \textit{value2}$>$.
Next, the runtime system groups all the intermediate \textit{value2} based on the same \textit{key2}.
Finally, a {\it reduce} function takes as input an intermediate \textit{key2} and its \textit{value2} list,
and produces an output \cite{google-mapreduce}.
The MapReduce model not only makes parallel programming much easier, 
but also automatically supports fault tolerance, parallelization, data distribution, and load balancing.
% insert a figure of MapReduce if pages are not enough

\subsubsection{Hadoop implementation}
%-Briefly introduce hadoop
Hadoop is an open-source software framework that is implemented in Java. A Hadoop cluster consists of a single master node and multiple worker nodes. 
The master node is responsible for scheduling the split tasks of a MapReduce job to the worker nodes, monitoring them, and re-executing the failed tasks. The worker nodes execute 
the tasks as directed by the master.

The Hadoop Distributed File System (HDFS) is a distributed, scalable, and portable file system in the Hadoop framework. It is designed for storing very large files with streaming data access
patterns, running on clusters of commodity hardware \cite{hadoop-definitive-guide}. Files in HDFS are split into blocks (64 MB by default); each block is replicated multiple times and distributed across different data nodes.

\subsubsection{Phoenix++ implementation}
%-Introduce phoenix++
Phoenix++ is a C++ implementation of the MapReduce programming model for shared-memory multicore systems. It allows users to write high-performance 
code easily, with scalability comparable to hand-coded pthreads solutions. The bottlenecks of the shared-memory MapReduce programs
lie in the intermediate key-value data layout, memory allocation 
pressure, and framework overhead. To solve these bottlenecks, Phoenix++ provides a flexible intermediate key-value storage abstraction and an effective combiner to minimize the memory usage. Users can also adapt them to particular characteristics of a workload~\cite{phoenix++}.

\subsection{Item-based Collaborative Filtering (CF) Algorithm}
%The CF algorithm is actually motivated by the idea that people often get the best recommendation from someone with a similar taste. In a typical CF recommendation system, there is a list
The CF algorithm is motivated by the idea that people often get the best recommendations from someone with a similar taste. In a CF recommendation system, there is a list
of $m$ users $U = \{u_{1},u_{2},\dots,u_{m}\}$ and a list of $n$ items $I = \{i_{1},i_{2},\dots,i_{n}\}$. Each user $u_{i} \in U$ gives some numerical ratings $r_{ij} \in R$ 
for the corresponding item $i_{j} \in I$, where $R$ is the rating set. 

The CF algorithm has two forms: user-based CF algorithm and item-based CF algorithm. 
%The user-based CF algorithm has two major 
%drawbacks: \emph{sparsity} and \emph{scalability} \cite{IBCF-alg}, while the item-based CF algorithm can solve the problems. 
In this paper, we adopt the item-based CF algorithm to implement our recommendation system 
because it usually outperforms the user-based CF algorithm in large and sparse datasets \cite{PCI}.

The idea behind an item-based CF algorithm is that a user is most likely to purchase items that are similar to the ones he or she previously bought. The algorithm has two steps: 
\begin{itemize}
\item \textbf{Similarity Computation}: It computes the similarity between items and selects the most similar items.
%\item \textbf{Prediction Computation}: After the most similar items are found, the prediction is computed by taking the weighted average of the target user's 
%ratings on these similar items.
\item \textbf{Prediction Computation}: It computes the prediction by taking the weighted average of the target user's ratings on the most similar items.
\end{itemize} 

The basic idea to compute the similarity between item $i$ and item $j$ is to consider them as two user-rating vectors and 
then apply a vector-similarity computation to determine their similarity $S_{i,j}$. There are several ways to compute the similarity (e.g., cosine, Pearson correlation, adjusted cosine, and so on). 
Our work uses the Pearson correlation \cite{IBCF-alg} to measure the similarity $S_{i,j}$ as shown in Equation \ref{eq:pearson}. 
Here $U$ denotes the users who have rated both item $i$ and item
$j$, $R_{u,i}$ denotes the rating of user $u$ on item $i$, and $\overline{R}_{i}$ denotes the average rating of the $i$-th item.
\begin{equation}
S_{i,j}=\dfrac {\sum _{u\in U}\left( R_{u,i}-\overline {R}_{i}\right)\left( R_{u,j}-\overline {R}_{j}\right)} {\sqrt{\sum _{u \in U}\left(R_{u,i}-\overline{R}_{i}\right)^{2}} \sqrt{\sum _{u \in U}\left(R_{u,j}-\overline{R}_{j}\right)^{2}} }
\label{eq:pearson}
\end{equation}

To predict user $u$'s rating on item $i$, all the items similar to $i$ will be considered. 
The weighted sum method is used, where each rating is weighted by the corresponding similarity 
$S_{i,j}$ between items $i$ and $j$. The prediction $P_{u,i}$ can be computed as follows \cite{IBCF-alg}:
\begin{equation}
P_{u,i}=\dfrac {\sum _{\text{all similar items}, N}\left(S_{i,N}*R_{u,N}\right)} {\sum_{\text{all similar items}, N}\left(|S_{i,N}|\right) }
\label{eq:prediction}
\end{equation}



\section{Design of Our Recommendation System}
\label{design}
%1 page
%A little bit of overview here.
In this section, we first introduce how to extend the item-based CF algorithm to support the out-of-core scenario 
and distributed memory clusters. Then we introduce how to implement the 
algorithm using Phoenix++. Finally, we discuss potential issues of our recommendation system.

\subsection{The Extended Item-based CF Algorithm}\label{model}
%Pure algorithm, which is able to support out-of-core, multiple nodes
\subsubsection{Formulating the Problem}
The Item-based CF algorithm can be transformed to the form of matrix computation \cite{ibcf-sim-hadoop-2012}. 
Let $A$ be a $|U|\times|I|$ rating matrix, where $U$ denotes a set of users and $I$ denotes a set of items. 
%In addition, each row $A_{u\bullet}$ represents a user $u$ and each column $A_{\bullet j}$ represents an item $j$.
In addition, let $A_{u\bullet}$ denote the $u$-th row of matrix $A$ and $A_{\bullet j}$ denote the 
$j$-th column of matrix $A$.  Each row $A_{u\bullet}$ represents a user $u$, each column $A_{\bullet j}$ represents an item $j$, and $A_{u,j}$ represents user $u$'s rating on item $j$.

Let $S$ be an $|I|\times|I|$ similarity matrix and $S_{i,j}$ denote the similarity between item $i$ and item $j$.
$S_{i,j}$ is computed by using Pearson correlation in Equation \ref{eq:pearson} between column $A_{\bullet i}$ 
and column $A_{\bullet j}$.
In our approach, we overload the dot product operator in matrix multiplication 
with Pearson correlation.
The similarity matrix $S$ can be computed by the following matrix multiplication:
\begin{equation}
S = A^{T}A
\label{eq:sim_mul}
\end{equation}
%The similarity matrix $S$ can be computed by the following matrix multiplication:
%\begin{equation}
%S = A^{T}A
%\label{eq:sim_mul}
%\end{equation}
%In our approach, we overload the dot product operator in matrix multiplication 
%with Pearson correlation shown in equation \ref{eq:pearson}, 

Similarly, let $P$ be a $|U|\times|I|$ prediction matrix and $P_{u,i}$ denote the prediction of user $u$ on item $i$. 
We also overload the dot product operator in matrix multiplication with the weighted sum method in Equation \ref{eq:prediction}, 
after which $P$ can be computed by the rating matrix $A$ multiplying the similarity matrix $S$:
\begin{equation}
P = AS
\label{eq:pre_mul}
\end{equation}

The entire item-based CF recommendation algorithm can be represented as two matrix multiplications:
\begin{equation}
P = A(A^{T}A)
\label{eq:total_mul}
\end{equation}

\subsubsection{Distributed Memory Cluster Support}
After modeling the item-based CF algorithm as matrix multiplications, 
the next step is to distribute the computation to different compute nodes to execute in parallel.
Figure \ref{fig:dist} explains how our parallelization is implemented. 
The rating matrix $A$ is divided into different column panels $A_{i}$ evenly and distributed to every compute node $i$. 
Every compute node $i$ executes two steps of computation based on $A_{i}$ simultaneously. 
In the step of similarity computation, compute node $i$ computes similarity matrix panel $S_{i}$.
In the step of prediction computation, compute node $i$ computes prediction matrix panel $P_{i}$.
Because every compute node produces its own similarity matrix panel needed for prediction, there is no global barrier for all compute nodes between these two steps.
%In the step of similarity computation, the rating matrix $A$ is partitioned into different column panels 
%$A_{i}$ averagely, and each compute node $i$ computes the similarity matrix panel $S_{i}$. 
%In the step of prediction computation, each compute node $i$ only computes its own prediction matrix panel $P_{i}$ based on
%the rating matrix $A$ and its similarity matrix panel $S_{i}$.
%Because every compute node produces its own similarity matrix for prediction, there is no global barrier needed between the similarity computation step and the prediction step.
\begin{figure}[th]
\centering
\includegraphics[width=0.8\linewidth, trim=0.5cm 3cm 1cm 2cm, clip=true]{dist}
\caption{Extending the item-based CF algorithm to support distributed clusters.}
\label{fig:dist}
\end{figure}

\subsubsection{Out-of-core Support}\label{single-ooc}
After dividing the work into different compute nodes, the data size on each node may still be larger than the memory size, which makes in-memory computation infeasible. 
Figure \ref{fig:ooc} shows how we deal with the out-of-core cases. 
This strategy can be adapted in both steps of similarity computation and prediction computation. 
%Without loss of generality, 
Assume the inputs are matrices $A$ and $B$ and the output is matrix $C$, whose total size is larger than the memory size. 
Matrix $A$ is partitioned into $m$ row strips by the
block size $block\_row$, while matrix $B$ is partitioned into $n$ column strips by the block size $block\_col$.
%each time only $block\_row \times block\_col$ elements of matrix $C$ are
Each computation reads one row strip from matrix $A$ and one column strip from matrix $B$ 
into memory, 
produces a $block\_row \times block\_col$ block in matrix $C$ and then writes back to disk. 
%computed in memory and then written back to disk. 
After $m\times n$ times of in-memory computations, the computation for matrix $C$ is completed.
\begin{figure}[b]
\centering
\includegraphics[width=0.8\linewidth, trim=0cm 1cm 1cm 0cm, clip=true]{ooc}
\caption{Extending the item-based CF algorithm to support the out-of-core scenario.}
\label{fig:ooc}
\end{figure}

\subsection{Our Implementation Using Phoenix++}
%Many implementation details such as design, implementation, file read/write, etc...
We implement the in-memory MapReduce computation with Phoenix++. 
%As section \ref{model} mentioned, all the computation has been transformed into matrix multiplication,
There are two typical ways to compute matrix multiplication in MapReduce:
\begin{itemize}
\item \textbf{Inner product}: Each Map task computes an element of the output matrix based on the inner product 
%of a matrix row and a matrix column. No reduce tasks are needed.
of a row from the left matrix and a column from the right matrix. No reduce tasks are needed.
\item \textbf{Outer product}: The $i$-th Map task computes the outer product of the $i$-th column of the left matrix and the $i$-th row of the right matrix. Each Reduce task simply sums up all the partial results.
\end{itemize} 
The inner product method is highly efficient as no reduce task is required, 
which obviates the overhead for shuffling intermediate keys and values.
%which obviates the need for the {\tt shuffle} phase between the {\tt map} phase and the {\tt reduce} 
%phase and its associated sorting and cache miss overhead. 
We implement the similarity computation and the prediction computation as two MapReduce jobs using the inner product method 
with Phoenix++. Each Map task computes $100\times100$ elements of the output matrix. 
This can be further tuned to attain a better performance.

The rating matrix $A$ is very sparse in real cases; thus, we store it as a sparse matrix.
%we used compressed sparse column (CSC) format and compressed sparse row (CSR) format to store it, the former is used in 
%similarity computation as $A^{T}$ and the latter is used in prediction. 
Network File System (NFS) is currently used in our Phoenix++ cluster. 
In order to minimize the I/O communication in NFS, the rating matrix $A$ is copied to each compute node.

The execution steps of our recommendation system are as follows (see Figure \ref{fig:steps}):
\begin{itemize}
\item The input is a rating file with simple space-separated value format like ``user\_id item\_id rating''. 
The preprocessing step transforms the input into a sparse matrix format, partitions it, and sends it to every compute node.
\item The similarity computation step computes a partial similarity matrix by using Phoenix++ on every compute node.
\item The sort step sorts elements in every column of partial similarity matrix $S_{i}$, and prunes the elements that are less 
than the similarity threshold. The most similar 
items are selected based on a neighborhood size after sorting.
\item The prediction step computes the potential ratings of unrated items for every user by using Phoenix++ on every compute node. 
\end{itemize}
\begin{figure}[th]
\centering
\includegraphics[width=0.8\linewidth]{steps}
\caption{Execution steps in our recommendation system.}
\label{fig:steps}
\end{figure}


\subsection{Potential Issues}\label{issues}
%space, load balance, 
\subsubsection{Storage Efficiency}
For the purpose of minimizing the I/O communication in NFS, 
we copy the input rating matrix to every node, which makes the storage inefficient in the disk. 
Utilizing a parallel file system is a feasible solution to this problem in the future. 
%A future solution to this issue is to use distributed file system.
%A future solution to this issue is to use parallel file system.
\subsubsection{Load Balance}
Currently, data is divided into column panels evenly, so the scalability of our recommendation system is not close to optimum if ratings are not uniformly distributed in every column. Those nodes receiving frequently rated items would perform more computations than others and become the bottleneck. 
A fine-grained data distribution method like 1-D column cyclic distribution is a feasible way to achieve load balance in our future work.
%A fine-grained data distribution method like 1-D column cyclic distribution is a feasible way to achieve load balance on every node and solve this issue in our future work.
%A feasible way is to implement load balance use 1-D column block cyclic distribution to partition data in future work.
%One solution is to distribute data in 1-D column cyclic way in future work.

\section{Experimental Results}
\label{experiments}
In this section, we present experiments conducted on Amazon EC2
to measure the prediction quality and performance of our recommendation system
(we call it {\em dist-phoenix++}).
We also compared our system with the item-based CF algorithm in Apache Mahout. 
Both real-world datasets and randomly generated datasets were tested.

\subsection{Apache Mahout}
Mahout is an open source machine learning library from Apache. It is written in Java and primarily focused on recommendation engines, clustering, and classification.
Its core algorithms are implemented on top of Hadoop so that it can scale on large datasets \cite{Mahout}. 
The item-based CF algorithm implemented in Mahout is compared with our dist-phoenix++.

% 1.5 page
%A section to introduce Mahout first,
%Experimental setup second.
\subsection{Experiment Setup}
Our experiments were conducted on Amazon EC2. We used a \textit{m1.xlarge} instance as a compute node, which is a 64-bit virtual machine with four virtual CPUs and 15 GB memory.

We built an eight-node cluster for dist-phoenix++.
%and used the NFS file system and MPI (MPICH2) to launch processes on different nodes. 
NFS was used as the file system, and MPI (MPICH2) was used to launch processes on different nodes.

For experiments with Mahout, we used the existing Amazon Elastic MapReduce (Amazon EMR) directly.
%For experiments with Mahout, we used Amazon Elastic MapReduce (Amazon EMR) directly.
The Hadoop version on Amazon EMR is $1.0.3$. 
%We allocated eight \textit{m1.xlarge} instances as compute nodes (i.e., worker node in Hadoop) 
We allocated eight \textit{m1.xlarge} instances as compute nodes
and one \textit{m1.xlarge} instance as the master node. Amazon Simple Storage Service (Amazon S3) was 
the file system used in this Hadoop cluster.

\subsection{Prediction Quality}
\begin{figure}[th]
\centering
%\includegraphics[width=\linewidth]{quality}
\includegraphics[width=0.75\linewidth, trim=0.1cm 0.2cm 0.1cm 0.1cm, clip=true]{quality}
\caption{Prediction quality on MovieLens 100k dataset.}
\label{fig:quality}
\end{figure}
To test the quality of the recommendation system, we used the MovieLens 100k dataset \cite{movielens}. This dataset contains $100,000$ ratings by 943 users on 1682 movies. 
%80\% of the data was used in training set and 20\% of the was used in testing.
Both Mahout and our dist-phoenix++ system use Pearson Correlation as the similarity measurement, where
the similarity threshold is $0.01$.
The prediction quality is measured by \textit{Mean Absolute Error} (MAE):
\begin{equation}
MAE=\dfrac {\sum _{i=1}^{N}|p_{i}-q_{i}|} {N}
\label{eq:MAE}
\end{equation}
Here $p_{i}$ is the predicted value and ${q_{i}}$ is the real value. 
In Figure \ref{fig:quality}, 80\% of the data was used as a training set and 20\% 
of the data was used as a testing set. In total, five different cases were tested. 
The default recommender in Mahout has the 
worst accuracy, because, by default, Mahout sets the similarity neighborhood size to $100$ and only $10$ items are considered for every user when doing prediction. In our dist-phoenix++ recommendation system, we set 
the similarity neighborhood size to the total number of items and all the items a user has rated will be 
considered during prediction. The MAE of dist-phoenix++ varies from $0.82$ to $0.85$. After 
tuning Mahout with the same setting as dist-phoenix++, its accuracy improves substantially 
%and the MAE was from $0.76$ to $0.79$. 
to nearly $0.77$. 

In the following performance experiments, for both dist-phoenix++ and Mahout, 
we use $100$ as the similarity neighborhood size and $0.01$ as the similarity threshold.
% and all rated items will be considered in prediction.


%4 figures
% 1) On one node: compare performance and prediction accuracy
%    i)  in-memory: quality comparison  (real data)
%    ii) in-memory: performance comparison (real data)
%    iii)in-memory: impact of RAM size on the overall performance (random matrix)
% 2) Scalability: 1, 2, 4, 8 nodes
%   Suppose a node has 1GB memory (to save time).
%   Input size could be 6 GB, run the same input on 1, 2, 4, (8?) nodes
%   to show the scalability curve (random matrix).

\subsection{Single Compute Node Performance}
We first compare the performance on a single compute node when the data size is small enough to fit into memory. 
All $100,000$ ratings in MovieLens 100k dataset were used as input. The jester-3 dataset was also selected, which contains $24,938$ users' ratings on $101$ jokes \cite{jester3}. 
In dist-phoenix++, all the data was loaded into memory only once and computed by Phoenix++. 
As shown in Figure \ref{fig:inmem}, 
dist-phoenix++ outperforms Mahout greatly due to the advantage of in-memory computation. 

Note that both Mahout tests ran for more than $10$ minutes. 
This is because the launching overhead of one MapReduce job on Amazon EMR is around $1$ minute, 
and the item-based CF algorithm in Mahout consists of $10$ MapReduce jobs.
This overhead can be reduced as the input data size increases. 

%Both of the running time of $2$ tests on Mahout are more than $10$ minutes, this is because the Item-based CF algorithm in Mahout contains $10$ MapReduce jobs and each of them needs nearly $1$ minute to launch on Amazon EMR. 
%This overhead can be dismissed when we increase input data 
%size to do large data applications.  

\begin{figure}[th]
\centering
%\includegraphics[width=\linewidth]{inmem}
\includegraphics[width=0.73\linewidth, trim=0.1cm 0.1cm 0.1cm 0.1cm, clip=true]{inmem}
\caption{Performance on single compute node when data can fit into memory.}
\label{fig:inmem}
\end{figure}

To investigate the impact of disk I/O on the overall performance, 
we varied the size of the data that can be loaded into memory.
The dataset we used is MovieLens-10M, 
which contains $10,000,054$ ratings applied to $10,681$ movies and $71,567$ users \cite{movielens}.
%We tried different size for block sizes $block\_row$ and $block\_col$ mentioned in section \ref{single-ooc} to check the performance change.
As shown in Figure \ref{fig:single-ooc}, $100\%$ means all the input data is loaded into memory once, $50\%$ means each time $50$\% of input data is loaded into memory,
and so forth for $25\%$ and $12.5\%$. 
When disk I/O increases, the execution time of dist-phoenix++ increases 
slightly, and all of them are much faster than Mahout.

\begin{figure}[th]
\centering
%\includegraphics[width=\linewidth]{single-ooc}
\includegraphics[width=0.73\linewidth, trim=0.1cm 0.1cm 0.1cm 0.1cm, clip=true]{single-ooc}
\caption{Performance on single compute node for MovieLens 10M dataset when block size changes.}
\label{fig:single-ooc}
\end{figure}

\subsection{Multiple Compute Nodes Performance}
%The scalability requirement of parallel processing systems need the computation speedup to be proportional to the number of compute nodes. 
To investigate the multiple compute nodes performance of our recommendation system, we 
increased the number of compute nodes from 1 to 8. Also, we simply copied the MovieLens-10M dataset $10$ times to get a 100M ratings dataset applied to 
$10,681$ movies and $715,670$ users. 
The out-of-core block sizes $block\_row$ and $block\_col$ mentioned in Section \ref{single-ooc} were set to $2,000$ and $1,000$ in similarity computation, while $100,000$ and $1,000$ 
in prediction computation. 

Figure \ref{fig:100M-time} and Figure \ref{fig:100M-speedup} show the running time and scalability of dist-phoenix++ versus Mahout. For $8$ compute nodes, the running time of dist-phoenix++ is $1.07$ hours, compared with $2.4$ hours for Mahout, which amounts to a $2.25 \times$ speedup. 

However, the scalability of dist-phoenix++ is not as good as Mahout. 
It has a $2.82 \times$ speedup when increasing the number of compute nodes from $1$ to $8$.
%while Mahout gets $7.05 \times$ speedup. 
We believe this is caused by the load balancing issue explained in Section \ref{issues}.
%the ratings of original MovieLens dataset is not uniformly distributed, 
In the MovieLens dataset, frequently rated movies are associated with a small number of items 
%and are actually distributed to the first compute node. This makes the first node 
and are distributed to the first compute node. This makes the first node 
execute more computation than the other nodes and hence become the bottleneck.

\begin{figure}[h]
\begin{subfigure}[Running time for a growing number of compute nodes.]{
	%\includegraphics[width=0.6\linewidth]{100M_time}
    	\includegraphics[width=0.462\linewidth, trim=0.1cm 0.1cm 0.1cm 0.1cm, clip=true]{100M_time}
	\label{fig:100M-time}
}
\end{subfigure}
\begin{subfigure}[Speedup for a growing number of compute nodes.]{
	%\includegraphics[width=0.6\linewidth]{100M_speedup}
    \includegraphics[width=0.462\linewidth, trim=0.1cm 0.1cm 0.1cm 0.1cm, clip=true]{100M_speedup}
	\label{fig:100M-speedup}
}
\end{subfigure}
\caption{Performance of dist-phoenix++ and Mahout on 100M ratings dataset.}
\label{fig:100M}
\end{figure}

\begin{figure}[th]
\centering
%\includegraphics[width=\linewidth]{50M_speedup}
\includegraphics[width=0.75\linewidth, trim=0.7cm 0.2cm 0.7cm 0.7cm, clip=true]{50M_speedup}
\caption{Speedup for a growing number of compute nodes for dist-phoenix++ on a 50M ratings uniformly distributed dataset.}
\label{fig:50M-speedup}
\end{figure}

To further investigate the scalability of our dist-phoenix++, we created a uniformly distributed 50M ratings dataset. 
This dataset was randomly generated and 
contained $10,000$ items and $500,000$ users. Ratings were evenly distributed such that each item had $5,000$ ratings. 
The out-of-core block sizes were the same as the previous test.
Figure \ref{fig:50M-speedup} shows that dist-phoenix++ has a linear speedup. 
With $8$ compute nodes, dist-phoenix++ achieves a speedup of $7.44 \times$. 

%Figure \ref{fig:50M-speedup} shows the result and dist-phoenix++ gains $7.44 \times$ 
%speedup when increasing number of compute nodes from $1$ to $8$, which is close to the ideally linear speedup for increasing compute nodes.


%.5 page
\section{Related Work}
\label{related}
%shared-memory MR
%There are several MapReduce implementations that try to store the intermediate results to memory to reduce the disk I/O cost. 
%Phoenix++ is a MapReduce library that is designed for shared-memory multicore systems.
%It has demonstrated good performance comparable to hand-tuned parallel programs using pthreads \cite{phoenix++,phoenix}.
There are several shared-memory MapReduce libraries optimized for multicore architectures. %Besides Phoenix++, 
%Phoenix++ is a MapReduce library that is designed for shared-memory multicore systems \cite{phoenix++}.
Phoenix++ has 
demonstrated good performance comparable to hand-tuned parallel programs using pthreads \cite{phoenix++}.
Metis, from MIT, implements a new data structure (i.e., hash table and B+ tree) to group intermediate key/value pairs to provide high performance \cite{metis}.
Tiled-MapReduce extends the MapReduce model with a tiling algorithm \cite{tiled-mr}.
%The tiling algorithm divides a MapReduce task into a set of smaller subtasks and processes the subtasks iteratively.
%Processing the same subtask repeatedly results in a smaller memory footprint, better data locality, and task parallelism.
However, these libraries only work on shared-memory machines and assume that the input fits in main memory.
Our work extends the shared-memory library to support distributed-memory clusters. 

%spark, twister
Spark is a tool developed at
the University of California at Berkeley to support in-memory cluster computing \cite{sparc}.
It provides a number of primitives to allow users to perform data mining efficiently. The primitives include
not only {\it map}, {\it reduce}, but also {\it union}, {\it join}, {\it filter}, {\it sample}, and so on.
Spark is often applied to iterative data mining applications. % and works on distributed-memory clusters.
It works on distributed-memory clusters but is not designed to handle disk I/O for intermediate results automatically.

Twister is another widely used MapReduce runtime system that supports iterative MapReduce computations \cite{twister}.
It first reads data from disks to local memories on distributed nodes, 
then starts to compute iteratively, and finally writes the output to disk. 
It is possible to extend Twister to store large intermediate results 
in local disks instead of buffering in memory. 
Unlike Spark and Twister, our approach is designed to scale up within a single node first, then
scale out to many nodes. 
%The idea behind our approach is that a high-performance single-node MapReduce
%can provide a high cluster performance as well.

%TODO: Introduce the two papers: map reduce based recommendation systems!!!!!
There have also been several works on implementing a recommendation system in MapReduce: Schelter et al. presented a recommendation 
system based on a scalable similarity-based neighborhood method \cite{ibcf-sim-hadoop-2012}. Jiang et al. presented a scaling-up item-based CF algorithm using four 
MapReduce jobs \cite{ibcf-hadoop-2011}. However, all of those implementations used Hadoop, 
whereas our work implemented the recommendation system in MapReduce with Phoenix++ 
on distributed-memory clusters.

%\section{Conclusion and Future Work}
\section{Conclusion}
\label{conclusion}
In this paper, we proposed a method to utilize shared-memory multicore MapReduce systems to
implement a distributed high-performance recommendation system. 
It is inspired by the fact that Phoenix++ can run \texttt{word count} $28.5$ times faster than Hadoop on Amazon EC2.
To utilize the sub-module of Phoenix++, we extend the item-based collaborative filtering 
recommendation algorithm to support both distributed-memory clusters and out-of-core computations.
The experimental results show that the recommendation quality of our new system is
comparable to that of Apache Mahout, while the performance is faster than Mahout by up to $2.25$ times on eight compute nodes.
The reason we can achieve better performance is because the shared-memory MapReduce system is highly
optimized to conduct in-memory computations and can reduce the number of I/O operations greatly.
%Although our approach is proposed to utilize Phoenix++ to build a high performance recommendation system,

\section{Future Work}
\label{future}
We have proposed a new approach for utilizing Phoenix++ to build a high-performance recommendation system;
the same approach can be used to build other types of big data applications.
Our future work is to make the framework more general so that different application 
developers can use the framework as a library.


% The following two commands are all you need in the
% initial runs of your .tex file to
% produce the bibliography for the citations in your paper.
\bibliographystyle{IEEEtran}
\bibliography{IEEEabrv,paper}  % paper.bib is the name of the Bibliography in this case
% You must have a proper ".bib" file
%  and remember to run:
% latex bibtex latex latex
% to resolve all references
%
% ACM needs 'a single self-contained file'!
%
%APPENDICES are optional
%\balancecolumns
%\appendix
%Appendix A
%\section{Headings in Appendices}
%\balancecolumns
% That's all folks!
\end{document}
