\documentclass[conference]{IEEEtran}
% Add the compsoc option for Computer Society conferences.
%
% If IEEEtran.cls has not been installed into the LaTeX system files,
% manually specify the path to it like:
% \documentclass[conference]{../sty/IEEEtran}





% Some very useful LaTeX packages include:
% (uncomment the ones you want to load)


% *** MISC UTILITY PACKAGES ***
%
%\usepackage{ifpdf}
% Heiko Oberdiek's ifpdf.sty is very useful if you need conditional
% compilation based on whether the output is pdf or dvi.
% usage:
% \ifpdf
%   % pdf code
% \else
%   % dvi code
% \fi
% The latest version of ifpdf.sty can be obtained from:
% http://www.ctan.org/tex-archive/macros/latex/contrib/oberdiek/
% Also, note that IEEEtran.cls V1.7 and later provides a builtin
% \ifCLASSINFOpdf conditional that works the same way.
% When switching from latex to pdflatex and vice-versa, the compiler may
% have to be run twice to clear warning/error messages.






% *** CITATION PACKAGES ***
%
\usepackage{cite}
% cite.sty was written by Donald Arseneau
% V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
% \cite{} output to follow that of IEEE. Loading the cite package will
% result in citation numbers being automatically sorted and properly
% "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
% cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
% \cite will automatically add leading space, if needed. Use cite.sty's
% noadjust option (cite.sty V3.8 and later) if you want to turn this off.
% cite.sty is already installed on most LaTeX systems. Be sure and use
% version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
% not currently provide for hyperlinked citations.
% The latest version can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
% The documentation is contained in the cite.sty file itself.






% *** GRAPHICS RELATED PACKAGES ***
%
\ifCLASSINFOpdf
  % \usepackage[pdftex]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../pdf/}{../jpeg/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
\else
  % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
  % will default to the driver specified in the system graphics.cfg if no
  % driver is specified.
  % \usepackage[dvips]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../eps/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.eps}
\fi
% graphicx was written by David Carlisle and Sebastian Rahtz. It is
% required if you want graphics, photos, etc. graphicx.sty is already
% installed on most LaTeX systems. The latest version and documentation can
% be obtained at: 
% http://www.ctan.org/tex-archive/macros/latex/required/graphics/
% Another good source of documentation is "Using Imported Graphics in
% LaTeX2e" by Keith Reckdahl which can be found as epslatex.ps or
% epslatex.pdf at: http://www.ctan.org/tex-archive/info/
%
% latex, and pdflatex in dvi mode, support graphics in encapsulated
% postscript (.eps) format. pdflatex in pdf mode supports graphics
% in .pdf, .jpeg, .png and .mps (metapost) formats. Users should ensure
% that all non-photo figures use a vector format (.eps, .pdf, .mps) and
% not a bitmapped formats (.jpeg, .png). IEEE frowns on bitmapped formats
% which can result in "jaggedy"/blurry rendering of lines and letters as
% well as large increases in file sizes.
%
% You can find documentation about the pdfTeX application at:
% http://www.tug.org/applications/pdftex





% *** MATH PACKAGES ***
%
\usepackage[cmex10]{amsmath}
% A popular package from the American Mathematical Society that provides
% many useful and powerful commands for dealing with mathematics. If using
% it, be sure to load this package with the cmex10 option to ensure that
% only type 1 fonts will utilized at all point sizes. Without this option,
% it is possible that some math symbols, particularly those within
% footnotes, will be rendered in bitmap form which will result in a
% document that can not be IEEE Xplore compliant!
%
% Also, note that the amsmath package sets \interdisplaylinepenalty to 10000
% thus preventing page breaks from occurring within multiline equations. Use:
%\interdisplaylinepenalty=2500
% after loading amsmath to restore such page breaks as IEEEtran.cls normally
% does. amsmath.sty is already installed on most LaTeX systems. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/amslatex/math/





% *** SPECIALIZED LIST PACKAGES ***
%
%\usepackage{algorithmic}
% algorithmic.sty was written by Peter Williams and Rogerio Brito.
% This package provides an algorithmic environment fo describing algorithms.
% You can use the algorithmic environment in-text or within a figure
% environment to provide for a floating algorithm. Do NOT use the algorithm
% floating environment provided by algorithm.sty (by the same authors) or
% algorithm2e.sty (by Christophe Fiorio) as IEEE does not use dedicated
% algorithm float types and packages that provide these will not provide
% correct IEEE style captions. The latest version and documentation of
% algorithmic.sty can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
% There is also a support site at:
% http://algorithms.berlios.de/index.html
% Also of interest may be the (relatively newer and more customizable)
% algorithmicx.sty package by Szasz Janos:
% http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/




% *** ALIGNMENT PACKAGES ***
%
%\usepackage{array}
% Frank Mittelbach's and David Carlisle's array.sty patches and improves
% the standard LaTeX2e array and tabular environments to provide better
% appearance and additional user controls. As the default LaTeX2e table
% generation code is lacking to the point of almost being broken with
% respect to the quality of the end results, all users are strongly
% advised to use an enhanced (at the very least that provided by array.sty)
% set of table tools. array.sty is already installed on most systems. The
% latest version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/tools/


%\usepackage{mdwmath}
%\usepackage{mdwtab}
% Also highly recommended is Mark Wooding's extremely powerful MDW tools,
% especially mdwmath.sty and mdwtab.sty which are used to format equations
% and tables, respectively. The MDWtools set is already installed on most
% LaTeX systems. The lastest version and documentation is available at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/


% IEEEtran contains the IEEEeqnarray family of commands that can be used to
% generate multiline equations as well as matrices, tables, etc., of high
% quality.


%\usepackage{eqparbox}
% Also of notable interest is Scott Pakin's eqparbox package for creating
% (automatically sized) equal width boxes - aka "natural width parboxes".
% Available at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/





% *** SUBFIGURE PACKAGES ***
%\usepackage[tight,footnotesize]{subfigure}
% subfigure.sty was written by Steven Douglas Cochran. This package makes it
% easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
% work, it is a good idea to load it with the tight package option to reduce
% the amount of white space around the subfigures. subfigure.sty is already
% installed on most LaTeX systems. The latest version and documentation can
% be obtained at:
% http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
% subfigure.sty has been superceeded by subfig.sty.



%\usepackage[caption=false]{caption}
%\usepackage[font=footnotesize]{subfig}
% subfig.sty, also written by Steven Douglas Cochran, is the modern
% replacement for subfigure.sty. However, subfig.sty requires and
% automatically loads Axel Sommerfeldt's caption.sty which will override
% IEEEtran.cls handling of captions and this will result in nonIEEE style
% figure/table captions. To prevent this problem, be sure and preload
% caption.sty with its "caption=false" package option. This is will preserve
% IEEEtran.cls handing of captions. Version 1.3 (2005/06/28) and later 
% (recommended due to many improvements over 1.2) of subfig.sty supports
% the caption=false option directly:
%\usepackage[caption=false,font=footnotesize]{subfig}
%
% The latest version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
% The latest version and documentation of caption.sty can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/caption/




% *** FLOAT PACKAGES ***
%
%\usepackage{fixltx2e}
% fixltx2e, the successor to the earlier fix2col.sty, was written by
% Frank Mittelbach and David Carlisle. This package corrects a few problems
% in the LaTeX2e kernel, the most notable of which is that in current
% LaTeX2e releases, the ordering of single and double column floats is not
% guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
% single column figure to be placed prior to an earlier double column
% figure. The latest version and documentation can be found at:
% http://www.ctan.org/tex-archive/macros/latex/base/



%\usepackage{stfloats}
% stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
% the ability to do double column floats at the bottom of the page as well
% as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
% LaTeX2e). It also provides a command:
%\fnbelowfloat
% to enable the placement of footnotes below bottom floats (the standard
% LaTeX2e kernel puts them above bottom floats). This is an invasive package
% which rewrites many portions of the LaTeX2e float routines. It may not work
% with other packages that modify the LaTeX2e float routines. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
% Documentation is contained in the stfloats.sty comments as well as in the
% presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
% does not allow \baselineskip to stretch. Authors submitting work to the
% IEEE should note that IEEE rarely uses double column equations and
% that authors should try to avoid such use. Do not be tempted to use the
% cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
% not format its papers in such ways.





% *** PDF, URL AND HYPERLINK PACKAGES ***
%
%\usepackage{url}
% url.sty was written by Donald Arseneau. It provides better support for
% handling and breaking URLs. url.sty is already installed on most LaTeX
% systems. The latest version can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
% Read the url.sty source comments for usage information. Basically,
% \url{my_url_here}.





% *** Do not adjust lengths that control margins, column widths, etc. ***
% *** Do not use packages that alter fonts (such as pslatex).         ***
% There should be no need to do such things with IEEEtran.cls V1.6 and later.
% (Unless specifically asked to do so by the journal or conference you plan
% to submit to, of course. )


% correct bad hyphenation here
\hyphenation{op-tical net-works semi-conduc-tor}


\begin{document}
\bibliographystyle{IEEEtran}
%
% paper title
% can use linebreaks \\ within to get better formatting as desired
\title{A Novel Centroid-based Classifier in Multi-label Text Classification}


% author names and affiliations
% use a multiple column layout for up to three different
% affiliations
\author{\IEEEauthorblockN{Aisha Al Zbeidi, Majid Khonji and Wen Shen}
\IEEEauthorblockA{Computing and Information Science\\
Masdar Institute of Science and Technology\\
PO Box 54224, Abu Dhabi, UAE\\
Email: \{aalzbeidi,mkhonji,wshen\}@masdar.ac.ae}
\and
\IEEEauthorblockN{Catherine Wilcox}
\IEEEauthorblockA{Water and Environmental Engineering\\
Masdar Institute of Science and Technology\\
PO Box 54224, Abu Dhabi, UAE\\
Email: cwilcox@masdar.ac.ae}
}

% conference papers do not typically use \thanks and this command
% is locked out in conference mode. If really needed, such as for
% the acknowledgment of grants, issue a \IEEEoverridecommandlockouts
% after \documentclass

% for over three affiliations, or if they all won't fit within the width
% of the page, use this alternative format:
% 
%\author{\IEEEauthorblockN{Michael Shell\IEEEauthorrefmark{1},
%Homer Simpson\IEEEauthorrefmark{2},
%James Kirk\IEEEauthorrefmark{3}, 
%Montgomery Scott\IEEEauthorrefmark{3} and
%Eldon Tyrell\IEEEauthorrefmark{4}}
%\IEEEauthorblockA{\IEEEauthorrefmark{1}School of Electrical and Computer Engineering\\
%Georgia Institute of Technology,
%Atlanta, Georgia 30332--0250\\ Email: see http://www.michaelshell.org/contact.html}
%\IEEEauthorblockA{\IEEEauthorrefmark{2}Twentieth Century Fox, Springfield, USA\\
%Email: homer@thesimpsons.com}
%\IEEEauthorblockA{\IEEEauthorrefmark{3}Starfleet Academy, San Francisco, California 96678-2391\\
%Telephone: (800) 555--1212, Fax: (888) 555--1212}
%\IEEEauthorblockA{\IEEEauthorrefmark{4}Tyrell Inc., 123 Replicant Street, Los Angeles, California 90210--4321}}




% use for special paper notices
%\IEEEspecialpapernotice{(Invited Paper)}




% make the title area
\maketitle


\begin{abstract}

%\boldmath
This paper presents an overview of text categorization techniques and a literature review of experiments conducted to improve them. Particular areas of interest include document representation, feature selection, centroid-based classifiers, and support vector machines (SVM). Different methods of feature selection and classification are compared. In addition, technical challenges of text categorization are identified. Based on literature review findings, the paper concludes with a proposed research topic of applying centroid-based classifiers to multi-label text classification. For the next stage of our project we propose a novel centroid-based classifier with a comparison of its performance with other classifiers such as \textit{k}NN and SVM in multi-label text classification tasks.
\end{abstract}
% IEEEtran.cls defaults to using nonbold math in the Abstract.
% This preserves the distinction between vectors and scalars. However,
% if the conference you are submitting to favors bold math in the abstract,
% then you can use LaTeX's standard command \boldmath at the very start
% of the abstract to achieve this. Many IEEE journals/conferences frown on
% math in the abstract anyway.

% no keywords




% For peer review papers, you can put extra information on the cover
% page as needed:
% \ifCLASSOPTIONpeerreview
% \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
% \fi
%
% For peerreview papers, this IEEEtran command inserts a page break and
% creates the second title. It will be ignored for other modes.
\IEEEpeerreviewmaketitle

\section{Introduction}
% no \IEEEPARstart

% You must have at least 2 lines in the paragraph with the drop letter
% (should never be an issue)

Text categorization is the task of categorizing or classifying a set of documents into classes based on the text content of the documents. It involves the task of assigning one or more predefined categories to a specific document. In the text categorization (TC) literature, there are plenty of text categorization methods that vary in their methodology, accuracy and speed. However, these differences in methodologies and approaches of each classifier provide the user the option to select one suitable for a particular task and dataset conditions. Moreover, the literature on TC reports many proposals for improving these methods' accuracy and speed, which achieved good results and remarkable improvements over previous trials.

The importance of text categorization methods stems from the increasing amount of information stored in databases due to more advanced technologies. Such large numbers of documents could not feasibly be categorized by human beings. This phenomenon increased the need to automate the process of classifying documents instead of doing it manually in order to save money and time. Some of the existing applications of text categorization include spam filtering, assigning topics to documents, and language guessing, among other applications. In this paper, we will provide an overview of several text classification techniques and propose a novel centroid-based classifier for multi-label datasets.

\subsection{Reuters-21578 Dataset}

In this paper most of the discussed studies used the Reuters-21578 dataset which is currently widely used by researchers of text categorization. This dataset has been developed for research purposes and contains training and test collections of documents and articles that appeared on Reuters newswire in 1987. These documents and articles are pre-categorized with overlapping categories.
The importance of such a standard dataset is that researchers frequently use different datasets in their studies, which leads to results that are incomparable. However, the Reuters dataset is standardized for use in text categorization testing and other uses in machine learning studies~\cite{david1997reuters}.

\section{Literature Review}

\subsection{Preprocessing and document representation}
Before a classifier can be used, the data must be altered into a more suitable form for running algorithms. In order to extract features from documents, the words in all documents are compiled in a list. The number of words is reduced by stemming, which considers only the root form of the word. For example, the words ``computing,'' ``computes,'' and ``computation'' would all be transformed into the stem ``comput.'' Methods such as Porter's stemming algorithm have been developed for this purpose~\cite{uguz2011two}. We may eliminate those stems in the \textit{word stem list} that have length smaller than $M$.

We define $M$ to be the minimum word stem length and $L$ the minimum number of documents the word stem can be in. We may also remove word stems that occur in fewer than $L$ documents. Then we process each document or vector $x$ and eliminate unimportant word stems listed in a \textit{stop-list}~\cite{1181211}. During this stage stop words (a, an, the, etc.) are removed from the text of documents, as they could lead to misclassification. A standard list consisting of 571 commonly recognized stop words in the English language has been developed. These preprocessing steps help to reduce the dimensionality before any additional dimension reduction methods are applied~\cite{uguz2011two}.

A document is then converted into a machine-readable representation, typically using the vector space model or the bag-of-words (BOW) model, where a document is a vector composed of a pre-fixed set of dictionary terms. BOW is the most common form of representation. Each component can either be a Boolean variable or a weight. In most cases each feature or word stem corresponds to one dimension; the documents are plotted in this feature space.

\subsubsection{Weighting methods in text representation}

In one method, weight can be defined by $v_w(d) = \mathit{TF}(w, d) \log(D/\mathit{DF}(w))$, where $w$ is the word stem, $\mathit{TF}(w,d)$ is the number of occurrences of the word stem in the document $d$, $\mathit{DF}(w)$ is the number of documents that contain $w$, and $D$ is the number of documents. This gives a higher weight to words that are in few documents, making them quite informative in dealing with the problem of imbalanced data~\cite{1181211}.

In the text categorization literature, most papers use either the appearance of the word or the frequency of its appearance as methods of describing the word. However, the authors of another research study claimed that this is not enough to classify the document correctly~\cite{xue2009distributional}. They proved their claim with existing examples in the Reuters dataset where some documents have the same number of appearances of a specific word while they do not belong to the same category. Therefore, they suggested adding distributional qualities, namely the compactness of the word's appearances and the first appearance. Compactness means the distribution of the word throughout the whole document. If the word is spread over the document in different paragraphs, the compactness is low, while it is high if the word's appearances are concentrated in a specific part. The compactness gives you an idea about how significant this word is in classifying the document. When the word has low compactness, it has a scattered appearance throughout the document, and the chances that this word is the theme of the document are high. However, if the word has high compactness, meaning its appearance is concentrated in a specific part, the chances that this word is related to the theme of the document are low.

They produced some equations to calculate the term frequency (TF), compactness (CP) and first appearance (FA). They used three types of datasets: Reuters, Newsgroups and WebKB. The results provided the conclusion that consideration of these distributional features brings more significant improvements on long documents than on short documents. Furthermore, they found that these features have a better performance with informal documents such as the documents in the Newsgroups and WebKB datasets, because these documents are written in casual writing styles and loosely related content is more likely to appear in these documents. However, Reuters documents are written by professionals in a professional way and less related contents are less likely to appear in these documents.

\subsubsection{Semantic Analysis}
In most text categorization applications the words or features each correspond to a dimension. The document is then plotted in this feature space. None of the words are treated as having any meaning besides their statistical significance. However, in real human language the concepts generated by the words can be highly useful in classification.

In recent years researchers have attempted to derive meaning from the words by using semantic analysis (SA). Two main types of SA exist. Latent semantic analysis (LSA) uses mainly machine-based methods whose analysis is intelligible to human beings. On the other hand, explicit semantic analysis (ESA) relies more on humans to assign semantic meanings, but this is highly inefficient. Some researchers developed a new method called concise semantic analysis (CSA), which uses the human-generated terms in the category names to generate concepts~\cite{zhixing2011fast}. Then, each document is plotted in that concept space. This naturally leads to a greatly reduced dimensionality, as each document set would have perhaps only a few hundred concepts. However, CSA is highly sensitive to imbalanced data, as underrepresented categories will provide less conceptual information.

\subsection{Feature Selection}
Due to the high number of words in the entire corpus of a set of documents, the feature space for text categorization is typically very large. This could lead to poor classifier performance, especially for classifiers particularly sensitive to the ``curse of dimensionality'' such as \textit{k}NN. Feature selection (FS) is a process used to reduce the dimensionality of a data set by selecting a smaller subset of features to use for classification. FS would take a long time to do for all data; some methods have been developed to reduce the number of trials required for FS, but these also lower optimization~\cite{aghdam2009text}.

In general there are three types of FS: the filter approach, which is independent of any learning algorithm; the wrapper approach, which is tied to the task (i.e., classification) of the learning algorithm; and the embedded approach, with FS and the learning algorithm interleaved. Wrappers produce better results but are expensive to run. From these three types there are five main selection methods, which differ in how the feature subset is filled (starting with an empty set versus starting with a full set with replacement, etc.).

Before running FS, certain parameters and a maximum number of trials or iterations must be determined~\cite{aghdam2009text}. Although the accuracy of different FS methods is mostly empirically observed, some researchers have argued that the selected method must satisfy certain criteria such as favoring common terms, using category information, and using term frequency information~\cite{yan2010study}.

This literature review covers some of the primary FS techniques. 

\subsubsection{Statistical Methods}
There are many statistical techniques that have been developed for feature selection. However, only a few methods have proven to be effective. One of the most commonly used methods is information gain (IG). This statistical method, based in information theory, is used to rank terms in order of importance for classification. Once the features are ranked, a certain percentage of them are selected for use in classification. This method has been shown to greatly improve the performance of dimension-sensitive classifiers such as \textit{k}NN~\cite{uguz2011two}. Other statistical methods used in feature selection include CHI, principal component analysis (PCA), term frequency, expected cross entropy, the weight of evidence of text, odds ratio, and mutual information.

\subsubsection{Ant Colony Optimization}
In real ant colonies, the ants try to find the quickest path to food. They leave a trail of pheromones on their way. Other ants find it, explore, add pheromones especially if the path is more direct to food. This develops a positive feedback loop where the more ants have used a certain path in the past, the more will continue to use it.

Observations of ant colonies led programmers to develop the Ant Colony Optimization (ACO) algorithm. First, coders determine the population of ants, the intensity of ``pheromones'' associated with any feature, and the maximum allowed iterations. Then ants are assigned randomly to a set of features. If by taking ten steps the ant does not decrease the MSE of the classifier, it will exit the feature set. The subsets discovered by all ants in the iteration are evaluated, and the best subset is selected. If the stop criterion is met, i.e., the maximum number of iterations has been reached, then the evaluation stops; otherwise it continues. The program then updates ``pheromones'' on the subsets discovered by the ants, allowing the best ``ant'' to have more pheromone than others. Finally, new ants are generated and the process is repeated until the maximum number of iterations is reached~\cite{aghdam2009text}.

\subsubsection{Genetic Algorithm}
The genetic algorithm (GA) is another non-statistical feature selection method, comparable to ACO. It follows the principles of biological genetics. First, a set of vectors (deemed ``chromosomes'') is generated across a set of points. Then the algorithm determines the ``fitness'' of each chromosome for classification~\cite{aghdam2009text}. Selected chromosomes are reproduced via genetic principles to form a new population. The process is then repeated until a predetermined condition is met. The GA feature selection method provides good results in irregular, poorly understood feature spaces.

\subsubsection{Combining feature selection methods}
Researchers at Selcuk University wished to determine whether the accuracy of classification results would be improved with the use of two stages of dimension reduction~\cite{uguz2011two}. Success was measured by precision, recall, and F-measure. They used the Reuters and Classic3 data sets to evaluate their results. As a control, the experimenters ran the \textit{k}NN and C4.5 decision tree classifiers on the non-reduced data. The C4.5 decision tree produced results with higher precision under this scenario.

 During the first stage of FS, the information gain (IG) method was used. The researchers tested both \textit{k}NN and C4.5 decision tree classifiers on a selection of the top 1-10 percent of features as determined by IG. They found that the results from this first stage of FS were improved over the base results, and the best F-measure values were achieved with \textit{k}NN.

For the second stage, the researchers compared the addition of two separate FS methods to IG: the genetic algorithm (GA) and principal component analysis (PCA). They found that performing a two-stage feature selection produced more accurate results than IG alone, with \textit{k}NN outperforming the C4.5 decision tree. The two-stage method improved percentages on all measures of accuracy~\cite{uguz2011two}.

\subsubsection{Comparison of feature selection methods}
The authors of the ACO study compared their algorithm for text feature selection with the genetic algorithm (GA) and two statistical FS methods: information gain (IG) and CHI~\cite{aghdam2009text}. These two statistical methods were chosen in particular as the literature has determined them to be the optimal performers under conditions relevant to this study. The study used precision and recall to evaluate performance, as well as F1. Both microaverages and macroaverages were used. Parameters for the methods were determined empirically in preliminary experiments; optimization was not performed. A nearest neighbor classifier was used to evaluate the effectiveness of the FS method.

The results of the study showed that ACO was the most accurate of the four FS methods, followed by GA, CHI, then IG. ACO was quicker than GA at locating the optimal solution. GA was much more negatively affected by the number of features in the data set. ACO works without prior knowledge about the features. ACO can be implemented in a few lines of code and has relatively low computational demands, making it a promising FS option. Future research includes the selection of optimal parameters, using more complex classifiers, applying ACO to other kinds of data sets, and combining ACO with other algorithms in parallel~\cite{aghdam2009text}.

\subsection{Text categorizing other languages}

One study has proposed classification patterns for assigning topics to newswires automatically. The conductors of that study used the Reuters dataset in two languages, English and German. 

First of all, they created a dictionary process which is responsible for determining the features or attributes that will be used to represent the individual documents within a collection. In the second step, they mapped the documents to labels that identify its category via the dictionary. Afterwards, they searched for patterns in the label assignment process and evaluated them to find the best in terms of least error and least cost.

Finally, they claimed that they achieved better performance on the English newswires, while the results on the German newswires were also promising. They conclude that optimized rule induction is a competitor among the machine learning techniques for document classification~\cite{apté1994towards}.

However, the low number of studies that go beyond the language barrier could indicate the existence of some challenges in categorizing text written in other languages.

\subsection{Centroid-based Classifier}
Centroid-based classifiers have been widely used in many web applications due to their computational efficiency. In centroid-based text classification, the documents are represented using the Vector Space Model (VSM). In this model, each document \textit{d} is considered to be a vector in the term-space. Normalization of document length is often performed by calculating a term's weight and scaling the vector to have L2-norm equal to one, as is the most commonly used method. A traditional prototype vector is a delegate vector for each category, where a feature's weight should be a form of weight combination of all documents in the category.

To avoid over-fitting and high computational complexity, many dimension reduction methods have been proposed for the term vector, such as stop words, stemming, word clustering, and document frequency~\cite{guan2009class}. In its simplest form, each document is represented by the \textit{term-frequency} (TF) vector $d_{tf}=(tf_{1},tf_{2},\ldots,tf_{n})$, where $tf_{i}$ is the frequency of the \textit{i}th term in the document.

Given a class $C_{j}$ of a corpus, there are two classical methods to create $C_{j}$ ’s prototype vector:

(1) Arithmetical Average Centroid (AAC):


\[
\overrightarrow{Centroid_{j}} = \frac{1}{|C_j|}\sum_{\overrightarrow{d}\in C_j} \overrightarrow{d}
\]
 
 
 
where the centroid is the arithmetical average of all document vectors of class $C_{j}$ . This is the most commonly used initialization method for centroid-based classifiers.

(2) Cumuli Geometric Centroid (CGC):

\[
\overrightarrow{Centroid_{j}} = \sum_{\overrightarrow{d}\in C_j} \overrightarrow{d}
\]
 
 
where each term will be given a summation weight.


After centroids of different categories are determined, an unlabeled document is classified by finding the closest centroid to the document vector. The category of this centroid is then assigned to the test document. When the distance of two vectors is measured by their dot product, the testing process is to calculate

\[
C' = \operatorname*{arg\,max}_j \bigl(\overrightarrow{d}\cdot \overrightarrow{Centroid_{j}}\bigr)
\]

The test document \textit{d} will be labeled as class $C'$.


Compared to other text categorization methods, centroid-based approaches suffer more from the problem of inductive bias or model misfit---classifiers tuned to the contingent characteristics of the training data rather than the constitutive characteristics of the categories. Centroid-based approaches are more susceptible to model misfit because of their assumption that a document should be assigned to a particular class when the similarity between the document and that class is the largest. In practice, this assumption often does not hold (i.e., model misfit).

Many researchers have addressed this issue. Cachopo et al.~\cite{cardoso2007semi} proposed the combination of Expectation-Maximization with a centroid-based method to incorporate information about the unlabeled data during the training phase. Guan et al.~\cite{guan2009class} designed a Class-Feature-Centroid (CFC) classifier motivated by weight-adjustment efforts for centroid-based classifiers. Tan et al.~\cite{tan2007using,tan2005using} proposed the Hypothesis-Margin Based Global Refinement (HMGR) method and the DragPushing method. We will introduce these methods in detail based on their papers in the following subsections.

\subsubsection{Method proposed by Elmarhumy et al.}
Elmarhumy et al.~\cite{elmarhumy2009automatic} proposed a modified centroid classifier model. In the proposed model, they added the most similar training errors belonging to a certain class to its centroid in order to update it, and discarded the training errors that have low similarities with their class based on a certain threshold value. They exploited the Reuters-21578 collection as training and testing data and used classification accuracy for evaluation. The experimental results show that the proposed approach can slightly improve the performance of the centroid classifier. 
\subsubsection{Method proposed by Cachopo et al.}
In order to improve the accuracy of the centroid-based method, Cachopo and Oliveira~\cite{cardoso2007semi} proposed the combination of Expectation-Maximization (EM) with a centroid-based method to incorporate information about the unlabeled data during the training phase. EM is a class of iterative algorithms for maximum likelihood estimation of hidden parameters in problems with incomplete data. In their case, they considered that the labels of the unlabeled documents were unknown and used EM to estimate these (unknown) labels.

Experiments show that their proposed approach can greatly improve accuracy relatively to a simple centroid-based method, in particular when there are very small amounts of labeled data.

They also show how a centroid-based method can be used to incrementally update the model of the data, based on new evidence from the unlabeled data. Using one synthetic dataset and three real-world datasets, they provided empirical evidence that, if the initial model of the data is sufficiently precise, using unlabeled data improves performance; on the other hand, using unlabeled data degrades performance if the initial model is not precise enough.
\subsubsection{The Class-Feature-Centroid Method}
Guan et al. argue that one of the reasons for the inferior performance of centroid-based classifiers is that centroids do not have good initial values. To solve this problem, many methods have been using feedback loops to iteratively adjust prototype vectors, such as the DragPushing method, the Hypothesis Margin method and the Weight Adjustment method. These improved classifiers perform competitively compared to SVM classifiers. Motivated by the weight-adjustment efforts for centroid-based classifiers, they~\cite{guan2009class} designed a Class-Feature-Centroid (CFC) classifier, which strives to construct centroids with better initial values than traditional centroids. The CFC classifier first extracts inter-class and inner-class term indexes from the corpus. Then both indexes are carefully combined together to produce prototype vectors. Different from these previous approaches, the proposed method tries to obtain good centroids during the construction phase such that their classification capability is still competitive compared to those derived from adaptive methods.

In the testing phase, CFC adopts a denormalized cosine measure, instead of a normalized prototype vector. This is to preserve prototype vectors' discriminative ability and enlarging effect. The experimental results on the skewed Reuters-21578 corpus and the balanced 20-newsgroup corpus demonstrate that the CFC classifier has a consistently better performance than SVM classifiers. In particular, CFC is more effective and robust than SVM when data is sparse.

\subsubsection{The Hypothesis-Margin Based Global Refinement Method}
To address the problem of inductive bias or model misfit incurred by the assumption of the centroid classifier, Tan et al.~\cite{tan2007using} pointed out that some of the previous refinement methods employed only one criterion, such as training-set error, as their objective function. However, a training-set-error based objective function cannot guarantee the generalization capability of base classifiers and may lead to over-training on the training data. To solve this problem, they introduced a new refinement strategy named ``Hypothesis-Margin Based Global Refinement (HMGR)'' which uses both training-set errors and training-set margins as training criteria to build a global objective function over all training examples.

They conducted extensive experiments on four benchmark document corpora including Reuter-21578, 20 News Group, Industry Sector and OHSUMED. The results show that their proposed technique is able to improve classification performance of centroid classifier dramatically. The resulting classifier not only approaches the state-of-the-art SVM in classifying performance, but also beats it in running time. 
\subsubsection{The DragPushing Method}

Tan et al.~\cite{tan2005using} proposed a strategy named ``DragPushing'' to improve the classification accuracy of centroid classifiers.
Using the training data set, the algorithm first calculates the prototype vectors, or centroids, for each of the available document classes. Using misclassified examples, it then iteratively refines these centroids by dragging the centroid of the correct class towards a misclassified example and at the same time pushing the centroid of an incorrect class away from the misclassified example. 

Experiments conducted on two benchmark collections, Reuter-21578 and WebKB, show that its classification accuracy is comparable to that of more complex methods, such as support vector machines (SVM). 

\subsection{Support Vector Machine}
Support Vector Machines (SVM) were first introduced by V.~Vapnik et al.~\cite{Cortes95support-vectornetworks} in 1995. The idea is to find the best regression line between two classes of data. The regression line is generalized to a high-dimensional space in which we look for the best hyper-plane that separates the two classes of data. The classifier is trained by a set of points $(\mathbf{x}_1, y_1),\dots, (\mathbf{x}_n,y_n)$ where $y_i=\pm 1$. The goal is to find the best hyper-plane $\mathbf{w}\mathbf{x} + b = 0$ (where $\mathbf{w}$ is the weight vector and $b$ is the bias term) that separates the two classes of data $\pm1$ with the maximum margin to the nearest point of any class. We select the vector $\mathbf{w}$ and $b$ that maximize the margin. This can be solved by quadratic programming~\cite{Boser92atraining}. In fact, we need to find the maximum margin between the two normalized hyperplanes $\mathbf{wx}+b = \pm1$, which is $2/\Arrowvert \mathbf{w} \Arrowvert = 2/\sqrt{\mathbf{w}^T \mathbf{w}}$; therefore we minimize $\Arrowvert \mathbf{w} \Arrowvert$.
\begin{center}
	$min_{\mathbf{w},b} \frac{1}{2} \mathbf{w}^T\mathbf{w} $ \\
	subject to $y_i(\mathbf{wx}+b)\geq 1$ , $i \in \{1,\dots,l\}$ training.
\end{center}

The decision rule is defined by $g(\mathbf{x}) = \operatorname{sgn}(\mathbf{wx} + b)$, where the sign represents the binary classification output. Sometimes the data is not linearly separable; therefore, we define a kernel function $\theta(\mathbf{x})$ that maps each point to a higher dimension in which the data can be linearly separable~\cite{Boser92atraining}. 
In~\cite{1181211}, the author presents a way of using SVM starting from data representation, extracting features from training documents, selecting a subset of features (to reduce dimensionality), and converting the training data into many binary sets of data, since SVM deals only with binary classes.

 \cite{springerlink:10.1007/BFb00266a83} discusses the reason SVM works well in the application of Text Categorization. SVMs can be used in many other applications as well, such as face detection, writing detection, and others. First, SVM learning is independent of the dimensionality of the feature space, because SVM complexity depends on the margin that separates the two classes of data, which is solved independently as a quadratic optimization problem, and not on the number of features. Text categorization usually involves a high-dimensional feature space (typically more than 10000 features), and SVM tolerates such a setting. In text categorization, only few features are irrelevant. Therefore, conventional techniques usually suffer from the high dimension, whereas SVM adjusts very well. Also, most text categorization problems are linearly separable, and the linear SVM works fine, although SVMs can also find polynomial separators. Several experiments using the Reuters-21578 dataset are conducted comparing SVM with conventional methods such as Naive Bayes, the Rocchio Algorithm, k-Nearest Neighbors, and the C4.5 decision tree/rule. k-NN performs best among the conventional methods. SVM has a better precision/recall break-even point than all conventional methods. It is also faster than k-NN at classification time, but obviously slower in training time. Its training time is rather comparable with C4.5.
 Support Vector Machines were designed originally to deal with binary classes. There are many tweaks to make them cope with multiple classes, which is quite an important requirement when comparing with other classifiers. Chih-Wei Hsu and Chih-Jen Lin~\cite{Hsu02acomparison} give an overall comparison between different methods. There are two approaches to achieve a multi-class classifier. One approach combines several binary classifiers, such as ``one-against-all'', ``one-against-one'' and the direct acyclic graph SVM (DAGSVM). The other approach solves all classes in one big optimization problem, called ``all-together''. Solving large optimization problems tends to be computationally expensive. The authors demonstrate a decomposition method and a method that considers all variables together in order to reduce the computational cost.

Experiments show that One-against-one and DAG have fast training time and good performance comparing to other ``all together'' methods which makes them more practical to apply.
 \cite{Wang:2009:OFS:1528927.1529034} introduces fuzzy set theory in SVMs. Fuzzy set theory adds a membership function to classical set theory, where membership is otherwise a binary operator: $M: S \rightarrow [0,1]$, where $M(\mathbf{x})$ is the membership function. In SVMs, fuzzy membership can reduce the impact of outliers in the training data. However, it is important to select a good membership function to obtain better recall and precision than the classical SVMs.

The authors introduce two different membership functions in an OAO-FSVM classifier. One is fully automatic and the other has a tune-able control parameter. Then they compare them with OAO-SVM using a Gaussian kernel function on the Reuters-21578 dataset. The F-measure of the FSVM in both input space and feature space is slightly higher than or equal to that of the normal SVM, depending on the tune-able control parameter.

Another study examined two different types of keyword selection process in classification with SVM~\cite{özgür2005text}. They used the standard Reuters dataset in the experiments and both Boolean and tf-idf weighting schemes. The first type of keyword selection is Class-Based, which means the keyword selection process is performed separately in each individual class, while the Corpus-Based keyword selection process is performed for all the classes at the same time and the result is a set of the most important words in these documents. Afterwards, they analyzed the two keyword selection approaches in terms of micro-averaged F-measure, macro-averaged F-measure and classification time. They conclude by revealing that by selecting the keywords the performance of the SVM improved in terms of both F-measures and time. For example, corpus-based selection using 2000 keywords performs better than the case of using all the words in the classification process. Moreover, the class-based approach with the tf-idf scheme performed better with a small number of keywords (50--100) than the corpus-based approach and the case of using all the words in classification. Finally, the class-based approach with the Boolean scheme is satisfying too, with an 82 percent success rate, and could be used in cases of limited time and space resources.

Given that SVM is widely used nowadays in many applications and areas, the conductors of one study identified some factors that affect the accuracy and the speed of the SVM. The accuracy of the classification in the training process of SVM is affected by the kernel parameter settings and feature selection. Therefore, the purpose of this segment of SVM research is reducing the high dimension of the feature vector, choosing the optimal input feature subset for SVM, and optimizing the kernel parameters for the sake of increasing accuracy and speed~\cite{rujiang2007combination}.

\subsection{Comparing Different Classifiers}
 A comparison was conducted by Microsoft and Stanford University researchers~\cite{Dumais98inductivelearning} presenting five different classifiers; namely, Find Similar, Decision Trees, Naive Bayes, Bayes Nets and Support Vector Machines (SVMs). The comparison is done in terms of learning speed, real-time classification speed and classification accuracy. It shows that SVMs provide the best accuracy among all with a very fast learning speed. The Find Similar method uses the Jaccard similarity measure to compare test vectors to category weights. Bayes Nets is similar to Naive Bayes, but relaxes the assumption of feature independence to allow up to 2 levels of dependence. SVMs find the hyperplane between the two classes with the maximum distance between the hyperplane and the nearest point. They use the Reuter-21578 text corpus to conduct experiments. Their results show that Find Similar has the fastest learning speed, and the next is SVM with a slight difference. Naive Bayes is about 4 times slower than SVM, and Bayes Nets is again much slower than Naive Bayes (8:145). The five classifiers all give a very fast classification speed, since most of the time is actually spent on the training. SVMs give the highest accuracy among all with around 92\%, followed by Decision Trees, which were lower by 3.6\%. Bayes Nets give slightly better performance than Naive Bayes, but the training was much slower.

Another comparison was presented by researchers Yang and Liu at the Information Retrieval Conference~\cite{yang1999re}. The existing Text Categorization (TC) literature has a rich base of information for different methods and clear results for each. However, sometimes these results are not comparable or do not provide statistically clear comparisons between these methods because of the use of different performance measures or different data sets for experiments. Therefore, the contribution of this paper is to conduct a controlled study that provides statistically comparable results on five commonly used Text Categorization methods: the Support Vector Machine (SVM), a k-Nearest Neighbor (kNN) classifier, a neural network (NNet) approach, the Linear Least-squares Fit (LLSF) mapping and a Naive Bayes (NB) classifier. Another contribution of this paper is to examine the performance of each classifier as a function of the training data, especially when dealing with a skewed category distribution.

Before the experimental part, the conductors of the study designed a set of significance tests to compare the methods using suitable performance measures. They used the micro sign test (s-test) and the comparing-proportions test (p-test) to evaluate performance at the micro level, whereas they used the macro sign test (S-test), the macro t-test (T-test) and the macro t-test after rank transformation (T'-test) to evaluate performance at the macro level.

Later, they used statistical feature selection methods for each classifier to come up with a number of features for each of them. Reuters-21450 was the dataset that was used to train the classifiers, and the results were used to fix some settings, such as deciding the value of k in the kNN method.

In the end, they used tables to show the results of the significance tests, performance tests and comparing proportions (p-test). The last table gives the conclusion of this paper: SVM and kNN significantly outperform the other methods, while NB significantly underperforms them. LLSF was better than NNet, but both have an average performance between the previous methods.

Another study proposed a hybrid solution that uses Rough Sets Theory (RST) to reduce the feature vector dimensions and uses a genetic algorithm to help in selecting the input features and optimizing the parameters for the SVM~\cite{rujiang2007combination}. They called the proposed system RGSC (Rough sets and Genetic algorithms for SVM classifier). For the experimental environment, they used YALE (Yet Another Learning Environment) to develop and implement the system. Moreover, they used the Reuters dataset to test the accuracy. They also investigated the performance of kNN and Decision tree classifiers in order to compare their performance with the proposed system later.

The results showed that RGSC generally achieves high precision compared to the kNN and Decision tree methods. Moreover, while the recall ratios of kNN and Decision tree were about the same, RGSC achieved a higher recall ratio than both. Furthermore, RGSC showed the highest average classification results, and the F-measure testing indicated that RGSC is able to effectively process categories with large numbers of documents, but is poorer with smaller ones. 
\subsection{Multiclass Data and Multilabeling}

In many document sets, individual documents belong to more than one class. In addition, the document set is nearly always multiclass, having more than two different categories in which the documents are clustered. This leads to a problem for classifiers, most of which were originally designed to handle only bivariate, single label classification tasks. One study proposed using a combination of methods to handle this situation. \cite{zelaia2011multiclass}

After reducing dimensionality by projecting features into a \textit{p}-dimensional feature space (with a predetermined \textit{p}), the researchers had to determine at which levels to set their three main parameters. These parameters depend on each other and there is no real theoretical way to find the optimal values for the experimental setup. The researchers addressed this problem by setting one parameter to an arbitrary level, ``tuning'' the other two parameters until they appeared relatively optimal, and finally re-tuning the original parameter.

The main algorithm consisted of first running a kNN classifier on the reduced vector space, then using a bayesian voting scheme to determine whether one, zero, or more than one label should be applied to the document. 

\section{Main Technical Challenges}
\subsection{High Dimensionality of Data}
The nature of text categorization means that under most methods, the feature space will have a very high dimensionality. As mentioned in the literature review section, many classifiers (such as kNN) have poor performance under such conditions. This is known as the ``curse of dimensionality.'' Feature selection and alternative methods of document representation such as CSA improve some classifiers by providing a reduced feature (or concept) space. Some methods, such as SVM, are not affected by high dimensionality. 
\subsection{Computing cost}
The time and computational energy it takes to train and run a classifier contribute to its computing cost. There is often a tradeoff between accuracy and computing cost; for example, under almost all circumstances SVM performs significantly better than kNN, but kNN requires far less computation time than SVM. Many efforts have been made to simplify or adapt classifiers to lower computing cost---for example, changing the experimental setup so that fewer iterations are required. 
\subsection{Problems with varying classes and labels}
The Reuters data set is both multiclass and multilabel. Some documents are even unlabeled. This presents a challenge for most classifiers, as they were designed originally for binary, single-label data, assigning exactly one label to each data point. Many of the most modern text categorization research studies deal with adapting single-label classifiers to multi-labeled data~\cite{zelaia2011multiclass}.
Additionally, some categories have many documents and some have very few. This makes it hard for some algorithms to accurately classify, as the lack of information in certain categories leads to a lack of representation. Often a weighting method is required to handle this problem. 
\subsection{Comparison across studies}
The performance of different classifiers is largely determined by the conditions under which they are run. Factors such as the given text corpus, parameters, and weighting systems alter the efficiency of classification. Most algorithms can only select optimal parameters empirically, meaning each study might produce different results. Many studies use varying experimental setups even with the same data (Reuters), making it difficult to accurately compare results. 

Some techniques have been standardized to encourage relevant comparisons. First, standard measures of accuracy in classification have been developed, such as precision, recall, and F-measure. Second, standard document setups (including splits between training and testing data) now exist for the most commonly used databases for text categorization, like Reuters. This allows researchers to more accurately compare their results across studies~\cite{zelaia2011multiclass}.

\subsection{Challenges with Centroid-based Classifier}
The learning time and testing time of centroid-based classifiers are much shorter than those of SVM. However, their accuracy is lower than SVM's due to the inductive bias or model misfit incurred by the centroid assumption. Many studies have addressed this issue, but the majority of them only consider applying these improved methods to single-label text classification; few of them extend their approaches to multi-label multi-class datasets. 

Multi-label classification methods are increasingly required by modern applications, such as protein function classification, music categorization and semantic scene classification~\cite{tsoumakas2007multi}. Thus, it is necessary to study multi-label multi-class text categorization using centroid-based classifiers. 
\subsection{Challenges with Support Vector Machine}
SVMs provide a very competitive performance with respect to other classifiers. From a practical point of view, perhaps the most serious problem with SVMs is the high algorithmic complexity and extensive memory requirements of the required quadratic programming in large-scale tasks~\cite{Boser92atraining}. Another limitation of the support vector approach lies in the choice of the kernel~\cite{springerlink:10.1007/BFb00266a83}.
SVM is originally a binary classifier, and we can make it multi-class by creating multiple binary classifiers, which increases the computational requirements. Multi-labeling is a challenge for SVM because, in the traditional SVM, any overlap between classes is considered a classification error~\cite{Wang_paralleland}.

SVM cannot deal with large data when a kernel is used due to large memory requirement, therefore, linear SVM is used to solve much larger problems \cite{Boser92atraining}.

\section{Research Groups}
\subsubsection{Information Extraction and Synthesis Laboratory, University of Massachusetts Amherst, MA, USA} Their research areas include information extraction from the Web, understanding the connections between people and  organizations, expert finding, social network analysis, and mining the scientific literature and community. Andrew McCallum leads this lab. He has 20 publications on text classification.

\subsubsection{Kamal Nigam, School of Computer Science. Carnegie Mellon University. Pittsburgh, PA, USA} He focuses on  implementing data mining and machine learning systems that solve real business problems. He has 19 publications on Text Classification.
				
\subsubsection{Bernardete Ribeiro, Departamento de Engenharia Informatica, University of Coimbra} Her research interests include Computational Learning, Pattern Recognition, Manifold Learning, Neural Networks, Support Vector Machines, Intelligent Control, Fault Detection and Diagnosis. She has 17 publications on Text Classification.	
			
\subsubsection{Zheng Chen, Microsoft Research Asia} His research interests include Machine Learning, Information Retrieval, Speech Recognition, Natural Language Processing, Multimedia Information Retrieval, personal information management, and Artificial Intelligence. He has 16 publications on Text Classification.

\subsubsection{Classification Research Group, Imperial College London, UK} This group includes 11 members, and focuses on two directions: classification and data mining. The classification effort focuses on both methodological research and particularly novel, non-standard applications. Their work in classification has significant overlap with other areas, including machine learning and pattern recognition, so that their publications appear in a wide literature.

\subsubsection{Jingyu Zhu, Embedded and Pervasive Computing Center, Shanghai Jiao Tong University, China} This group focuses on information retrieval and pervasive computing.

\subsubsection{Songbo Tan, Institute of Computing Technology, Chinese Academy of Sciences, China} They focus on text classification, sentiment analysis, and data mining.

\subsubsection{Arlindo L. Oliveira and Ana Cardoso-Cachopo, IST-DEI Portugal} They focus on Information Retrieval, more specifically on Text Categorization.

\section{Project Proposal}
\subsection{Project Title}
A Novel Centroid-based Classifier in Multi-label Text Classification.
\subsection{Motivation and Objectives}
Text categorization is a fundamental task in such aspects of natural language processing as information retrieval, information extraction, and text mining~\cite{fujino2008multi}. Text categorization is generally defined as assigning one or more predefined category labels to each data sample. A text document often belongs to multiple categories in real tasks such as web pages, news articles and international patent categorization. Therefore, it is necessary to study the performance of different classifiers for such multi-label categorization tasks.

In single-label Text Classification, SVM is the state-of-the-art classifier due to its high accuracy. However, it has a lower efficiency compared to other classifiers. The traditional centroid-based classifier is a simple and efficient classifier with a slightly lower accuracy compared with SVM. But there are also several refined centroid-based classifiers which have comparable or even higher accuracy than SVM; for instance, the Expectation-Maximization method proposed by Cachopo et al. and the Class-Feature-Centroid method proposed by Guan et al. While these studies were conducted under the circumstances of single-label datasets, few works have compared the accuracy of these text categorization methods on the task of multi-labeling. In this project, we will combine the EM method and the CFC method to build a new classifier for multi-class, multi-label text categorization tasks. Then we will compare the performance of this refined method with other classifiers such as the traditional centroid-based classifier, \textit{k}NN, and SVM. 

\subsection{Approach}
In this novel centroid-based classifier, the weight for term $t_{i}$ of class \textit{j} is calculated as:
\[
w_{ij} = b^{\frac{DF_{t_{i}}^{j}}{\left|C_{j}\right|}}\times \log\left(\frac{\left|C\right|}{CF_{t_{i}}}\right)
\]




where $DF_{t_{i}}^{j}$ is term $t_{i}$'s document frequency in class $C_{j}$, $\left|C_{j}\right|$ is the number of documents in class $C_{j}$, $\left|C\right|$ is the total number of document classes, $CF_{t_{i}}$ is the number of classes containing term $t_{i}$, and \textit{b} is a constant larger than one.

We will combine the information contained in the labeled and unlabeled data using the Expectation-Maximization algorithm to update the centroids during the training phase.

We will use Reuters-21578 as the real-world dataset with accuracy as the criterion for evaluating the performance of the classifiers.
\[
\mathit{Accuracy} = \frac{\mbox{number of correctly classified test documents}}{\mbox{number of total test documents}}
\]


\section{Roles of Team Members}
This project was hosted by Google Code. All the planning, document summaries, and task assignments are updated in ``http://code.google.com/p/cis501-group6/''. The responsibilities of all the team members are listed below:
\subsubsection{Aisha Al Zbeidi}
Read and summarized different articles which compared the most popular text categorization methods using the Reuters testing collection. Discussed existing proposals for improving SVM and for developing language-independent categorization techniques. Also provided the report introduction.
\subsubsection{Catherine Wilcox}
Focused primarily on feature selection methods. Additionally reviewed multiclass/multilabel methods and concise semantic analysis, and identified several main challenges of text categorization. Contributed to the overall editing and organization of the report. 
\subsubsection{Majid Khonji}
Studied Support Vector Machines and summarized related articles. Also discussed several comparisons between classifiers and their relative performance. He will conduct experiments on SVM with different kernels (if required) in the next phase.
\subsubsection{Wen Shen}
Completed the literature review on centroid-based classifiers and proposed extending the refined centroid-based classifiers to multi-label text categorization. Based on the EM method and the CFC method (both of which belong to centroid-based classifiers), he also developed a proposal to build a novel centroid classifier for multi-label text categorization.

% An example of a floating figure using the graphicx package.
% Note that \label must occur AFTER (or within) \caption.
% For figures, \caption should occur after the \includegraphics.
% Note that IEEEtran v1.7 and later has special internal code that
% is designed to preserve the operation of \label within \caption
% even when the captionsoff option is in effect. However, because
% of issues like this, it may be the safest practice to put all your
% \label just after \caption rather than within \caption{}.
%
% Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
% option should be used if it is desired that the figures are to be
% displayed while in draft mode.
%
%\begin{figure}[!t]
%\centering
%\includegraphics[width=2.5in]{myfigure}
% where an .eps filename suffix will be assumed under latex, 
% and a .pdf suffix will be assumed for pdflatex; or what has been declared
% via \DeclareGraphicsExtensions.
%\caption{Simulation Results}
%\label{fig_sim}
%\end{figure}

% Note that IEEE typically puts floats only at the top, even when this
% results in a large percentage of a column being occupied by floats.


% An example of a double column floating figure using two subfigures.
% (The subfig.sty package must be loaded for this to work.)
% The subfigure \label commands are set within each subfloat command, the
% \label for the overall figure must come after \caption.
% \hfil must be used as a separator to get equal spacing.
% The subfigure.sty package works much the same way, except \subfigure is
% used instead of \subfloat.
%
%\begin{figure*}[!t]
%\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
%\label{fig_first_case}}
%\hfil
%\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
%\label{fig_second_case}}}
%\caption{Simulation results}
%\label{fig_sim}
%\end{figure*}
%
% Note that often IEEE papers with subfigures do not employ subfigure
% captions (using the optional argument to \subfloat), but instead will
% reference/describe all of them (a), (b), etc., within the main caption.


% An example of a floating table. Note that, for IEEE style tables, the 
% \caption command should come BEFORE the table. Table text will default to
% \footnotesize as IEEE normally uses this smaller font for tables.
% The \label must come after \caption as always.
%
%\begin{table}[!t]
%% increase table row spacing, adjust to taste
%\renewcommand{\arraystretch}{1.3}
% if using array.sty, it might be a good idea to tweak the value of
% \extrarowheight as needed to properly center the text within the cells
%\caption{An Example of a Table}
%\label{table_example}
%\centering
%% Some packages, such as MDW tools, offer better commands for making tables
%% than the plain LaTeX2e tabular which is used here.
%\begin{tabular}{|c||c|}
%\hline
%One & Two\\
%\hline
%Three & Four\\
%\hline
%\end{tabular}
%\end{table}


% Note that IEEE does not put floats in the very first column - or typically
% anywhere on the first page for that matter. Also, in-text middle ("here")
% positioning is not used. Most IEEE journals/conferences use top floats
% exclusively. Note that, LaTeX2e, unlike IEEE journals/conferences, places
% footnotes above bottom floats. This can be corrected via the \fnbelowfloat
% command of the stfloats package.



\section{Conclusion}
In this paper, we studied several text classification techniques, mainly focusing on feature selection, centroid-based classifiers, and Support Vector Machines (SVM). We compared the performance of several classifiers, including \textit{k}NN, Naive Bayes, the centroid-based classifier, and SVM. Based on a wide literature review, we proposed a novel centroid-based classifier that will be implemented in the future.



% conference papers do not normally have an appendix


% use section* for acknowledgement
\section*{Acknowledgment}


We would like to thank Dr. Wei Lee Woon for his helpful suggestions on our project proposal.





% trigger a \newpage just before the given reference
% number - used to balance the columns on the last page
% adjust value as needed - may need to be readjusted if
% the document is modified later
%\IEEEtriggeratref{8}
% The "triggered" command can be changed if desired:
%\IEEEtriggercmd{\enlargethispage{-5in}}

% references section

% can use a bibliography generated by BibTeX as a .bbl file
% BibTeX documentation can be easily obtained at:
% http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
% The IEEEtran BibTeX style support page is at:
% http://www.michaelshell.org/tex/ieeetran/bibtex/
%\bibliographystyle{IEEEtran}
% argument is your BibTeX string definitions and bibliography database(s)
%\bibliography{IEEEabrv,../bib/paper}
%
% <OR> manually copy in the resultant .bbl file
% set second argument of \begin to the number of references
% (used to reserve space for the reference number labels box)

\bibliography{citation}

% that's all folks
\end{document}
