\documentclass[10pt,journal,compsoc]{IEEEtran}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{verbatim} 
\usepackage{tabularx}

\usepackage{array}


\hyphenation{op-tical net-works semi-conduc-tor}


\begin{document}

\title{Classification of English Texts}

\author{David~Kunštek, Ivana~Kolarič}


\IEEEcompsoctitleabstractindextext{%
\begin{abstract}
Understanding natural languages is a trivial problem for a human. However, a computer cannot understand a language as a human does. To a computer, it is just a sequence of numbers with no deeper meaning. In this paper we describe how to transform English text into a computer-readable format and perform clustering which distributes different texts into different clusters according to their topic. The transformed text is called the Bag of words model. Text preprocessing is primarily used for simplification and minimisation of such models. We experimented with different text preprocessing methods to see how that affects the categorisation accuracy.
\end{abstract}
%  In particular, the Computer Society does
%  not want either math or citations to appear in the abstract.

% Note that keywords are not normally used for peerreview papers.
\begin{IEEEkeywords}
Text mining, Bag of words, Clustering, Stemming
\end{IEEEkeywords}}


% make the title area
\maketitle

\IEEEdisplaynotcompsoctitleabstractindextext

\IEEEpeerreviewmaketitle

\section{Introduction}

\IEEEPARstart{D}{ocument} or text clustering is a subset of the larger field of data clustering, which borrows concepts from the fields of information retrieval (IR), natural language processing (NLP), and machine learning (ML). The process of clustering aims to discover natural groupings, and to present an overview of the classes (topics) in a collection of documents \cite{IEEERecentDevelopment:andrewsandfox}.
Clustering of text is used, for example, when a web search engine returns thousands of pages in response to a broad query, making it difficult for users to browse or to identify relevant information. Clustering methods can be used to automatically group the retrieved documents into a list of meaningful categories. The main problem in document clustering is that neither the number, the properties, nor the membership of classes is known in advance \cite{IEEEwiki:cluster}.

There is a large amount of literature describing clustering and text mining related problems. We presented those in Section \ref{sec:relatedwork}.

The main goal of this survey is to implement preprocessing of texts and to perform clustering on a known data set which went through different preprocessing steps. As test data we used the well-known corpus Reuters-21578, which is described in Section \ref{sec:reuters}.


The first challenge in clustering problem is to determine which features of a document are considered important and how to effectively present article as a data set. We seek a document model. A simple and popular approach is to represent each document as a vector of term frequencies. Such model and implementation of preprocessing data is discussed in Section \ref{sec:vectorspacemodel}.

After the model is built, the challenge we are facing is what clustering algorithm to use and which similarity measure is appropriate to cluster examples with such large number of attributes. Clustering algorithms and similarity measures we use are described in Section \ref{sec:clustering}.

Another problem we are facing, after the clustering is performed and we obtained some results, is the determination of how good results are. Although the test data is available (the correct results are known), the determination is not that simple. Some methods for evaluation of clustering are discussed in Section \ref{sec:evaluation}.

Because of the time complexity of clustering algorithms, the number of articles in the corpus we used had to be reduced. The subset of Reuters-21578, and how we obtained it, is described in Section \ref{sec:experiment}. In this section we also discuss the programs, algorithms and different tools we used in our experiments.

Results obtained with described algorithms and their description are shown in Section \ref{sec:result}.

The quick review of work and conclusion is given in Section \ref{sec:conclusion}.

\section{Related Work}
\label{sec:relatedwork}

There exists a lot of related work in this field. We decided to implement preprocessing with removal of stop words and stemming as described in \cite{IEEEstopWords:sirotkin}\cite{IEEESuffixStripping:porter}.
For better control over the experiments described in Section \ref{sec:experiment}, we also implemented result evaluation methods as described in \cite{IEEEhuang:similaritymeasures} and \cite{IEEErao:purity}. For further concepts we referred to \cite{IEEEontology:Bosnia}, where the authors took the Reuters-21578 corpus and performed multi-label classification of articles. Their clustering was supervised. In our experiment, clustering was done with a fixed number of clusters.

\section{Reuters-21578 description}
\label{sec:reuters}
The corpus consists of 21578 documents of Reuters news wire stories. There are five different sets of content related categories and each document has a specification of what set, and categories within that set, the text belongs to. There are five different sets (Exchanges, Orgs, People, Places and Topics). We used only documents that have specification of Topics which consists of 135 economic subject categories. Because of the complexity of clustering algorithms, we also experimented with sets consisting of 200 and 500 documents. Reuters corpus was studied in many research studies. That made result comparison a lot easier.





\section{Vector Space Model}
\label{sec:vectorspacemodel}
Vector model is a collection of \textit{n} documents with \textit{m} unique terms, represented as \textit{m}$\times$\textit{n} term-document matrix, where each document is a \textit{m}-component vector. Each element of this matrix represents the weight of the term \textit{m} in a given document \textit{n}. 

Several term weighting schemes are used, such as binary frequency and simple term frequency (i.e.\ how many times the word occurs in the document). In our case we used \textit{tf-idf} weighting, which will be described later on.

Because no information about word order is encoded, the vector model is sometimes called \textit{Bag of words} or \textit{Dictionary} model.

Another important property of such model is that the number of unique terms could be quite large. That is why several preprocessing steps are used to decrease vector dimension of one document. 

\subsection{Preprocessing}
Preprocessing takes plain text document as input and outputs a \textit{Bag of words} model with \textit{tf-idf} weights.

The size of the Vector Space Model is $m \times n$, where $n$ is the number of all documents and $m$ the size of the set containing all words which appear in these documents. Most of the components have zero value because a given document does not contain all the words. That means that each $m$-component vector which represents a document is very sparse. Moreover, if there are many documents with different words, the $m$ value increases almost linearly with the number of documents. To reduce the size of the vector space model, preprocessing with removal of stop words and stemming is needed. Stemming decreases the number of various (lexically) similar words. With preprocessing we also lose some structural properties of a document.
The preprocessing steps are:

\begin{itemize}
\item{\textbf{Tokenization:}} Splits sentences into individual tokens, typically words.

\item{\textbf{Stop-word removal:}} A stop-word is defined as an extremely common word (\textit{the, is, at, which and on}), which occurs in every document. In order to save space, most common words are removed, based on a known \textit{English Stop-words} list. We also used some custom selected stop-words which will be described later in Section \ref{sec:experiment}.

\item{\textbf{Stemming\cite{IEEESuffixStripping:porter}:}} The process of reducing words to their base form, or stem. For example, the words "connected", "connection", "connections" are all reduced to the stem "connect". We used Porter's algorithm which is the de-facto standard algorithm used for English stemming.

\item{\textbf{Bag of words model}} is in our case created for each article in corpus. Model is created from list of tokens preprocessed with stemming algorithm. Tokens are basically actual English words from article and are ordered by their appearance in article. Each word has also information about its occurrence frequency which is later used to calculate \textit{tf-idf} weight and then replaced by \textit{tf-idf} value.

\item{\textbf{\textit{tf-idf} implementation:}} This weight is a measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus. Tf-idf is defined as:
  $$\mathit{tf\text{-}idf}_{i,j} = \mathit{tf}_{i,j} \cdot \mathit{idf}_i$$
  
where $\mathit{tf}_{i,j}$ is the term frequency defined as 
$$\mathit{tf}_{i,j} = \frac{n_{i,j}}{\sum_k n_{k,j}}$$
and $\mathit{idf}_i$ (\textit{inverse document frequency}) is defined as the logarithm of a measure of the general importance of the term. The general importance of the term is obtained by dividing the total number of documents by the number of documents containing the term.
$$\mathit{idf}_i = \log \frac{|\mathcal{D}|}{|\mathcal{D}_t|}$$
\item{\textbf{Serialization}} of Bag of words model into \textit{Weka} readable format(.arff). That was later used in RapidMiner as input format. 
\end{itemize}


\section{Clustering}
\label{sec:clustering}
\subsection{Types of clustering}
\begin{itemize}
\item \textbf{Hierarchical} algorithms find successive clusters using previously established clusters. These methods usually are either agglomerative ("bottom-up") or divisive ("top-down"). Agglomerative algorithms begin with each element as a separate cluster and merge them into larger clusters. Divisive algorithms begin with the whole set and proceed to divide it into smaller clusters. The traditional representation of cluster hierarchy is a dendrogram (a binary tree with individual examples at the leaves and a cluster at the root which includes all examples). Cutting the tree at the given height will give a clustering at a selected precision.

\item \textbf{Partitional} algorithms typically determine all clusters at once, but can also be used as divisive algorithms in hierarchical clustering. A partitional clustering algorithm obtains a single partition of the data instead of a clustering structure, such as the dendrogram produced by hierarchical methods. These methods have advantages in applications involving large data sets, but a problem occurs in the choice of the number of desired output classes.
\end{itemize}

\subsection{k-Medoids}
The k-Medoids algorithm is related to the k-means algorithm. It is a partitional algorithm and attempts to minimize the squared error (the distance between points labeled to be in a cluster and a point designated as the center of that cluster -- the medoid). A medoid is the object whose average distance to all objects is minimal. In contrast to k-means, k-medoids is more robust to noise and outliers. It minimizes a sum of pairwise dissimilarities instead of a sum of squared Euclidean distances. 


\subsection{Similarity measure}
It is important to select an appropriate similarity measure, which will determine how the similarity of two elements is calculated. This will influence the shape of the clusters, as some elements may be close according to one distance and farther away according to another.

In most cases the similarity measure is estimated by a function which calculates the distance between vectors of these documents. There are several measures of similarity. Listed below are those we used in our experiments: 



\begin{enumerate}
\item \textbf{The euclidean distance}
Euclidean distance is the ordinary distance between two points.
The distance between two documents $d_a$ and $d_b$ represented by their term vectors $\vec{t_a}$ and $\vec{t_b}$ is defined as:
$$d(\vec{t_a},\vec{t_b}) = \Big(\sum_{t \in T}{|w_{t,a} - w_{t,b}|^2}\Big)^{1/2}$$
where the term set is $ T = \{t_1, ..., t_m\} $ and the value of one term is $ w_{t,a} = \mathit{tfidf}(d_a, t) $

\item \textbf{The cosine distance}
Cosine distance is the cosine of the angle between vectors. Given the two documents $\vec{t_a}$ and  $\vec{t_b}$, their cosine similarity is:
 $$ SIM_c(\vec{t_a}, \vec{t_b}) = \frac{\vec{t_a} \cdot \vec{t_b}}{ |\vec{t_a}| \times |\vec{t_b}|} $$
 where $\vec{t_a}$ and $\vec{t_b}$ are $m$-dimensional vectors over the term set $ T = \{t_1, ..., t_m\} $. The cosine similarity value lies in the interval $[0,1]$.

\begin{comment}
\item \textbf{The Manhattan distance}
is the metric of the Euclidean plane defined by
$$ g((x_1,y_1),(x_2,y_2)) = |x_1 - x_2| + |y_1 - y_2|,$$
for all points $P_1(x_1,y_2)$ and $P_2(x_2,y_2)$. This number is equal to the length of all paths connecting $P_1$ and $P_2$ along horizontal and vertical segments, without ever going back, like those described by a car moving in a lattice-like street pattern. The equation is example for distance of two dimensional points and can be easily extended to $n$ dimensions. For two $n$ component vectors $ \vec{v_1}$ and $\vec{v_2}$ the Manhattan distance is
$$d_m(\vec{v_1},\vec{v_2}) = \sum_{i=1}^{n} |v_{1,i} - v_{2,i}|$$
where $v_{1,i}$ is the $i$th component of vector $v_1$.

\end{comment}
\end{enumerate}
\section{Evaluation of clustering}
\label{sec:evaluation}
To evaluate the quality of our results, we used measure of purity and entropy, which are often used in evaluation of clustering.

The purity measure is defined as:
$$ purity(C_j) = \frac{1}{|C_j|} \max_i(|C_j|_{\text{class}=i}) $$
The overall purity of a clustering solution can be expressed as a weighted sum of individual cluster purities:
$$ purity = \sum^k_{j=1}\frac{|C_j|}{|\mathcal{D}|} purity(C_j) $$
where $\mathcal{D}$ denotes the dataset, $|C_j|$ is the size of cluster $C_j$ and $|C_j|_{\text{class}=i}$ the number of items of class $i$ assigned to cluster $j$. 

The larger the value of purity, the better the clustering solution.

The entropy measure evaluates the distribution of categories in a given cluster, which makes this measure more comprehensive than purity. It is defined as:
$$E(C_j) = -\frac{1}{\log(k)} \sum^k_{i=1} \frac{n^i_j}{n_j} \log(\frac{n^i_j}{n_j} ) $$
 
where $n^i_j$ is the number of documents from the $i$th class that were assigned to cluster $C_j$, $n_j$ is the size of cluster $C_j$ and $k$ is the number of clusters.

The entropy of the overall solution is defined as the weighted sum of the individual entropy values of the clusters:
$$ Entropy = \sum^k_{j=1} \frac{n_j}{n}E(C_j) $$

The smaller the entropy value, the better the quality of the cluster is.
If purity equals 1 and the value of entropy is 0, the clustering returned perfect results.

 


\section{Experiment}
\label{sec:experiment}
The goal of experiment is to test different clustering measures with different preprocessing set-ups and see how that affects clustering accuracy. 
\subsection{Data}
Experimental data consists of random set of articles from Reuters-21578. The category of these articles is known and we used this information for later evaluation of results.

\begin{itemize}
\item \textbf{RawText (Tokenized)} which includes documents with almost no preprocessing, except the necessary splitting of sentences into words.
\item \textbf{English StopWords No Stemming} includes the same elements as the previous data set, excluding words from the English Stop-word list.
\item \textbf{English StopWords With Stemming} includes data with all preprocessing steps including stemming. 
\item \textbf{Stopwords 05} consists of previous set of data without words that occurred more than 5 times in one article.
\item \textbf{Stopwords 10} set of preprocessed documents without words that occurred more than 10 times in some articles and words from standard English Stop-word list. 
\item \textbf{Stopwords 15} same set of documents as previous but without words that occurred more than 15 times. 
\item \textbf{Stopwords 20} same set of documents as previous but without words that occurred more than 20 times. 
\end{itemize}

For example, \textit{Stopwords 05} does not present actual stop words list but a set of documents without words that were on that list. (Without words that occurred more than 5 times in some documents / articles).

\subsection{Computing}
Clustering was performed with program \textit{RapidMiner}, where you can simply draw your data-flow processing path. Because of computationally expensive algorithms the computing of one test took more than an hour. \textit{RapidMiner} presented results in table which were exported to our clustering accuracy evaluation program. 

\subsection{Testing}
The experiments were performed on articles from 8 different topics. We used the top eight topics listed in article \cite{IEEEincremental:zhou}. Because the number of clusters was known, there was no need to use agglomerative clustering. We used k-medoids clustering with the Euclidean and cosine similarity measures. 
\section{Results}
\label{sec:result}
As mentioned before we evaluated clustering results with purity and entropy measure. In the text that follows, we will refer to different datasets by their names. Their content is already explained in Section \ref{sec:experiment}.

As can be seen in Table \ref{table:euclidean200}, clustering (with the Euclidean similarity) of a small data set that did not go through the phase of preprocessing (\textit{Raw Text (Tokenized)}) gives slightly better results than clustering of a data set which was preprocessed with removal of English stop words and stemming. Moreover, the clustering of the \textit{English StopWords With Stemming} data set gave the worst results.




The purity and entropy values show that the Euclidean similarity is, in every experiment, better than the cosine similarity. 

As can be seen in Table \ref{table:cosine200}, the cosine similarity gives better results on a dataset without words which occurred more than 15 times.

Best results according to purity and entropy values gave preprocessing with removal of words which occurrence frequency is higher than 20 and clustering with euclidean similarity. 


\begin{table}[htbp]
\begin{center}
\begin{tabular}{l|c|c}

\textit{} & \multicolumn{1}{l|}{\textit{PURITY}} & \multicolumn{1}{l}{\textit{ENTROPY}} \\  \hline \hline
\textit{English StopWords No Stemming} & 0.905 & 0.228 \\ 
\textit{English StopWords With Stemming} & 0.825 & 0.254 \\ 
\textit{Raw Text (Tokenized)} & 0.940 & 0.285 \\ 
\textit{Stopwords 05} & 0.870 & 0.198 \\ 
\textit{Stopwords 10} & 0.895 & 0.284 \\ 
\textit{Stopwords 15} & 0.855 & 0.198 \\ 
\textit{Stopwords 20} & 0.945 & 0.133 \\ 
\end{tabular}
\end{center}
\caption{200 examples with euclidean similarity.}
\label{table:euclidean200}
\end{table}


\begin{table}[htbp]
\begin{center}
\begin{tabular}{l|c|c}

\textit{} & \multicolumn{1}{l|}{\textit{PURITY}} & \multicolumn{1}{l}{\textit{ENTROPY}} \\  \hline \hline
\textit{English StopWords No Stemming} & 0.540 & 0.603 \\ 
\textit{English StopWords With Stemming} & 0.540 & 0.569 \\ 
\textit{Raw Text (Tokenized)} & 0.565 & 0.608 \\ 
\textit{Stopwords 05} & 0.600 & 0.545 \\
\textit{Stopwords 10} & 0.520 & 0.566 \\ 
\textit{Stopwords 15} & 0.615 & 0.535 \\ 
\textit{Stopwords 20} & 0.495 & 0.640 \\ 
\end{tabular}
\end{center}
\caption{200 examples with cosine similarity.}
\label{table:cosine200}
\end{table}




Results obtained from clustering of the data set with 500 examples show that, contrary to many surveys, the Euclidean similarity gives far better results than the cosine.
As for the best stop word list, it is shown that the clustering gave worse results when we removed words with an occurrence frequency higher than 15. 

Clustering with the cosine similarity gave the best results when we performed clustering of the \textit{Stopwords 10} data set. This can be seen in Table \ref{table:cosine500}.

As can be seen in Table \ref{table:euclidean500}, clustering with the Euclidean similarity performed better on a \textit{English StopWords With Stemming} data set. 

Figures \ref{fig:cosine500} and \ref{fig:euclidean500} show how the removal of words impacts the results of clustering.



\begin{table}[htbp]

\begin{center}
\begin{tabular}{l|c|c}

\textit{} & \multicolumn{1}{l|}{\textit{PURITY}} & \multicolumn{1}{l}{\textit{ENTROPY}} \\ \hline \hline
\textit{English StopWords With Stemming} & 0.374 & 0.684 \\ 
\textit{Stopwords 05} & 0.338 & 0.647 \\ 
\textit{Stopwords 10} & 0.404 & 0.622 \\ 
\textit{Stopwords 15} & 0.328 & 0.670 \\ 
\textit{Stopwords 20} & 0.342 & 0.631 \\ 
\end{tabular}
\end{center}
\caption{500 examples with cosine similarity.}
\label{table:cosine500}
\end{table}

\begin{figure}[htp]
\centering
\includegraphics[width=\linewidth]{./Figures/cosine500.png}
\caption{Graph of entropy and purity measure, of clustering results with cosine similarity, in dependence of different stop words list. Clustering was performed on a data set of 500 documents.}
\label{fig:cosine500}
\end{figure}



\begin{table}[htbp]
\begin{center}
\begin{tabular}{l|c|c}

\textit{} & \multicolumn{1}{l|}{\textit{PURITY}} & \multicolumn{1}{l}{\textit{ENTROPY}} \\ \hline \hline
\textit{English StopWords With Stemming} & 0.590 & 0.146 \\ 
\textit{Stopwords 05} & 0.606 & 0.203 \\ 
\textit{Stopwords 10} & 0.570 & 0.289 \\ 
\textit{Stopwords 15} & 0.538 & 0.275 \\ 
\textit{Stopwords 20} & 0.632 & 0.227 \\ 
\end{tabular}
\end{center}
\caption{500 examples with euclidean similarity.}
\label{table:euclidean500}
\end{table}




\begin{figure}[htp]
\centering
\includegraphics[width=\linewidth]{./Figures/euclidean500.png}
\caption{Graph of entropy and purity measure, of clustering results with euclidean similarity, in dependence of different stop words list. Clustering was performed on a data set of 500 documents.}
\label{fig:euclidean500}
\end{figure}

Further exploration of, why the Euclidean similarity performed better than the cosine, revealed that clustering with the Euclidean similarity put almost all articles in one cluster. That is why purity of one cluster was so high. Clustering with the cosine similarity gave better distribution within clusters. Although, there were not enough articles of the same topic assigned to the same cluster.

\begin{comment}
\begin{table}[htp]

\begin{tabularx}{8,5cm}{ X|X|X|X|X }
  \textit{Topic} & \textit{Number of articles in one topic} & \textit{Clusters} & \textit{Number of items from cluster assigned to topic} & \textit{Number of articles in one cluster} \\
  \hline \hline
acq & 99 & cluster\_0 & 26 & 69 \\ 
crude & 88 & cluster\_4 & 16 & 103 \\ 
earn & 99 & cluster\_3 & 70 & 139 \\ 
grain & 12 & cluster\_6 & 3 & 28 \\ 
interest & 63 & cluster\_4 & 9 & 103 \\ 
money-fx & 48 & cluster\_4 & 14 & 103 \\ 
ship & 36 & cluster\_6 & 12 & 28 \\ 
trade & 55 & cluster\_4 & 37 & 103 \\ 

\end{tabularx}
\caption{The table presents which cluster had most items from certain topic. This applies for data with 500 examples which were preprocessed with removal of english stopwords and stemming, and clustered with cosine similarity.}
\label{table:cosine500dist}
\end{table}




\begin{table}[htp]
\begin{tabularx}{8,5cm}{ X|X|X|X|X }
  
  \textit{Topic} & \textit{Number of articles in one topic} & \textit{Clusters} & \textit{Number of items from cluster assigned to topic} & \textit{Number of articles in one cluster} \\
  \hline \hline
acq & 99 & cluster\_1 & 96 & 399 \\ 
crude & 88 & cluster\_1 & 36 & 399 \\ 
earn & 99 & cluster\_6 & 52 & 95 \\ 
grain & 12 & cluster\_1 & 7 & 399 \\ 
interest & 63 & cluster\_1 & 23 & 399 \\ 
money-fx & 48 & cluster\_1 & 26 & 399 \\ 
ship & 36 & cluster\_1 & 18 & 399 \\ 
trade & 55 & cluster\_1 & 37 & 399 \\ 

\end{tabularx}
\caption{The table presents which cluster had most items from certain topic. This applies for data with 500 examples which were preprocessed with removal of english stopwords and stemming, and clustered with euclidean similarity.}
\label{table:euclidean500dist}
\end{table}
\end{comment}

\section{Conclusion}
\label{sec:conclusion}
The main goal of this paper was to find the best similarity measure and stop word list for the well-known corpus Reuters-21578 and to familiarize the reader with common clustering techniques for text mining. Because of the time complexity of the clustering algorithms and the limitations of our hardware, we had to reduce the number of documents and topics. The set became too small to represent the whole corpus and it gave us unexpected results, which we already discussed in Section \ref{sec:result}. 

As mentioned before, according to the purity and entropy values, the Euclidean similarity gave much better results than expected. The reason for these results is the size of the data set we used for clustering. Because one example has over 10,000 attributes, 500 examples are not enough to get good results. We think that with a bigger data set, we could get better results when clustering with the cosine similarity. 

 
\hfill May 22, 2011



%\appendices
%\section{Proof of the First Zonklar Equation}
%Appendix one text goes here.

%\section{}
%Appendix two text goes here.


\ifCLASSOPTIONcompsoc
  \section*{Acknowledgments}
\else 
  \section*{Acknowledgment}
\fi

The authors would like to thank Domen Košir for mentorship.

\ifCLASSOPTIONcaptionsoff
  \newpage
\fi

\begin{thebibliography}{1}
\bibitem{IEEEhowto:kopka}
H.~Kopka and P.~W. Daly, \emph{A Guide to \LaTeX}, 3rd~ed.\hskip 1em plus
  0.5em minus 0.4em\relax Harlow, England: Addison-Wesley, 1999.
\bibitem{IEEERecentDevelopment:andrewsandfox} 
N.~O. Andrews and E.~A. Fox, \emph{Recent Developments in Document Clustering} \hskip 1em plus 0.5em minus 0.4em\relax Department of Computer Science, Virginia Tech. Blacksburg, Va24060, 2007
\bibitem{IEEEmachinelearningdatam:kononenko} 
I.~Kononenko and M.~Kukar, \emph{Machine Learning and Data Mining} \hskip 1em plus 0.5em minus 0.4em\relax Horwood publishing limited, UK, 2007
\bibitem{IEEEevaloftextclustering:amineelberrichisimonet} 
A.~Amine, Z.~Elberrichi, and M.~Simonet \emph{Evaluation of Text Clustering Methods Using WordNet} \hskip 1em plus 0.5em minus 0.4em\relax The international Arab Journal of Information Technology, Vol. 7, No. 4, October 2010
\bibitem{IEEEincremental:zhou} 
S.~Zhou \emph{Incremental Document Classification In a Knowledge Management Environment} \hskip 1em plus 0.5em minus 0.4em\relax University of Toronto, 2001

\bibitem{IEEESuffixStripping:porter}
Martin F. Porter, \textit{An algorithm for suffix stripping}, Electronic Library \& Information Systems, Vol. 40, No. 3, 1980, pp. 211-218.

\bibitem{IEEEontology:Bosnia}
S. Vogrincic and Z. Bosnic,  \textit{Ontology-based multi-label classification of economic articles},  presented at Comput. Sci. Inf. Syst., 2011, pp.101-119. 

\bibitem{IEEEstopWords:sirotkin}
W. John Wilbur, Karl Sirotkin, \textit{The automatic identification of stop words}, Journal of Information Science, Vol. 18, No. 1, 1992, pp. 45-55.

\bibitem{IEEEreuters:lewis}
D.~D. Lewis, \textit{Reuters-21578}, Test collection for text categorization research, \texttt{http://www.daviddlewis.com/resources/testcollections/reuters21578/}, 2011

\bibitem{IEEEhuang:similaritymeasures}
A. ~Huang \textit{Similarity Measures for Text Document Clustering}, Department of computer science, the university of Walkato, New Zeleand

\bibitem{IEEErao:purity}
D.~Rao, \textit{A note on purity}, \texttt{www.cse.iitm.ac.in/\textasciitilde cs672/purity.pdf}

\bibitem{IEEEwiki:cluster}
\textit{Cluster Analysis}, \texttt{http://en.wikipedia.org/wiki/Cluster\_analysis}

\end{thebibliography}

\begin{IEEEbiographynophoto}{David Kunštek}
Student of Computer science faculty in Ljubljana, Slovenia. 
Email: dejvid.kit@gmail.com
\end{IEEEbiographynophoto}
\begin{IEEEbiographynophoto}{Ivana Kolarič}
Student of Computer science faculty in Ljubljana, Slovenia. 
Email: ivana.kolarich@gmail.com
\end{IEEEbiographynophoto}
\end{document}