%%%%%%%%%%%%%%%%%%%% author.tex %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sample root file for your "contribution" to a contributed volume
%
% Use this file as a template for your own input.
%
%%%%%%%%%%%%%%%% Springer %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


% RECOMMENDED %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[graybox]{svmult}

% choose options for [] as required from the list
% in the Reference Guide

\usepackage{mathptmx}       % selects Times Roman as basic font
\usepackage{helvet}         % selects Helvetica as sans-serif font
\usepackage{courier}        % selects Courier as typewriter font
\usepackage{type1cm}        % activate if the above 3 fonts are
                            % not available on your system
%
\usepackage{makeidx}         % allows index generation
\usepackage{graphicx}        % standard LaTeX graphics tool
                             % when including figure files
\usepackage{multicol}        % used for the two-column index
\usepackage[bottom]{footmisc}% places footnotes at page bottom
\usepackage{url}             % proper typesetting of URLs and e-mail addresses

% see the list of further useful packages
% in the Reference Guide

\makeindex             % used for the subject index
                       % please use the style svind.ist with
                       % your makeindex program

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{document}

\title*{Identifying concepts on a specific domain by an unsupervised graph-based approach}
% Use \titlerunning{Short Title} for an abbreviated version of
% your contribution title if the original one is too long
% \author{Name of First Author and Name of Second Author} % template placeholder, superseded below
\author{Franco Rojas-Lopez, Ivan Lopez-Arevalo, Victor Sosa-Sosa}
% Use \authorrunning{Short Title} for an abbreviated version of
% your contribution title if the original one is too long
\institute{Information Technology Laboratory, Cinvestav - Tamaulipas 
\at Scientific and Technological Park, Victoria, Mexico 
\email{\{frojas, ilopez, vjsosa\}@tamps.cinvestav.mx}
}
% Use \authorrunning{Short Title} for an abbreviated version of
% your contribution title if the original one is too long
   %   V  A  L  I  D  O  S
   % \institute{Name of First Author \at Name, Address of Institute, \email{name@email.address}
   % \and Name of Second Author \at Name, Address of Institute \email{name@email.address}}
%
% Use the package "url.sty" to avoid
% problems with special characters
% used in your e-mail or web address
%
\maketitle

\abstract*{This paper presents an unsupervised approach to Word Sense Disambiguation on a specific domain to automatically assign the right sense to a given ambiguous word. The proposed approach relies on the integration of two sources of information: context and semantic similarity information. The experiments were carried out on the English test data of SemEval 2010 and evaluated with a variety of measures that analyze the connectivity of the graph structure. The results obtained were evaluated using precision and recall measures and compared with the results of SemEval 2010. The approach is currently under test with other semantic similarity measures; preliminary results look promising. \newline\indent}
%Each chapter should be preceded by an abstract (10--15 lines long) that summarizes the content. The abstract will appear \textit{online} at \url{www.SpringerLink.com} and be available with unrestricted access. This allows unregistered users to read the abstract as a teaser for the complete chapter. As a general rule the abstracts will not appear in the printed version of your book unless it is the style of your particular book or that of the series to which your book belongs.
%Please use the 'starred' version of the new Springer \texttt{abstract} command for typesetting the text of the online abstracts (cf. source file of this chapter template \texttt{abstract}) and include them with the source files of your manuscript. Use the plain \texttt{abstract} command if the abstract is also to appear in the printed version of the book.

\abstract{This paper presents an unsupervised approach to Word Sense Disambiguation on a specific domain to automatically assign the right sense to a given ambiguous word. The proposed approach relies on the integration of two sources of information: context and semantic similarity information. The experiments were carried out on the English test data of SemEval 2010 and evaluated with a variety of measures that analyze the connectivity of the graph structure. The results obtained were evaluated using precision and recall measures and compared with the results of SemEval 2010. The approach is currently under test with other semantic similarity measures; preliminary results look promising. \newline\indent}
%Each chapter should be preceded by an abstract (10--15 lines long) that summarizes the content. The abstract will appear \textit{online} at \url{www.SpringerLink.com} and be available with unrestricted access. This allows unregistered users to read the abstract as a teaser for the complete chapter. As a general rule the abstracts will not appear in the printed version of your book unless it is the style of your particular book or that of the series to which your book belongs.\newline\indent
%Please use the 'starred' version of the new Springer \texttt{abstract} command for typesetting the text of the online abstracts (cf. source file of this chapter template \texttt{abstract}) and include them with the source files of your manuscript. Use the plain \texttt{abstract} command if the abstract is also to appear in the printed version of the book.

\section{Introduction} \label{sec:1}
In Natural Language Processing (NLP) and recently in the Computational Linguistics literature, the problem of assigning concepts to words in texts has been called Word Sense Disambiguation (WSD); it is defined as the task of selecting the correct sense for a given ambiguous word in a given context. A word is ambiguous when its meaning varies depending on the context in which it {occurs}. Several approaches have been proposed for WSD. In general, the literature distinguishes two main approaches: supervised and unsupervised. Supervised approaches rely on the availability of sense-labeled data from which the relevant sense distinctions are learned, while unsupervised approaches typically refer to disambiguating word senses without the use of sense-tagged corpora. Most of the unsupervised approaches proposed in the WSD literature are knowledge based, i.e. they exploit only the information provided by a Machine Readable Dictionary (MRD). Some unsupervised WSD systems also use unlabeled data together with the dictionary information to perform an unsupervised approach to disambiguate words. An effective approach to this task would be useful for a number of NLP applications, for example Information Retrieval, Content Analysis, Information Extraction, etc. \\Over the last years the interest in WSD has been motivated by WSD competitions such as SemEval\footnote{\url{http://semeval2.fbk.eu/semeval2.php?location=}}, where different systems may evaluate their performance. The purpose of SemEval is to perform a comparative evaluation of WSD systems in several kinds of tasks. In particular, the results obtained in task \#17 (All-words WSD on a Specific Domain) are reported in this paper. The paper is organized as follows: Section~\ref{workpreliminary} presents relevant works on WSD. Section~\ref{approach} describes the graph-based approach. 
Section~\ref{experiments} presents the experiments carried out, and finally, the conclusions and further work are given in Section~\ref{discussion}.
\section{Background}\label{workpreliminary}
% Always give a unique lbel and use \ref{<label>} for cross-references and \cite{<label>} for bibliographic references use \sectionmark{} to alter or adjust the section heading in the running head
Recent advances in WSD report that graph-based methods, such as centrality models, have been applied in the network analysis area and recently to linguistic knowledge bases, including unsupervised WSD. These methods explore the structure and links of the graph underlying a particular lexical knowledge base. Some important works in this area are presented by Navigli and Mirella \cite{lapata07}, Rada and Sinha \cite{rada}, Reddy \textit{et al.} \cite{silva}, and Navigli \cite{navigli}. In these approaches a graph representation for senses (vertices) and relations (edges) is first built from a lexical knowledge base. According to their performance, similar works reported in the literature are based on clustering techniques. For example, Aguirre and L\'opez \cite{aguirre} proposed a method to group senses of words of fine granularity within one of coarse granularity to reduce the polysemy\footnote{The association of one word with two or more distinct meanings.}. Pedersen \textit{et al.} \cite{pedersen05} proposed an unsupervised approach that solves name ambiguity by clustering the instances of a given name into groups, each of which is associated with a distinct underlying entity. In this approach, given a name, the actual contexts are grouped to represent the meanings of a word. In this paper we describe and evaluate a graph-based approach to assign the right sense to an ambiguous word by obtaining and merging context information and semantic similarity information; the main idea is mutual reinforcement between both techniques. The preliminary experiments carried out show promising results on WSD.
\section{Approach}\label{approach}
The graph-based representation relies on the combination of two techniques to select the right sense for a given ambiguous word: the context and the semantic similarity using a specific domain corpus; both techniques use information from WordNet (a lexical database). Figure~\ref{meto} illustrates the proposed methodology; the complete description of the involved procedures is given in the following sections.
\begin{figure}
\centering
\caption{Proposal} \label{meto}
\includegraphics[width=0.7\textwidth]{metodologia.eps} %width=6cm
\end{figure}
\subsection{Pre-processing}\label{context}
 Given an ambiguous word and its context,\footnote{The parts that immediately precede and follow a word or passage and clarify its meaning.} a graph is constructed recovering senses of second order vectors\footnote{Given an ambiguous word, its senses are retrieved from WordNet; each recovered sense is again tagged with its Part-Of-Speech to recover the additional senses for each word within the first sense.} from WordNet as well as semantically related words extracted from external knowledge sources. The experiments were performed using the \textit{all-words} dataset on a specific domain of SemEval 2010. The input file consists of several instances of the ambiguous word; each instance is a context in which a particular ambiguous word appears. So, the content of this file is first tagged\footnote{The assignment of parts of speech to each word in the document.} (step 1 of the methodology); for this task, the Stanford parser is used  %Stanford\footnote{\url{http://nlp.stanford.edu/software/tagger.shtml}}
 to Part-of-Speech tag the test data. In step 2, the context window size is defined; different window sizes were tested in the experiments to determine how many words before and after an ambiguous word \textit{w} must be included in the context, and the best resulting window size was $2\beta + 1$, with $\beta = 1$.\\
On the other hand, in step $3$ an untagged corpus from the environment domain provided by SemEval $2010$ was used to extract keywords in the domain based on their frequency of occurrence. For example, \textit{species, biodiversity, conservation}, etc. appear frequently in the environment domain. The first $20$ words, in descending order according to their frequency, were selected and combined in pairs to create a web query of length two according to Iosif and Potamianos \cite{Elias}. For example, for ``\textit{specie and biodiversity}'' the web queries were sent to several search engines (Google, Yahoo, Bing, HotBot, and MetaCrawler) according to the study of Aguilar \cite{aguilar}. The corpus is also Part-Of-Speech tagged and stemmed by using the Stanford parser. After the pre-processing phase, the semantically similar terms for each ambiguous word are retrieved using Mutual Information (MI) \cite{Church,diana} (see Equation~\ref{im}). The context window size was defined as $2\beta + 1$, $\beta=5$, according to Islam and Inkpen \cite{diana}. MI compares the probability of observing $X$ and $Y$ together ($f(X,Y)$) with the probabilities of observing $X$ and $Y$ independently ($f(X), f(Y)$).
\begin{equation}
MI(X,Y)=\log_2\frac{f(X,Y)}{f(X)\, f(Y)} \label{im}
\end{equation}
\subsection{Graph construction}
Some semantic similarity measures have been implemented to quantify the degree of similarity between two words using information drawn from the WordNet hierarchy (see Ted Pedersen \textit{et al.} \cite{pedersen}). In particular, the Lin and Vector measures were taken into account because they showed a good performance on the WordNet hierarchy and good results in the conducted research. Once contexts are recovered, the senses for each word in the context are retrieved from WordNet and weighted by a semantic similarity score using the WordNet::Similarity\footnote{\textit{This is a Perl module that implements a variety of semantic similarity and relatedness measures based on information found in the lexical database WordNet.}} score between the senses of word \textit{w} and the senses for each word in the context. These measures return a real value indicating the degree of semantic similarity between a pair of concepts.\\

Formally, let $C_w=\{c_1, c_2, \cdots, c_n\}$ be the set of words in the context related to an ambiguous word \textit{w}. Let \textit{senses(w)} be the set of senses of \textit{w} and let \textit{senses($c_n$)} be the set of senses for a word in the context; a ranked list is returned in descending order of semantic similarity between \textit{w} and $c_n$, and the items that maximize this score are filtered according to the statistical mean. These items constitute the so-called first order vectors.
For each ambiguous word, two graphs are built (see Figure~\ref{meto}). In this representation, $G=(V, E, W)$, where $V$ is the set of vertices (concepts), $E$ is the set of edges (semantic relations), and $W$ is the set of weights (a strong link between two concepts or vertices). So, each recovered sense is again tagged with its Part-Of-Speech to recover the additional senses for each word within the first sense. These semantic relations for senses constitute the connections in the graph. Once the semantic graph is built, its structure and links are analyzed applying the algorithms described in Section~\ref{measures}.
\subsection{Graph-based measures}\label{measures}
Vertex-based centrality is defined in order to measure the importance of a vertex in the graph; a vertex with a high centrality score is usually considered more influential than other vertices in the graph. In the experiments, four algorithms have been implemented to determine which node is the most important by examining the graph structure: in-degree, Key Problem Player, Jaccard, and Personalized PageRank, which are described below.\\
\textbf{Indegree} \cite{lapata07}: the simplest and most popular measure is degree centrality. In an undirected graph the degree of a vertex is the number of its attached links; it is a simple but effective measure of nodal importance. A node is important in a graph if many links converge to it. In the implementation, $V$ is the set of vertices of the graph and $v$ a vertex, see Equation~\ref{degree}.
\begin{equation}
score(v)=\frac{indegree(v)}{\mid V \mid-1} \label{degree}
\end{equation}
\textbf{Key Problem Player} \cite{lapata07}: consists in finding a set of nodes that is maximally connected to all other nodes. Here, a vertex (denoted by $v$ and $u$, $V$ is the set of vertices) is considered important if it is relatively close to all other vertices, see Equation~\ref{kpp}.
\begin{equation}
kpp(v)=\frac{ \displaystyle\sum_{u\in V:u\neq v}\frac{1}{d(u,v)}}{\mid V \mid-1} \label{kpp}
\end{equation}
\textbf{Jaccard coefficient}: computes the probability that two vertices $i$ and $j$ will have a common neighbor $k$. According to Granovetter \cite{jaccard}, the link strength between two vertices depends on the overlap of their neighborhoods. If the overlap of neighborhoods between vertex $i$ and vertex $j$ is large, it is considered that $i$ and $j$ have a strong tie. Otherwise, they are considered to have a weak link, see Equation~\ref{jaccard}.
\begin{equation}
Jaccard(i, j)=\frac{\mid N_i \cap N_j\mid}{\mid N_i \cup N_j\mid} \label{jaccard}
\end{equation}
where, $N_i$ and $N_j$ indicate the neighborhoods of the vertex $i$ and $j$ respectively.\\\\
\textbf{PageRank}: a link analysis algorithm traditionally applied on directed graphs; this algorithm can also be applied to undirected graphs, in which case the out-degree of a vertex is equal to the in-degree of the vertex. For this, an adaptation of the PageRank algorithm has been proposed, the Personalized PageRank (PPRank) algorithm \cite{Aguirre09}. After running the algorithm, a score is associated with each vertex as shown in Equation~\ref{pp}.
\begin{equation}
PR(v_i)=(1-\alpha) + \alpha \cdot \displaystyle\sum_{v_j\in In(v_i)} \frac{w_{ji}}{\sum_{v_k\in Out(v_j)}w_{jk}}PR(v_j) \label{pp}
\end{equation}
According to the literature, $\alpha$ is a damping factor which is usually set to $0.85$; this is the value used in the evaluation of the implemented WSD prototype.\\\\
Finally, the context and semantic similarity information are combined (see step $4$ in Figure~\ref{meto}) using Equation~\ref{combining} to get a ranked list in descending order according to their relevance; the node with the highest value is selected as the right sense for the ambiguous word in question. Several experiments were carried out with different values for $\delta$, and the best result was obtained with $\delta = 0.6$; thus we give more importance to semantic similarity because, surprisingly, the best results were obtained using the background documents.
\begin{equation}
  Score(v_i) = \frac{(1-\delta) Result(context) + \delta Result(corpus)} {2} \label{combining}
\end{equation}
\section{Experiments and Results} \label{experiments}
The purpose of this evaluation is to show the relevance of the hypothesis that the combination of contextual semantic relationships and the semantic similarity of a domain contributes to WSD in an unsupervised manner; usually only the context or an expanded context has been used for WSD. Therefore, in this approach the context and semantic similarity information were integrated and used afterwards to assign the right sense to an ambiguous word. So, to evaluate the performance of the WSD approach and to be able to compare it with other algorithms, the experiments were carried out on the English test data of SemEval 2010 \cite{semeval10}. Precision (percentage of words that are tagged correctly, out of the words addressed by the system) and Recall (percentage of words that are tagged correctly, out of all words
in the test set) were used as evaluation measures. The dataset is a file with 1398 ambiguous words, 366 verbs, and 1032 nouns. The WSD approach was performed by using WordNet 3.0 as lexical database. Table~\ref{tablaRes} shows the results obtained with each algorithm and Table~\ref{resulSemEval} the results obtained in the WSD competition. The results show that the performance of the proposed approach is low, equal to Yoan's system and far from Anup's system, when it is evaluated using the PPRank algorithm; this is because, unlike the other ranking algorithms, PPRank takes into account edge weights when computing the score associated with each vertex. The other algorithms only make use of the content or link information, which could explain their worse performance. The results obtained by our approach were worse than those reported in the literature, but the preliminary results of these algorithms are promising: if we retrieve the semantically most similar words for an ambiguous word, this could help improve the disambiguation process.
\begin{table}
\centering
\caption{Performance of connectivity measures in the proposal over the all-words dataset SemEval 2010}
\begin{tabular}{lllll}
\hline\noalign{\smallskip}
Algorithm & Precision (\%) & Recall (\%) & Nouns (\%) & Verbs (\%)\\ 
\noalign{\smallskip}
\hline
KPP & $33.94$ & $33.11$ & $33.52$ & $36.33$\\
Indegree & $33.87$ & $33.04$ & $31.68$ & $36.88$\\
Jaccard & $34.38$ & $33.54$ & $32.94$ & $35.24$\\
PPRank &$35.11$& $34.26$ & $31.78$ & $36.88$\\
\hline
\end{tabular}\label{tablaRes}
\end{table}

\begin{table}
\centering
\caption{Overall results for the domain WSD of SemEval 2010}
\begin{tabular}{lllll}
\hline\noalign{\smallskip}
Algorithm & Precision (\%) & Recall (\%) & Nouns (\%) & Verbs (\%)\\ 
\noalign{\smallskip}
\hline
Anup Kulkarni & $51.2$ & $49.5$ & $51.6$ & $43.4$\\
Andrew Tran & $50.6$ & $49.3$ & $51.6$ & $42.6$\\
Andrew Tran & $50.4$ & $49.1$ & $51.5$ & $42.5$\\
Aitor Soroa & $48.1$ & $48.1$ & $48.7$ & $46.2$\\
$\vdots$ & $\vdots$ & $\vdots$ & $\vdots$ & $\vdots$\\
Radu Ion & $35.1$ & $35.0$ & $34.4$ & $36.8$ \\
Yoan Gutierrez & $31.2$ & $30.3$ & $30.4$ & $30.1$\\
\textit{Random baseline} & $23.2$ & $23.2$ & $25.3$ & $17.2$\\
\hline
\end{tabular}\label{resulSemEval}
\end{table}
\section{Conclusions and Future Work}\label{discussion}
This paper describes an approach aimed at tackling the WSD problem on a specific domain. The adaptation and integration of the tested techniques have been implemented in a first prototype. With this prototype, a semantic graph is obtained by using \textit{second order vectors} of senses recovered from WordNet, which correspond to a specific ambiguous word. Thus, two semantic graphs are obtained and evaluated given the context and the words related to an ambiguous word. The approach has been tested only on a standard benchmark dataset released by SemEval 2010 in the all-words domain-specific WSD task. Tests on other datasets are desirable in the future, for example medicine and tourism. As further work, other measures for semantic similarity will be integrated, for example the distributional similarity measure \cite{adam07}, which allows obtaining semantic terms with more accuracy.



\input{referenc}
\end{document}
