 % THIS IS SIGPROC-SP.TEX - VERSION 3.1
% WORKS WITH V3.2SP OF ACM_PROC_ARTICLE-SP.CLS
% APRIL 2009
%
% It is an example file showing how to use the 'acm_proc_article-sp.cls' V3.2SP
% LaTeX2e document class file for Conference Proceedings submissions.
% ---------------------------------------------------------------------------------- ------------------------------
% This .tex file (and associated .cls V3.2SP) *DOES NOT* produce:
%       1) The Permission Statement
%       2) The Conference (location) Info information
%       3) The Copyright Line with ACM data
%       4) Page numbering
% ---------------------------------------------------------------------------------------------------------------
% It is an example which *does* use the .bib file (from which the .bbl file
% is produced).
% REMEMBER HOWEVER: After having produced the .bbl file,
% and prior to final submission,
% you need to 'insert'  your .bbl file into your source .tex file so as to provide
% ONE 'self-contained' source file.
%
% Questions regarding SIGS should be sent to
% Adrienne Griscti ---> griscti@acm.org
%
% Questions/suggestions regarding the guidelines, .tex and .cls files, etc. to
% Gerald Murray ---> murray@hq.acm.org
%
% For tracking purposes - this is V3.1SP - APRIL 2009

%\documentclass{acm_proc_article-sp}
\documentclass{sig-alternate}
\usepackage{times,amsmath,epsfig}
 
\usepackage{listings,lipsum}

%
\usepackage{makeidx}  % allows for indexgeneration
%
\usepackage{xcolor}
\usepackage{tikz}
\usepackage{pgfplots}

\usepackage{graphicx}
\usepackage{stfloats}
\usepackage{amsmath}
\usepackage{bbm}
\usepackage{amssymb}
\usepackage{multirow}
\usepackage{mathrsfs}
\usepackage{url}
\usepackage{listings}
\usepackage{color}
\usepackage{wrapfig}
\usepackage{mathtools}
 
\usepackage{algorithm}
\usepackage{algorithmic} 

\lstloadlanguages{XML}

% --- Layout tweaks and document-local macros ---------------------------------
% Slightly tighten the leading to fit the page budget (camera-ready tweak;
% NOTE(review): the setspace package is the modern alternative to this hack).
\renewcommand{\baselinestretch}{0.96}
% Shorthand for numbered display equations: \beq ... \enq.
% NOTE(review): the closing macro is \enq (not the symmetric \eeq) -- use consistently.
\newcommand{\beq}{\begin{equation}}
\newcommand{\enq}{\end{equation}}
% Scratch counter for saving/restoring the equation counter
% (typical use: manually numbered full-width equations in two-column layout).
\newcounter{mytempeqncnt}
% Shorthand for block quotations: \bquote ... \equote.
\newcommand{\bquote}{\begin{quote}}
\newcommand{\equote}{\end{quote}}

% Inline red editorial marker, e.g. \todo{check this number}; remove before submission.
\newcommand{\todo}[1]{\textcolor{red}{@TODO: #1}}
% Bold-italic term followed by a superscript [dtr] reviewer tag (editing aid).
\newcommand{\dtr}[1]{\textbf{\textit{#1}$^\textbf{[dtr]}$}}
% Small line numbers in algorithmic environments.
\algsetup{linenosize=\small}
% Theorem-like environment for definitions (independently numbered).
\newtheorem{definition}{Definition}
% Relax interword spacing globally to reduce overfull boxes in the narrow
% two-column format (NOTE(review): microtype would be a cleaner remedy).
\sloppy 
% Listings environment that accepts \lstset options and pulls back the
% trailing vertical space by one line after the listing.
\lstnewenvironment{listingh}[1][]{%
    \lstset{#1}%
}{%
    \vspace{-\baselineskip}%
}
\begin{document}
\title{Efficient and Effective On-the-fly Candidate Selection over SPARQL Endpoints}
%
 
\numberofauthors{3} % there are a *total* of THREE authors,
% all of whom appear on the 'first-page'; no \additionalauthors
% section is needed.
%
\author{
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three).
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
% 1st. author
\alignauthor
Samur Araujo \\
       \affaddr{Delft University of Technology, }\\
       \affaddr{PO Box 5031, 2600 GA }\\
       \affaddr{Delft, the Netherlands}\\
       \email{s.f.cardosodearaujo@tudelft.nl}
% 2nd. author
\alignauthor
Duc Thanh Tran\\
       \affaddr{Karlsruher Institute of Technology}\\
       \affaddr{Germany}\\
       \email{duc.tran@kit.edu}
% 3rd. author
\alignauthor 
Arjen de Vries \\
       \affaddr{Delft University of Technology, }\\
       \affaddr{PO Box 5031, 2600 GA }\\
       \affaddr{Delft, the Netherlands}\\
       \email{a.p.devries@tudelft.nl}
}
 

\maketitle
\begin{abstract}
Instance matching is the problem of finding instances that refer to the same real-world entity. This task is a crucial step towards Web data integration. It is challenging due to the heterogeneous nature of Web data that often is only accessible through remote data endpoints. In this work, we drop the assumption that data is available in advance, and cast instance matching as the problem of answering queries over remote endpoints. In particular, we propose \emph{instance-specific matching schemes} that for every instance use several queries to retrieve the various heterogeneous candidate matches \emph{on-the-fly} in a pay-as-you-go fashion. As the number of these candidate selection queries might be large and the cost of executing them over remote endpoints is high, we propose a \emph{heuristic-based search optimization framework} that is used to prune non-optimal queries. We show that compared to two baselines, the proposed solution not only yields higher quality results but also better runtime performance.
\end{abstract}

%% A category with the (minimum) three required fields
%\category{H.3.3}{Information Storage and Retrieval}{ Information Search and Retrieval-Search process, Selection process, Information filtering;}
%
%\terms{Algorithms, Experimentation}
%
%\keywords{query optimization, instance matching, remote querying, data integration, linked data} % NOT required for Proceedings

   
\input{sec-intro}
 
\input{sec-problem}
\input{sec-keyselection}
\input{sec-approach}
 
\input{sec-experiment}
\input{sec-experiment2}
\input{sec-experiment3}
\input{sec-related}

\section{Conclusions}
We proposed a candidate selection approach that operates by querying remote data endpoints in the Linked Data cloud. Our method focuses on optimizing the quality of the results as well as the execution time needed to obtain them. To achieve high-quality results, we learn candidate selection schemes from the data, which are used to build effective instance-specific queries. To achieve good runtime performance, we employ a heuristic-based search algorithm that learns to execute those queries efficiently. We evaluated our approach against two baselines, using two benchmark matching tasks, OAEI 2010 and 2011. The results indicate that the use of schema information in the queries considerably improves the quality of the results and the overall execution time (because it limits the scope over which the queries are computed).
%While the type of query affects the quality of the results, no single unique query type can find matches for all instances; therefore, multiple queries have to be considered, as well as, an efficient way to execute them. 
Overall, compared to the best baseline, Sonda was 34\% faster and improved the quality by 13\% (in terms of F1). 
 
\bibliographystyle{abbrv}
\bibliography{wsdm}  % wsdm.bib is the name of the Bibliography in this case

\end{document} 