% This is "sig-alternate.tex" V1.9 April 2009
% This file should be compiled with V2.4 of "sig-alternate.cls" April 2009
%
% This example file demonstrates the use of the 'sig-alternate.cls'
% V2.4 LaTeX2e document class file. It is for those submitting
% articles to ACM Conference Proceedings WHO DO NOT WISH TO
% STRICTLY ADHERE TO THE SIGS (PUBS-BOARD-ENDORSED) STYLE.
% The 'sig-alternate.cls' file will produce a similar-looking,
% albeit, 'tighter' paper resulting in, invariably, fewer pages.
%
% ----------------------------------------------------------------------------------------------------------------
% This .tex file (and associated .cls V2.4) produces:
%       1) The Permission Statement
%       2) The Conference (location) Info information
%       3) The Copyright Line with ACM data
%       4) NO page numbers
%
% as against the acm_proc_article-sp.cls file which
% DOES NOT produce 1) thru' 3) above.
%
% Using 'sig-alternate.cls' you have control, however, from within
% the source .tex file, over both the CopyrightYear
% (defaulted to 200X) and the ACM Copyright Data
% (defaulted to X-XXXXX-XX-X/XX/XX).
% e.g.
% \CopyrightYear{2007} will cause 2007 to appear in the copyright line.
% \crdata{0-12345-67-8/90/12} will cause 0-12345-67-8/90/12 to appear in the copyright line.
%
% ---------------------------------------------------------------------------------------------------------------
% This .tex source is an example which *does* use
% the .bib file (from which the .bbl file % is produced).
% REMEMBER HOWEVER: After having produced the .bbl file,
% and prior to final submission, you *NEED* to 'insert'
% your .bbl file into your source .tex file so as to provide
% ONE 'self-contained' source file.
%
% ================= IF YOU HAVE QUESTIONS =======================
% Questions regarding the SIGS styles, SIGS policies and
% procedures, Conferences etc. should be sent to
% Adrienne Griscti (griscti@acm.org)
%
% Technical questions _only_ to
% Gerald Murray (murray@hq.acm.org)
% ===============================================================
%
% For tracking purposes - this is V1.9 - April 2009

\documentclass{sig-alternate}

  \pdfpagewidth=8.5truein
  \pdfpageheight=11truein  
   

\usepackage{graphicx} % nezbytne pro vkladani obrazku do dokumentu


\usepackage{amsmath}
\usepackage{amsfonts}


%\usepackage{pifont}


\usepackage{listings}
%\usepackage{setspace}
\usepackage{color}

\usepackage{wasysym}

%\usepackage{ifpdf}
\usepackage{url}
\usepackage{algorithmic}
\usepackage{algorithm}
%\numberwithin{algorithm}{chapter}

%\floatname{algorithm}{Procedure}
\usepackage{multirow}

\usepackage{balance}

%\usepackage{alltt}
%\usepackage{lscape}


%\newcommand{\theHalgorithm}{\arabic{algorithm}}

\usepackage{ltablex}
\newcommand{\PreserveBackslash}[1]{\let\temp=\\#1\let\\=\temp}
\let\PBS=\PreserveBackslash


\begin{document}



%
% --- Author Metadata here ---
\conferenceinfo{SAC'10}{March 22-26, 2010, Sierre, Switzerland.}
\CopyrightYear{2010} % Allows default copyright year (2002) to be over-ridden - IF NEED BE.
\crdata{978-1-60558-638-0/10/03}  % Allows default copyright data (X-XXXXX-XX-X/XX/XX) to be over-ridden.
% --- End of Author Metadata ---

\title{Adaptability in XML-to-Relational Mapping Strategies}

%
% You need the command \numberofauthors to handle the 'placement
% and alignment' of the authors beneath the title.
%
% For aesthetic reasons, we recommend 'three authors at a time'
% i.e. three 'name/affiliation blocks' be placed beneath the title.
%
% NOTE: You are NOT restricted in how many 'rows' of
% "name/affiliations" may appear. We just ask that you restrict
% the number of 'columns' to three.
%
% Because of the available 'opening page real-estate'
% we ask you to refrain from putting more than six authors
% (two rows with three columns) beneath the article title.
% More than six makes the first-page appear very cluttered indeed.
%
% Use the \alignauthor commands to handle the names
% and affiliations for an 'aesthetic maximum' of six authors.
% Add names, affiliations, addresses for
% the seventh etc. author(s) as the argument for the
% \additionalauthors command.
% These 'additional authors' will be output/set for you
% without further effort on your part as the last section in
% the body of your article BEFORE References or any Appendices.

\numberofauthors{1} %  in this paper there is a *total*
% of ONE author, who appears on the 'first-page';
% the \additionalauthors section is therefore not used.
%
\author{
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three).
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
% 1st. author
\alignauthor
			 Lubo\v{s} Kuli\v{c}\\
			 \affaddr{Charles University}\\
			 \affaddr{Faculty of Mathematics and Physics}\\
       \affaddr{Department of Software Engineering}\\       
       \affaddr{Malostranske nam. 25}\\
			 \affaddr{118 00 Prague 1, Czech Republic}\\
       \email{kulic@ksi.mff.cuni.cz}
}
%\alignauthor
%			 ...\\
%			 \affaddr{...}\\
%			 \affaddr{...}\\
%       \affaddr{...}\\       
%       \affaddr{...}\\
%			 \affaddr{...}\\
%       \email{...}
%}
% There's nothing stopping you putting the seventh, eighth, etc.
% author on the opening page (as the 'third row') but we ask,
% for aesthetic reasons that you place these 'additional authors'
% in the \additional authors block, viz.

% Just remember to make sure that the TOTAL number of authors
% is the number that will appear on the first page PLUS the
% number that will appear in the \additionalauthors section.


\maketitle

\begin{abstract}
One of the ways to manage XML documents is to exploit tools and functions offered by (object-)relational database systems. The key aim of such techniques is to find the optimal mapping strategy, i.e. the way the XML data are stored in relations. Currently the most efficient approaches, so-called adaptive methods, search a space of possible mappings and choose the one which best suits the given sample data and query workload. In this paper we exploit a general heuristic method called Ant Colony Optimization (ACO) to solve the XML-to-relational mapping problem. We also adapt the algorithm so that it can be used on a dynamic variant of the problem. The algorithms are evaluated in a set of experiments with the conclusion that the ACO-based algorithms are suitable for the problem and can even be used as a basis of a dynamic mapping mechanism.
\end{abstract}

% A category with the (minimum) three required fields
%\category{???}{???}{Miscellaneous}
%A category including the fourth, optional field follows...

\category{I.7.1}{Document and Text Processing}{Document and Text Editing}[document management] 

\terms{Algorithms} 

\keywords{XML, storing XML in databases, dynamic adaptation}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Introduction}
\label{chapter:Introduction}

Since its birth, XML has become a frequently used format for representing, exchanging and manipulating data both in traditional applications and on the Internet. Because of that, there is naturally a need for efficient and reliable methods for managing and storing XML data. Many solutions of this task have been proposed using different approaches -- filesystem based, (O)RDBMS based, methods using object-oriented approach and native XML methods.

While all these approaches have their advantages and disadvantages, the most usable are, in our opinion, the methods using (Object-)Relational Database Management Systems. From various systems of transforming (or mapping) an XML tree into relations, the most promising (and suitable for further improvements) are the so-called adaptive (or flexible) schema-driven methods. 

These adaptive methods exploit a given schema of the data, various statistics and additional information about the application and try to optimize the mapping for it. This optimization usually starts with some initial relational schema and tries to improve it using a set of transformations such as inlining and outlining, splitting and merging types, etc. 

There are a number of papers and implementations based on these adaptive principles (the most interesting ones in our view are summarized in Section~\ref{sec:RelWork}); however, all of them have both advantages and disadvantages. Especially because of the disadvantages, we think there is still room for improvement of these methods, and in this paper we propose a method which addresses some of these drawbacks while still taking advantage of the adaptive approach. 

The following are, in our view, the most important drawbacks of current solutions; these are naturally the domains in which we want to find an improvement and which thus form the goals of our work:
\begin{itemize}
	\item \emph{Choice of heuristic} -- most of the proposed solutions use only a basic greedy search strategy, which has its disadvantages. So our first aim is to use a more sophisticated one.
	
	\item \emph{Set of schema transformations} -- existing algorithms use only a subset of proposed schema transformations. It could be interesting to evaluate the results of a solution which uses more of them and to compare their impact on the result.
	
	\item \emph{Dynamic adaptation} -- the main problem of all adaptive techniques is that the schema is adapted to future usage only once at the beginning. One of our goals is to study a possibility of a mechanism which would enable dynamic adaptation of used database schema.

\end{itemize}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Related Work}
\label{sec:RelWork}

The problem of XML-to-relational mapping and adaptive methods in particular has been studied in a number of papers. 

In \cite{klettke} the authors use a hybrid approach, where some fragments of an XML document are stored in a classical way using relational tables for elements and some fragments using a special \emph{XML datatype}. The main idea is that for the well structured document parts storing in relations is suitable, but for the semi-structured ones it is better to use the \emph{XML datatype}. 

In \cite{ramanath}, a \emph{FlexMap} framework is proposed which enhances the \emph{LegoDB} system from \cite{bohannon-01, bohannon-02}. It uses a greedy heuristic for mapping selection and \emph{StatiX} \cite{freire}, an XML Schema-aware framework, for collecting statistics of the input schema and sample data. These statistics are collected at the beginning, stored in the representation of the schema and updated while applying transformations so every XML document has to be fully processed only once.
 
\cite{zheng} uses a broader set of transformations -- a combination of transformations similar to those in the previous approaches. The search for (sub)optimum is done using the Hill Climbing heuristic.

The authors of this paper also propose a quite complex cost function for their solution. It exploits sample data statistics gathered at the beginning of the algorithm and computes or estimates other necessary variables. All of these are then used to determine the cost of a query by simulating the cost of joining the relational tables corresponding to particular schema fragments. 

In \cite{xiao-ling} so called \emph{Adjustable and Adaptable Method (AAM)} is proposed. This method searches for a (sub)optimal mapping using principles of genetic algorithms, which bring some level of randomness into decisions (in creating initial population, computing cost, mutation between populations etc.). 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Proposed Algorithm}
\label{sec:ProposedAlg}

\subsection{Adaptive XML-to-Relational Mapping Problem}
\label{sec:XMLRelProblem}
The adaptive algorithms for finding an optimal XML-to-relational mapping can be generalized as a simple process:
\begin{itemize}
	\item \emph{Input:} an initial XML schema $S_{init}$, a set of sample data and queries $D_{sample}$ and a cost function $f_{cost}$ which determines the efficiency of given relational schema $R$ considering the set $D_{sample}$.
	\item \emph{Additional parameters:} 
		\begin{itemize}
			\item a set of XML schema transformations $Trans = \{t_1, t_2, \ldots, t_{|Trans|} \}$ where every $t_i \in Trans$ transforms an XML schema $S_1$ to another XML schema $S_2 = t_i(S_1)$
			\item a fixed mapping strategy $f_{map}$ capable of transforming the given XML schema $S$ to a relational schema $R$
		\end{itemize}
	
	\item \emph{Mapping selection process:}
		\begin{itemize}
			\item Search $\Sigma$ -- the space of possible transformed schemas
			\item Find $S_{opt} \in \Sigma$ such that $f_{cost}(f_{map}(S_{opt}), D_{sample})$ is minimal.
		\end{itemize}
	\item \emph{Output:} a relational schema $R_{opt}$ which minimizes the cost function.
\end{itemize}

The most important aspect of adaptive algorithms is that while the set $Trans$ is always finite, the set of possible solutions $\Sigma$ can be infinite or, at least, very large. The problem was proven to be NP-hard even for a small set of transformations \cite{zheng, xiao-ling}. Results of the mapping algorithms are therefore usually only suboptimal.

\subsection{Schema Normalization}
\label{sec:NSchema}
% rovnou dat i normalizaci

In the description of a general mapping algorithm it is assumed that both the $f_{map}$ function and the schema transformations work with the XML schema whose actual form is not further specified. The schema is usually represented using some form of a graph which corresponds to the nature of an XML schema.

We decided to base our representation on XML Schema format. This format keeps the natural structure of the schema while being in fact an XML document so we can represent it as a DOM graph and use standardized APIs for parsing and tracing. However, we define some transformations and constraints on the schema which ease other parts of the system while technically they still keep the schema in a DOM tree.

Every logic which influences the actual relational schema should be kept in the mapping search algorithm, i.e. the internal schema representation should directly correspond to the relational storage. Because of that, the input schema needs to be normalized, so that it can be directly mapped to relations and all schema transformations have to produce only this normalized form. To achieve that we employ a concept of \emph{p-schemas} introduced in \cite{bohannon-01, bohannon-02}, we call our representation \emph{NSchema} (as Normalized Schema). NSchema fulfills all requirements for simple fixed mapping to relations and it is also enriched by some features which simplify the search algorithm.

The requirements for the NSchema are defined on the basis of the requirements on the \emph{p-schema} (as described in \cite{freire-impl}):
\begin{itemize}
	\item Every global element in the schema uses named (i.e. globally defined) type.
	\item There are no element references.
	\item There are no shared (i.e. used more than once) globally defined complex types and groups.
	\item Every type defines a structure that can be directly mapped into a relation. This means that any type definition (both local and global) can be a simple type definition or, in case of complex type, can contain either:
		\begin{itemize}
			\item Subelements with a simple (either built-in or user-defined) type.
			\item Complex regular expression (e.g. repetitions of elements, unions etc.) made of items which do not directly hold the textual content (thus can contain only elements with a globally defined complex type and/or group references).
		\end{itemize}
\end{itemize}

To transform the original XML Schema definition into NSchema we exploit an algorithm based on the normalization sketched in \cite{freire-impl}. We adjust this algorithm to be used with a DOM representation of an XML Schema document. The final algorithm consists of four steps:
\begin{itemize}
	\item Remove local type definitions in global elements.	
	\item Remove element references.
	\item Remove shared globally defined types and groups.
	\item Change the structure of defined types so that they can be directly mapped into relations.
\end{itemize}
While the first three steps are quite obvious, the fourth one does most of the necessary work. It is based on the normalization algorithm from \cite{freire-impl}.

Such a normalized schema can be easily mapped to relations by simply creating a relation for every globally defined complex type and for every group, for details see \cite{freire-impl}.

\subsection{Schema Transformations}
\label{sec:Transformations}
The mapping selection algorithm uses a set of schema transformations to derive variations of $S_{init}$. The possible transformations proposed in various papers are summarized in the following list:
\begin{itemize}
	\item \emph{Inlining \& Outlining} -- The inline and outline transformations change the place where an element $e$ is stored -- either directly in a table $T$ corresponding to the parent type of $e$ (\emph{inlined}) or in a separate table (\emph{outlined}). 
		
	\item \emph{Commutativity \& Associativity} -- these transformations alter the structure of the schema and the order of the contained items. Associativity groups different elements into a single relational table, while commutativity changes the order of elements and, thus, can change which elements are grouped by associativity. In our solution the same effect can be achieved by applying a form of outlining, so we do not use these two transformations.
		
	\item \emph{Splitting \& Merging types or groups} -- Split breaks a shared type or a group into separate definitions while merge does the exact opposite. Because of the way we manage schema statistics, we perform all possible split operations at the beginning and only allow merge in the algorithm.
	
	\item \emph{Simplifying unions} -- this transformation exploits the fact that a union is always contained in a sequence of optional elements. This, of course, extends the set of valid documents but as the transformed schema is used only to create a relational one this does not cause any problem.
\end{itemize}

\subsection{Cost Estimation}
\label{sec:CostEstimation}
% vcetne ziskavani statistik
An important part of the mapping selection process is the cost function, $f_{cost}$. It is used to represent an RDBMS engine which creates an execution plan for every query and is usually capable of evaluating it by a cost value. In our solution, we have chosen to use the cost function presented in \cite{zheng}. It exploits statistics about the schema and simulates the join of the relational tables necessary for evaluating the query.

The $f_{cost}$ equation defined in \cite{zheng} has a couple of variables (derived from schema statistics) which have to be computed. The most important observation is that most of the variables can be computed only once, at the beginning, and they do not change after any transformation except for the merging of shared types. This observation (based on a similar observation in \cite{bohannon-01,bohannon-02}) has only one precondition -- the statistics (and the derived variables) have to be obtained on a fully decomposed schema, i.e. a schema where there is no shared type or group. 

\subsection{Mapping Selection Algorithm}
\label{chapter:Algorithm}
% vcetne nejake kratke diskuse o parametrech

We choose a promising heuristic called \emph{Ant Colony Optimization (ACO)} (resp. its variant called Ant Colony System) for the mapping selection. ACO is a family of algorithms initially proposed in \cite{dorigo-first}. It is inspired by the behavior of ants looking for food \cite{dorigo} -- every ant spreads a substance called \emph{pheromone} on its way to and from the food source, other ants notice the presence of pheromone and tend to follow paths with higher concentration of the pheromone. This way the ants in fact communicate or share information they have learnt.

In our proposed solution we choose \emph{Ant Colony System (ACS)}, a variant of the ACO algorithm introduced in \cite{dorigo-ACS}. Its main contribution is the introduction of a so called local pheromone update which diversifies the search performed by subsequent ants so that it is less likely that different ants will construct the same solution.

The main algorithm using ACS is presented in Algorithm~\ref{alg:MainAlg}, in the next sections we analyze all its steps in detail.
\begin{algorithm}
\caption{Main application algorithm}
\label{alg:MainAlg}
\begin{algorithmic}
%\STATE \textbf{Input:} $S_{init}$, $Trans$, $f_{cost}$
\STATE Initiate the algorithm

\WHILE{Final condition}
\STATE Position each ant on a starting schema

\REPEAT

\STATE Perform a state transition
\STATE Perform local pheromone updating

\UNTIL{Iteration termination condition}

\STATE Perform global pheromone updating

\ENDWHILE
\end{algorithmic}
\end{algorithm}

\subsubsection{State Transition}
\label{ssec:StateTransition}

First, every ant $a$ (positioned on a schema $S_a$) constructs all possible steps from its current position $Sol = \{ S | S = t_i(S_a) \wedge t_i \in Trans \}$. Then $f_{cost}(S)$ is computed for every solution and each solution is assigned a probability accordingly. In the original ACO/ACS the probability is computed as defined in equation~(\ref{eq:ACOProbOriginal}):
\begin{equation}
\label{eq:ACOProbOriginal}
	 \frac{\tau^{\alpha}_{ST} \cdot \eta^{\beta}_{ST} }
	{\sum_{U \in Sol} \tau^{\alpha}_{SU} \cdot \eta^{\beta}_{SU} }
\end{equation}
where $\tau_{ST}$ is the pheromone value for edge $ST$, $\eta_{ST}$ is a measure of quality of a given step, in our case the difference between costs of the schemas $S$ and $T$ and $\alpha$ and $\beta$ are parameters which influence the bias between the information from other ants (pheromone) and $f_{cost}$ (we use the settings proposed in \cite{dorigo-ACS} for them -- $\alpha = 1$ and $\beta = 2$).

An ant in a schema $S$ then chooses a schema $T$ to move to using the so-called \emph{pseudorandom proportional} rule. It depends on a random variable $q$ uniformly distributed over $\langle0, 1\rangle$ and a parameter $q_0$ which controls how often the ants explore new possibilities and how often they follow the best possible way. Based on our experiments, we set it to $0.5$. The resulting schema is then chosen as follows:
\begin{equation}
\label{eq:ACSStateTransition}
T = \begin{cases}	
		\operatorname*{arg\,max}_{U \in Sol} \tau^{\alpha}_{SU} \cdot \eta^{\beta}_{SU} & \mbox{if $q \leq q_0$} \\
			ACO & \mbox{otherwise} \\
		\end{cases}		
\end{equation}	
where $ACO$ means the solution is chosen according to rule presented by original ACO -- as a random variable with distribution given in equation~(\ref{eq:ACOProbOriginal}). 


\subsubsection{Local Pheromone Updating}
\label{ssec:LocalPheromoneUpdate} 

After every step performed by an ant the pheromone of the last used edge is updated according to the following rule:
	\begin{equation}
		\label{eq:ACSLocalUpdate}
		\tau_{ST} \leftarrow (1 - \rho) \cdot \tau_{ST} + \rho \cdot \tau_0
	\end{equation}
where $0 < \rho < 1$ is the pheromone evaporation parameter (set to $0.1$) and $\tau_0$ is the initial pheromone level. The authors of \cite{dorigo-ACS} propose a value of 
	\[ \frac{C}{f_{cost}(S_{est})} \]
	where $S_{est}$ is a solution found by another method and $C$ is a constant. We use $S_{init}$ as $S_{est}$ and, to be consistent with the (global) pheromone updating, we set $C = f_{cost}(S_{init})$; the resulting value is therefore $\tau_0 = 1$.

		
\subsubsection{Global Pheromone Updating}
\label{ssec:GlobalPheromoneUpdate}	
Only the ant with the best solution so far ($a_{best}$) deposits a pheromone along its path in this phase. The value of the pheromone for the edge between schemas $S$ and $T$ -- $\tau_{ST}$ -- is counted as follows:
	\begin{equation}
		\label{eq:ACSGlobalUpdate}
	\tau_{ST} \leftarrow (1 - \rho) \cdot \tau_{ST} + \rho \cdot \Delta\tau_{ST}	
	\end{equation}
where 
\[
\Delta\tau_{ST} = \begin{cases}
	\dfrac{C}{f_{cost}(a_{best})} & \text{if edge $ST$ belongs to the} \\
	 & \text{global best solution found} \\
	0 & \text{otherwise}
\end{cases}
\]
$f_{cost}(a_{best})$ is then the cost of the best solution found and $C$ is a constant. The authors of \cite{dorigo-ACS} simply use $1$, but our experiments showed that this is not suitable for our situation; we set it to the cost of the initial schema.

	
\subsubsection{Placing Ants to a Starting Position}
\label{ssec:StartingPosition}

In the original ACO and ACS algorithms, the ants are placed randomly in the construction graph. However, in our situation we cannot actually construct the whole graph, so we start with all ants in $S_{init}$. 

After every iteration, we leave the ants with a good solution in their positions, while the rest are placed back at the original starting position, i.e. $S_{init}$. 

\subsubsection{Termination Conditions} 
\label{ssec:TerminationConditionIteration}

The original ACS algorithm assumes the solution is built from a set of solution components and an iteration ends when the solution is constructed. In our case (where every schema in the process is already a solution) we limited the number of steps in an iteration instead. 

Thus we grant every ant a number of steps dependent on the size of the problem (the number of types in $S_{init}$) and leave it to every ant whether it would use some transformation on every type or leave some types untransformed while others would contain a chain of transformations.

As for the final termination condition, there is usually a predefined maximal number of algorithm iterations and/or a threshold value of $f_{cost}$. We set the number of iterations to a number which depends on the number of types in $S_{init}$. 

Our experiments, however, showed that the final solution is found much earlier, so we have proposed an additional condition (in fact inspired by the greedy algorithms) -- we stop the algorithm when an improving solution has not been found for some number of iterations.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Dynamic Adaptation}
\label{chapter:DynamicAdaptation}
The main problem of all adaptive techniques is that the schema is adapted to future usage only once at the beginning. When the application or its usage change, the resulting efficiency may considerably worsen.

We explore the situation when the sample data and/or queries change (the input XML Schema could also change, but this would probably require a reconstruction of the relational schema and the data and query mapping mechanism anyway). For our problem, this change means a change of the sample documents and queries. Both of these changes influence the computation of the value of $f_{cost}$. 

The adaptation of an ACO algorithm to some dynamic problem is discussed for example in \cite{angus} and \cite{eyckelhof}. Both of them contain basically the same ideas:
\begin{itemize}
	\item The ants in the ACO algorithm are capable of adapting to a new situation from the nature of the algorithm -- even when some very good solution has been found, some of the ants will still explore different paths because of the state transition rule (see Section~\ref{ssec:StateTransition}).
	\item When some route has very high values of the pheromones, the exploration of new paths is less probable. The solution of that is to normalize the pheromone values when the situation changes.
\end{itemize}

Using these ideas, we adapt our algorithm -- we store the information about pheromone values and when the adaptation algorithm is used again with a different set $D_{sample}$, we use these values instead of the default value $\tau_0$. Before the algorithm is started again, the pheromone values are normalized (using a rule proposed in \cite{angus}) to lower the differences.

While the main mapping selection algorithm can be adjusted to the dynamic version of the problem quite easily, there are other aspects to be considered. Most importantly, even with a slight change in the resulting relational schema, the schema has to be altered and the stored data have to be either moved and adjusted on the way or reimported from the original XML documents. Both of these tasks are difficult to perform automatically and can be quite expensive. Hence the algorithm should also exploit the cost of the schema reconstruction and decide, whether it is worth the benefit of the newly adapted schema. Designing such an algorithm needs a further detailed analysis.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Experiments}
\label{sec:Experiments}

% kratce zminit implementaci - 2 vety
To test how the proposed solution performs, we have implemented the most important parts of the algorithm and conducted a sequence of experiments. For running the experiments we used a machine with Dual core 1.66 GHz CPU, 3 GB RAM and 5400 rpm hard disk. The algorithm has been implemented in the Java language\footnote{\url{http://java.sun.com/}} (version 1.6), the heap space for the virtual machine has been set to 1 GB.

%\footnote{\texttt{http://java.sun.com/}}

The data used for the experiments were obtained from XMark \cite{schmidt}, an XML-Benchmark project. Four sets of XPath queries were derived from the ones provided by XMark. The first three of them contain a couple of queries related to a subset of entities in the schema, while the fourth one combines the queries from the other three sets. The costs of $S_{init}$ according to these workloads are presented in Table~\ref{tab:InitCost}.
\begin{table*}
\begin{center}
\caption{Costs of the initial schema and the results of both algorithms}
\label{tab:InitCost}
	\begin{tabular}{|l|r|r|r|r|}
%	\begin{tabular}{|c|c|c|c|c|}
	\hline
	  & \textbf{Workload-1} & \textbf{Workload-2} & \textbf{Workload-3} & \textbf{Combined Workload} \\
	\hline
	 Initial Cost & $42\,510\,422$ & $12\,978\,202$ & $59\,736\,854$ & $128\,820\,398$\\
	\hline
	 Greedy & $20\,220$ & $2\,814\,406$ & $2\,941\,622$ & $24\,874\,488$ \\
	\hline
	 Ant & $20\,204$ & $2\,785\,204$ & $2\,806\,366$ & $22\,140\,290$ \\
	\hline
	\end{tabular}
\end{center}
\end{table*}

We have also implemented a simple Greedy algorithm proposed in \cite{bohannon-01, bohannon-02} so we could compare the results of our algorithm to another one. In the following sections we refer to these two algorithms simply as Greedy and Ant.

\subsection{Overall Performance}
\label{sec:OverallPerf}
First we ran the algorithms on our four workloads to compare them; the results are in Table~\ref{tab:InitCost}. It can be seen that the Ant algorithm gives better results on all of the workloads, but the difference only varies from almost $0\%$ to about $11\%$ and grows with the complexity of the query workload. 

\subsection{Diversification of the Search}
\label{sec:DiversificationSearch}
The Ant algorithm should search a wide subspace of $\Sigma$. To confirm that, we ran the algorithm on one of the workloads and kept track of the paths created by particular ants. Figure~\ref{fig:ExpDiverse} shows the number of ants in different positions in the first 10 steps of the algorithm. We can see that from the start of the algorithm the ants follow different paths and after a couple of steps about $8$ of them (on average) are in different places (and this continues for the rest of the algorithm run).
%\begin{table}
%\begin{center}
%	\begin{tabular}{|p{2cm}|c|c|c|c|c|c|c|c|c|c|}
%	\begin{tabular}{|c|c|c|c|c|}
%	\hline
%	  \textbf{Step} & 1 &	2 &	3 &	4 &	5 &	6 &	7 &	8 &	9 &	10 \\
%	\hline
%	  \textbf{Positions} & 1	& 3	&	6	&	7	&	8	&	10&	9	&	8	&	8	&	8	\\
%	\hline	 
%	\end{tabular}
%\caption{Number of ants in different positions}
%\label{tab:Differentiation}
%\end{center}
%\end{table}
\begin{figure}[htp]
	\centering
	\includegraphics[scale=0.45]{test02}
\caption{Diversification of search}
\label{fig:ExpDiverse}
\end{figure}
Another observation can be made from the behavior of the Ant algorithm -- it usually finds more than one solution with the best cost. This can be important in real usage of the algorithm -- other characteristics may differ and also the cost model in the algorithm cannot simulate a particular RDBMS exactly, so tests of different variants in the database can give different real costs.

\subsection{Impact of the Set of Transformations}
\label{sec:TransfImpact}
We implemented most of the transformations proposed in related papers so we tried to find out how the choice of the transformations influences the resulting cost. We ran both Greedy and Ant on workload-3 using different sets of transformations (all but Union, all but Merge, only Inline and Outline), the resulting costs of both the algorithms are shown in Table~\ref{tab:Transformations}.
\begin{table}
%\begin{center}
\caption{Impact of the set of transformations}
\label{tab:Transformations}
	\begin{tabular}{|l|r|r|r|}
%	\begin{tabular}{|c|c|c|c|c|}
	\hline
	  & \textbf{w/o union} & \textbf{w/o merge} & \textbf{inline outline} \\
	\hline
	 \textbf{Greedy} & $2\,941\,622$ & $2\,941\,622$ & $2\,941\,622$ \\
	\hline
	 \textbf{Ant} & $2\,807\,542$ & $2\,807\,626$ & $2\,941\,622$ \\
	\hline
	\end{tabular}
%\end{center}
\end{table}

The difference was not really remarkable, which supports the hypothesis that inlining and outlining are the most important transformations. However, the result can also be caused by the $f_{cost}$ function model. The difference also grows with the size of the set. This indicates that the more possibilities there are to transform the schema, the better the Ant algorithm performs.

\subsection{Dynamic Adaptation}
\label{sec:Dynamic}
Finally we conducted a simple experiment to verify the benefits of the dynamic adaptation mechanism described in Section~\ref{chapter:DynamicAdaptation}. We let the Ant algorithm find a solution on workload-3 and then we changed the workload to the combined workload. But this time we provided the already used pheromone map to the algorithm (before that we performed the pheromone normalization, see Section~\ref{chapter:DynamicAdaptation}).

Table~\ref{tab:Dynamic} compares the resulting costs on the combined workload for Greedy, Ant which has run from scratch and an Ant which used the dynamic mechanism. The dynamic Ant found a worse solution than the standard one (by about $8\%$), but it is still better than the one from Greedy (by about $4\%$). 

The second row of Table~\ref{tab:Dynamic} is much more important in our view. It compares the times of the runs of these three algorithms. As we can see, the time of the dynamic Ant is significantly better than the one of the standard Ant algorithm. In fact, it is only $11\%$ of the standard one. The Greedy algorithm is still faster but this is not surprising -- the Greedy algorithm can be seen as a significantly simplified version of the algorithm of one ant in the Ant algorithms. The Dynamic Ant algorithm performed only about six times slower than the Greedy one while it uses ten ants.
\begin{table}
%\begin{center}
\caption{Dynamic adaptation}
\label{tab:Dynamic}
\begin{tabular}{|l|r|r|r|}
%	\begin{tabular}{|l|>{\PBS\raggedleft}p{1.7cm}|>{\PBS\raggedleft}p{1.7cm}|>{\PBS\raggedleft}p{1.7cm}|}
%	\begin{tabular}{|c|c|c|c|c|}
	\hline
	  & \multicolumn{1}{c|}{\textbf{Greedy}} & \multicolumn{1}{p{1.7cm}|}{\textbf{Ant from start}} & 
	  	\multicolumn{1}{p{1.7cm}|}{\textbf{Dynamic Ant}} \\
	\hline
	 \textbf{Cost} & $24\,874\,488$ & $22\,140\,290$ & $23\,991\,984$  \\
%	 \textbf{Cost} & $24\,874\,488\;\;\;$ & $22\,140\,290\;\;\;$ & $23\,991\,984\;\;\;$  \\
	\hline
	 \textbf{Time (in \emph{s})} & $70.7$ & $3\,683.7$ & $439.8$ \\
	\hline
	\end{tabular}
%\end{center}
\end{table}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Conclusions and Future Work}
\label{sec:Conclusion}

In this paper we analyzed various methods of mapping XML data to a relational schema and proposed a solution addressing some drawbacks of existing ones.

The first goal was a choice of a more sophisticated heuristic which would address the flaws of the ones used in most of the papers -- variants of a simple greedy algorithm. We designed an algorithm based on Ant Colony Optimization and our experiments have shown that it in fact gives better mappings. 

The difference between our algorithm and a simple greedy one is not vast, nevertheless, the problem is quite suitable for the greedy heuristic since it does not really block possible steps by taking the best ones. And, as can be seen from our experiments, the lag between our algorithm and the greedy one grows with the complexity of the query workload used to estimate the cost. 

The second goal was to evaluate the influence of the selected set of schema transformations on the result. The conducted experiment has shown that although more transformations enable the algorithms to find a better solution, the difference is not radical. We can however see from the experiment that the more complex situation (i.e. more transformations) suits our algorithm better, i.e. the difference between its result and the result of the greedy algorithm enlarges with the number of transformations. 

Finally, we wanted to explore the possibility of a dynamic schema adaptation. We adapt our algorithm to the dynamic version of the problem. We performed a simple experiment to evaluate the dynamic version of the algorithm and the results are promising. 

We ran the dynamic version of the algorithm on one query workload first and then again on a changed one. Then we compared its results with both the greedy algorithm and the original version of our algorithm which ran only on the changed workload. Although the dynamic algorithm did not find the best solution, it was still better than the one from the greedy algorithm. And more importantly, the runtime of the dynamic algorithm was almost 10 times better than the time of the original algorithm.

Our future work will focus on further enhancing our algorithm based on ACO. There is a couple of variants of Ant Colony Optimization and it can be interesting to evaluate their ideas in our situation.

But in our view the most important feature enabled by the usage of ACO is the dynamic adaptation. And the need for a dynamic adaptation is simultaneously the most painful problem of the existing XML-to-relational mapping algorithms. Consequently, further analysis of possible solutions of the dynamic adaptation problem will be our most important future goal.



\section{Acknowledgement}

This work was supported in part by the Czech Science Foundation (GA\v{C}R), grant number 201/09/P364.



%\end{document}  % This is where a 'short' article might terminate

%ACKNOWLEDGMENTS are optional
%\section{Acknowledgments}
%This section is optional; it is a location for you
%to acknowledge grants, funding, editing assistance and
%what have you.  In the present case, for example, the
%authors would like to thank Gerald Murray of ACM for
%his help in codifying this \textit{Author's Guide}
%and the \textbf{.cls} and \textbf{.tex} files that it describes.

%
% The following two commands are all you need in the
% initial runs of your .tex file to
% produce the bibliography for the citations in your paper.
\bibliographystyle{abbrv}
%\bibliography{sigproc}  % sigproc.bib is the name of the Bibliography in this case
% You must have a proper ".bib" file
%  and remember to run:
% latex bibtex latex latex
% to resolve all references
%
% ACM needs 'a single self-contained file'!
%
%APPENDICES are optional
%\balancecolumns
%\appendix
%Appendix A
%\section{Headings in Appendices}
%The rules about hierarchical headings discussed above for
%the body of the article are different in the appendices.
%In the \textbf{appendix} environment, the command
%\textbf{section} is used to
%indicate the start of each Appendix, with alphabetic order
%designation (i.e. the first is A, the second B, etc.) and
%a title (if you include one).  So, if you need
%hierarchical structure
%\textit{within} an Appendix, start with \textbf{subsection} as the
%highest level. Here is an outline of the body of this
%document in Appendix-appropriate form:
%\subsection{Introduction}
%\subsection{The Body of the Paper}
%\subsubsection{Type Changes and  Special Characters}
%\subsubsection{Math Equations}
%\paragraph{Inline (In-text) Equations}
%\paragraph{Display Equations}
%\subsubsection{Citations}
%\subsubsection{Tables}
%\subsubsection{Figures}
%\subsubsection{Theorem-like Constructs}
%\subsubsection*{A Caveat for the \TeX\ Expert}
%\subsection{Conclusions}
%\subsection{Acknowledgments}
%\subsection{Additional Authors}
%This section is inserted by \LaTeX; you do not insert it.
%You just add the names and information in the
%\texttt{{\char'134}additionalauthors} command at the start
%of the document.

%\section{References}
%Generated by bibtex from your ~.bib file.  Run latex,
%then bibtex, then latex twice (to resolve references)
%to create the ~.bbl file.  Insert that ~.bbl file into
%the .tex source file and comment out
%the command \texttt{{\char'134}thebibliography}.
\balance % GM June 2007


\begin{thebibliography}{1}


\bibitem{angus} D. Angus, T. Hendtlass: \emph{Dynamic Ant Colony Optimisation}, Applied Intelligence vol. 23, pages 33--38, 2005.

\bibitem{bohannon-01} P. Bohannon, J. Freire, P. Roy, and J. Sim\'{e}on: \emph{From XML Schema to Relations: A Cost-based Approach to XML Storage}, Technical report, Bell Laboratories, 2001.


\bibitem{bohannon-02} P. Bohannon, J. Freire, P. Roy, and J. Sim\'{e}on: \emph{From XML Schema to Relations: A Cost-based Approach to XML Storage}, In ICDE '02: Proceedings of the 18th International Conference on Data Engineering, page 64, Washington, DC, USA, 2002.

\bibitem{dorigo-ACS} M. Dorigo, L. M. Gambardella: \emph{Ant colony system: A cooperative learning approach to the traveling salesman problem}, IEEE Transactions on Evolutionary Computation, vol. 1, no. 1, pages 53--66, 1997.

\bibitem{dorigo-first} M. Dorigo, V. Maniezzo, A. Colorni: \emph{Positive Feedback as a Search Strategy}, Technical Report No. 91-016, Politecnico di Milano, Italy, 1991.

\bibitem{dorigo} M. Dorigo, M. Birattari, T. Stutzle: \emph{Ant Colony Optimization - Artificial Ants as a Computational Intelligence Technique}, Technical Report No. TR/IRIDIA/2006-023, IRIDIA, Bruxelles, Belgium, September 2006. \url{http://iridia.ulb.ac.be/IridiaTrSeries/IridiaTr2006-023r001.pdf}

\bibitem{eyckelhof} C. J. Eyckelhof, M. Snoek: \emph{Ant Systems for a Dynamic TSP: Ants caught in a traffic jam}, Ant Algorithms -- Proc. of ANTS 2002 -- Third International Workshop, vol. 2463, pages 88--99, 2002.

\bibitem{freire} J. Freire, J. Haritsa, M. Ramanath, P. Roy, and J. Sim\'{e}on: \emph{Statix: Making XML count.} In Proc. of SIGMOD, 2002.

\bibitem{freire-impl} J. Freire, J. Sim\'{e}on: \emph{Adaptive XML Shredding: Architecture, Implementation, and Challenges}, Proceedings of the VLDB 2002 Workshop EEXTT and CAiSE 2002 Workshop DTWeb on Efficiency and Effectiveness of XML Tools and Techniques and Data Integration over the Web-Revised Papers, pages 104--116, 2003.


\bibitem{klettke} M. Klettke, H. Mayer: \emph{XML and Object-Relational Database Systems -- Enhancing Structural Mappings Based On Statistics}, Lecture Notes in Computer Science, volume 1997, pages 151--164, 2000. \url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.36.1766}

\bibitem{ramanath} M. Ramanath, J. Freire, J. R. Haritsa, P. Roy: \emph{Searching for Efficient XML-to-Relational Mappings}, XSym 2003: Proc. of 1st International XML Database Symposium, volume 2824, pages 19--36, Berlin, Germany, 2003. \url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.7412}


\bibitem{schmidt} A. R. Schmidt, F. Waas, M. L. Kersten, D. Florescu, I. Manolescu, M. J. Carey, R. Busse: \emph{The XML Benchmark Project}, Technical Report INS-R0103, CWI, Amsterdam, The Netherlands, April 2001. \url{http://monetdb.cwi.nl/xml/}

\bibitem{xiao-ling} W. Xiao-ling, L. Jin-feng, D. Yi-sheng: \emph{An Adaptable and Adjustable Mapping from XML Data to Tables in RDB}, In Proc. of the VLDB 2002 Workshop EEXTT and CAiSE 2002 Workshop DTWeb, pages 117--130, Springer-Verlag, London, UK, 2003.

\bibitem{zheng} S. Zheng, J.-R. Wen, H. Lu: \emph{Cost-driven Storage Schema Selection for XML}, Proc. of DASFAA 2003, pages 337--344, Kyoto, Japan, 2003. \url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.3079}
  
\end{thebibliography}



% That's all folks!
\end{document}
