%%%%%%%%%%%%%%%%%%%%%%% file template.tex %%%%%%%%%%%%%%%%%%%%%%%%%
%
% This is a general template file for the LaTeX package SVJour3
% for Springer journals.          Springer Heidelberg 2010/09/16
%
% Copy it to a new file with a new name and use it as the basis
% for your article. Delete % signs as needed.
%
% This template includes a few options for different layouts and
% content for various journals. Please consult a previous issue of
% your journal as needed.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% First comes an example EPS file -- just ignore it and
% proceed on the \documentclass line
% your LaTeX will extract the file if required
%%% \begin{filecontents*}{example.eps}
%%% %!PS-Adobe-3.0 EPSF-3.0
%%% %%BoundingBox: 19 19 221 221
%%% %%CreationDate: Mon Sep 29 1997
%%% %%Creator: programmed by hand (JK)
%%% %%EndComments
%%% gsave
%%% newpath
%%%   20 20 moveto
%%%   20 220 lineto
%%%   220 220 lineto
%%%   220 20 lineto
%%% closepath
%%% 2 setlinewidth
%%% gsave
%%% .4 setgray fill
%%% grestore
%%% stroke
%%% grestore
%%% \end{filecontents*}
%
\RequirePackage{fix-cm}
%
%\documentclass{svjour3}                     % onecolumn (standard format)  
%\documentclass[smallcondensed]{svjour3}     % onecolumn (ditto)
%\documentclass[smallextended]{svjour3}       % onecolumn (second format)
%\documentclass[twocolumn]{svjour3}          % twocolumn
%

\NeedsTeXFormat{LaTeX2e}
%-----------------------------------------------------------
\documentclass[a4paper,12pt]{monografia}
\usepackage{amsmath,amsthm,amsfonts,amssymb}
\usepackage[mathcal]{eucal}
\usepackage{latexsym}
\usepackage[english]{babel}
\usepackage[latin1]{inputenc}
\usepackage{bm}
%\usepackage[all]{xy}

\def\bibnombre{References}
%\smartqed  % flush right qed marks, e.g. at end of proof
%
\usepackage{graphicx}
%
% \usepackage{mathptmx}      % use Times fonts if available on your TeX system
%
% insert here the call for the packages your document requires
%\usepackage{latexsym}
% etc.
%

\usepackage{amssymb}
% please place your own definitions here and don't use \def but
% \newcommand{}{}
\newcommand{\vs}{\vspace*{.5cm}}
\newcommand{\vso}{\vspace*{1cm}}
\newcommand{\vst}{\vspace*{2cm}}
\newcommand{\hs}{\hspace*{0.5cm}}
\newcommand{\hsi}{\hspace*{0.25cm}}
\newcommand{\hsm}{\hspace*{0.15cm}}
\newcommand{\hson}{\hspace*{0.75cm}}
\newcommand{\hso}{\hspace*{0.5cm}}
\newcommand{\hsom}{\hspace*{0.25cm}}
\newcommand{\hst}{\hspace*{2cm}}
%
% Insert the name of "your journal" with
%\journalname{Annals of Operations Research}
%
\begin{document}

%\textsuperscript{2}

\titulo{Making Heuristics Faster with Data Mining}
%\thanks{Grants or other notes
%about the article that should go on the front page should be
%placed here. General acknowledgments should be placed at the end of the article.}

%%% \subtitle{}

% \titlerunning{Short form of title}        % if too long for running head

%\author{Daniel Martins \and
%        Gabriel M. Vianna \and
%        Isabel Rosseti \and
%        Simone L. Martins \and
%        Alexandre Plastino
%}

\autor{Daniel Paes Martins} \nome{Daniel} \ultimonome{Martins}

%---------- Informe o Curso e Grau -----
\bacharelado \curso{Ci\^encia da Computa\c{c}\~ao} \ano{2011}
\data{09 de dezembro de 2011} % data da aprova\c{c}\~{a}o
\cidade{Niter\'oi}
%
%----------Informa\c{c}\~{o}es sobre a Institui\c{c}\~{a}o -----------------
\instituicao{Universidade Federal Fluminense} \sigla{UFF}
\unidadeacademica{Instituto de Computa\c{c}\~ao}

%\authorrunning{Short form of author list} % if too long for running head

%\institute{Daniel Martins, Gabriel M. Vianna, Isabel Rosseti, Simone L. Martins, Alexandre Plastino  \at
%           Federal Fluminense University, Department of Computer Science \\
%           Niter\'{o}i, Rio de Janeiro, Brazil \\
%           \email{dmartins@id.uff.br, \{gvianna,rosseti,simone,plastino\}@ic.uff.br} %\\
%           \and
%           S. Author \at
%              second address
%}

%\date{Received: date / Accepted: date}
% The correct dates will be entered by the editor

\CDU{???} \areas{areas}
\npaginas{xx}  % total de p\'{a}ginas do trabalho

%------Nomes do Orientador, 1o. Examinador e 2o. Examinador-
\orientador{Alexandre Plastino de Carvalho}
%
\coorientador{Simone de Lima Martins} % opcional
%
\coorientadordois{Isabel Cristina Mello Rosseti}
%
\examinadorum{Luidi Gelabert Simonetti}
%
\examinadordois{Julliany Sales Brand\~ao}
%
%\examinadortres{Nome do Examinador 3}
%
%\examinadorquatro{Nome do Examinador 4}
%--------- T\'{i}tulos do Orientador 1o. e 2o. Examinadores ----
\ttorientador{D.Sc.}
%
\ttcoorientador{D.Sc.} % se digitado \coorientador
%
\ttcoorientadordois{D.Sc.} % se digitado \coorientador
%
\ttexaminadorum{D.Sc.}
%
\ttexaminadordois{M.Sc.}
\instorientador{Universidade Federal Fluminense}
\instcoorientador{Universidade Federal Fluminense}
\instcoorientadordois{Universidade Federal Fluminense}
\instexaminadorum{Universidade Federal Fluminense}
\instexaminadordois{Universidade Federal Fluminense}


\maketitle 

\agradecimento{Agradecimentos}
Aos meus orientadores, pela oportunidade incr\'{i}vel de trabalhar com eles, e pelo apoio inestim\'{a}vel durante esse ano. 

~~\\
\`A minha m\~{a}e, por ter sempre me incentivado a me dedicar aos estudos.

~~\\
Ao meu pai, por ter sempre me incentivado a buscar excel\^{e}ncia em tudo que eu fizesse.

~~\\
A toda a minha fam\'{i}lia, por ter sempre apoiado incondicionalmente as minhas decis\~{o}es.

~~\\
Ao querido amigo Hugo Barbalho, por todo apoio e aux\'{i}lio.

~~\\
Aos amigos Raphael Bottino, C\'{a}ssio Fernando, Leonardo Freitas, e demais frequentadores do DACC,  por terem me atrapalhado n\~{a}o s\'{o} na confec\c{c}\~{a}o desse TCC, como em todas as minhas responsabilidades.

~~\\
A todos os demais colegas da UFF, por terem contribu\'{i}do para que esses anos de faculdade fossem t\~{a}o agrad\'{a}veis.

\resumo{Resumo}
Metaheur\'{i}sticas representam uma importante classe de t\'{e}cnicas para a obten\c{c}\~{a}o de boas solu\c{c}\~{o}es, em tempo computacional razo\'{a}vel, para problemas de otimiza\c{c}\~{a}o combinat\'{o}ria. S\~{a}o procedimentos de alto n\'{i}vel de prop\'{o}sito geral que podem ser instanciados para explorar eficientemente o espa\c{c}o de solu\c{c}\~{o}es de um problema combinat\'{o}rio espec\'{i}fico.
%
Um importante t\'{o}pico em metaheur\'{i}sticas \'{e} o desenvolvimento de metaheur\'{i}sticas h\'{i}bridas. Esses m\'{e}todos h\'{i}bridos resultam da combina\c{c}\~{a}o de conceitos e procedimentos de diferentes metaheur\'{i}sticas cl\'{a}ssicas ou da combina\c{c}\~{a}o de metaheur\'{i}sticas com conceitos e processos de outras \'{a}reas de pesquisa respons\'{a}veis por realizar tarefas espec\'{i}ficas que podem melhorar a t\'{e}cnica original.
%
Anteriormente, foi desenvolvida uma vers\~{a}o da metaheur\'{i}stica GRASP (Greedy Randomized Adaptive Search Procedures) que incorporou um procedimento de minera\c{c}\~{a}o de dados. Considerou-se a hip\'{o}tese de que padr\~{o}es obtidos por uma t\'{e}cnica de minera\c{c}\~{a}o de dados, a partir de um conjunto de solu\c{c}\~{o}es sub-\'{o}timas de um problema de otimiza\c{c}\~{a}o combinat\'{o}ria, poderiam ser usados para guiar procedimentos metaheur\'{i}sticos na busca por melhores solu\c{c}\~{o}es. Foram obtidos resultados promissores aplicando-se essas id\'{e}ias a diferentes problemas de otimiza\c{c}\~{a}o combinat\'{o}ria, tais como: o problema do empacotamento de conjuntos, o problema de maximiza\c{c}\~{a}o da diversidade, e o problema de replica\c{c}\~{a}o de servidores para multicast confi\'{a}vel. 
%
O desafio deste trabalho \'{e} introduzir um procedimento de minera\c{c}\~{a}o de dados a um importante m\'etodo da literatura que combina elementos de diferentes metaheur\'isticas para resolver o problema das $p$-medianas, conhecido como heur\'istica h\'ibrida \emph{multistart} -- HH, buscando obter evid\^{e}ncias de que, quando uma t\'{e}cnica \'{e} capaz de atingir a solu\c{c}\~{a}o \'{o}tima, ou uma solu\c{c}\~{a}o sub-\'{o}tima com pouca chance de melhora, os padr\~{o}es minerados podem ser usados para guiar a busca pela solu\c{c}\~{a}o \'{o}tima ou sub-\'{o}tima em menos tempo computacional. \`{A} nova vers\~ao denominou-se DM-HH. Experimentos computacionais, conduzidos em um conjunto de inst\^{a}ncias da literatura, mostraram que a nova vers\~{a}o da heur\'{i}stica h\'{i}brida foi capaz de atingir solu\c{c}\~{o}es \'{o}timas e sub-\'{o}timas, em m\'{e}dia 27,32\% mais r\'{a}pido que a estrat\'{e}gia original.

\noindent Palavras-chave: Metaheur\'{i}sticas H\'{i}bridas, Problema das p-Medianas, Minera\c{c}\~{a}o de Dados.
% \PACS{PACS code1 \and PACS code2 \and more}
% \subclass{MSC code1 \and MSC code2 \and more}


\resumo{Abstract}
Hybrid metaheuristics -- developed based on the combination 
of metaheuristics with concepts and techniques from 
other research areas -- represent an important subject
in combinatorial optimization research.
%
Previously, a hybrid version of the GRASP (Greedy Randomized Adaptive Search Procedures) 
metaheuristic which incorporates a data mining procedure was developed and explored.
%
The base hypothesis was that patterns obtained by a data mining
technique, from a set of suboptimal solutions of a 
combinatorial optimization problem, could be used to guide 
metaheuristics procedures in the search for better solutions.
%
Promising results were obtained when applying these ideas
to different combinatorial problems, such as: the set
packing problem, the maximum diversity problem and
the server replication for reliable multicast problem.
%
The challenge of this work is to introduce
a data mining procedure into a state-of-the-art heuristic 
for a specific problem in order to give evidence that, 
when a technique is able to reach the optimal solution, 
or a near-optimal solution with little chance of improvements,
the mined patterns could be used to guide the search for the 
optimal or near-optimal solutions in less computational time.
%
We then developed a new version of a previously proposed
and state-of-the-art multistart hybrid heuristic
for the classical p-median problem, 
which combines elements of different traditional 
metaheuristics.
Computational experiments, conducted on a set of
instances from the literature, showed that the new version 
of the hybrid heuristic
was able to reach optimal and near-optimal solutions, on average,
27.32\% faster than the original strategy.

\noindent Keywords: Hybrid Metaheuristics, p-Median Problem, Data Mining.
% \PACS{PACS code1 \and PACS code2 \and more}
% \subclass{MSC code1 \and MSC code2 \and more}

\tableofcontents \thispagestyle{empty} \listoffigures
\thispagestyle{empty} \listoftables \thispagestyle{empty}


\pagestyle{ruledheader}
\chapter{Introduction}
\label{intro}
Metaheuristics represent an important class of techniques for obtaining 
good solutions, in reasonable time, for hard combinatorial optimization problems.
They are general purpose high-level procedures that can be instantiated to 
explore efficiently the solution space of a specific optimization problem~\cite{Osman96}.
Tabu search, genetic algorithms, simulated annealing, ant systems and GRASP
are examples of metaheuristics and have been applied to real-life problems 
of several areas of science over the last decades. 
An overview of heuristic search can be found in~\cite{Salhi06}.

An important topic in metaheuristics research is the development of hybrid 
metaheuristics~\cite{Talbi}. 
Such hybrid methods result from the combination of concepts and procedures 
of different classic metaheuristics or
from the combination of metaheuristics with concepts and processes 
from other research areas responsible for performing specific tasks 
that can improve the original technique.
An instance of the latter case, and subject of this work,
is the hybrid version of a multistart heuristic that incorporates
a data mining process.

Over the last years, a hybrid version of the
GRASP metaheuristic, called DM-GRASP~\cite{Santos08} was developed, which 
includes a data mining technique to improve the search through 
the solution space.

The GRASP (Greedy Randomized Adaptive Search Procedures) metaheuristic
\cite{Feo89,Feo95} has been successfully applied to solve many
combinatorial optimization problems, in several areas, such as
scheduling, routing, partitioning, location and assignment~\cite{Festa1,Festa2}.
The solution search process employed by GRASP is performed iteratively 
and each iteration consists of two phases: construction and local search. 
The construction phase builds a feasible solution and then its neighborhood
is explored by the local search in order to find a better one. 
The result is the best solution found over all iterations.

Data mining refers to the automatic extraction of
knowledge from datasets~\cite{Han,Witten}. The extracted knowledge,
expressed in terms of patterns or rules, represents important
features of the dataset at hand. Hence, data mining provides
a means to better understand features implicit in raw data,
which is fundamental in a decision-making process.

The hybridization of GRASP with a data mining process was first 
introduced and applied to the set packing problem~\cite{Ribeiro04,Ribeiro06}. 
The basic idea was to obtain knowledge from the solutions obtained, 
in previous iterations, to guide the search for the next iterations. 
Patterns extracted from good quality solutions could be used to 
guide the search, leading to a more effective exploration of the solution space.
The resulting method, the DM-GRASP metaheuristic, achieved successful
results also when evaluated on two other combinatorial problems, namely, 
the maximum diversity problem~\cite{Santos05} and the server replication for reliable
multicast problem~\cite{Santos06}.
 
The idea of keeping track of recurrent good sub-solutions and fixing variables
has been successfully explored coupled with other heuristics.
Lin and Kernighan developed a 
heuristic for the travelling salesman problem~\cite{Lin},
where they fix some links observed to occur in a number of previously locally optimum tours 
found by the algorithm. 
Lodi et al.~\cite{Lodi} developed an evolutionary heuristic for quadratic 0-1 programming, 
where they present an intensification strategy used in a genetic algorithm to fix variables, 
which can have their values fixed during all steps of the algorithm or only during a given number of steps. 
Fleurent and Glover~\cite{Fleurent} described 
strategies for the quadratic assignment problem, 
in which, during the constructive procedure, they select elements to be inserted in a solution from 
an elite set containing the best solutions generated so far. 
Our idea is to use more elaborated techniques found in the data mining research area 
to search for good patterns extracted from a set of high quality solutions.  

The previous DM-GRASP implementations were developed over a common framework, 
divided in two parts. In the first one, a number of GRASP iterations are executed 
and the best solutions are stored in an elite set. Then, a data mining algorithm is 
used to extract patterns from this set of sub-optimal solutions. In the second part, 
the GRASP iterations use the mined patterns to construct new solutions. 
In this framework, the data mining process is performed after exactly half of the
GRASP iterations. According to the taxonomy of hybrid metaheuristics
proposed in~\cite{Talbi}, the DM-GRASP framework can be
classified as a high-level and relay hybrid metaheuristic.
It is considered high-level, since the data mining technique
and GRASP are self-contained and it is a relay hybridization
because GRASP, the data mining process, and GRASP
again are applied in a pipeline fashion.

The challenge of this work is to introduce
a data mining procedure into a state-of-the-art heuristic 
for a specific problem in order to give evidence that, 
when a technique is able to reach the optimal solution, 
or a near-optimal solution with little chance of improvements,
the mined patterns could be used to guide the search for the 
optimal or near-optimal solutions in less computational time.

We chose, as the state-of-the-art algorithm to be the base of our study, 
the heuristic proposed in~\cite{Resende04} for the classical $p$-median problem. 
It is a multistart hybrid heuristic which combines elements of different traditional metaheuristics.
Computational experiments conducted on instances from the literature showed that this 
strategy was able to perform at least as well as other methods, 
and often better in terms of both running time and solution quality. 
The solutions obtained were always within 0.1\% of the best known upper bounds.

Basically, this strategy is a multistart iterative method and each iteration 
constructs randomly a solution, which is then submitted to local search. 
In each iteration, the solution obtained by local search is combined with 
one solution from an elite set, made of the best reached solutions, 
through a path-relinking process~\cite{Glover}.
After all iterations are concluded, a post-optimization
phase is activated in which elite solutions are combined with each other. 
We will refer to this proposed strategy as Hybrid Heuristic~(HH).

We then developed the Data Mining Hybrid Heuristic~(DM-HH), introducing 
a data mining procedure into the HH implementation in order to explore
patterns extracted from a set of good quality solutions. These patterns
are used in the construction of new solutions, leading to a more effective 
search through the solution space.
Computational experiments, comparing the HH and DM-HH strategies and
conducted on a set of instances from the literature, showed that the new 
data mining version of the hybrid heuristic was able to reach optimal and 
near-optimal solutions, on average, 27.32\% faster than the original strategy.

Another contribution of this work is to show that not only the traditional
GRASP metaheuristic but also other more sophisticated heuristics, improved with
a memory-based intensification mechanism, like the path-relinking technique,
can benefit from the incorporation of a data mining procedure.

The remainder of this work is organized as follows. 
In Section 2, we present the p-median problem and review the main concepts 
and the structure of the state-of-the-art Hybrid Heuristic for this 
combinatorial problem.
The Data Mining Hybrid Heuristic, proposed in this
work, is presented in Section 3.
The computational experiments conducted to compare both strategies
are reported and discussed in Section 4. 
In Section 5, we illustrate and justify the behavior 
of both strategies with some additional analysis.
Finally, concluding remarks and some future works 
are pointed out in Section 6.

\chapter{Multistart Hybrid Heuristic}
\label{sec:2}
As stated in \cite{Resende04}, given a set $F$ of $m$ potential facilities, a set $U$ of $n$ customers, a distance function $d: U \times F \rightarrow \mathbb{R}$, 
and a constant $p \leq m$, the $p$-median problem consists of determining which $p$ facilities to open so as to minimize the sum of 
the distances from each customer to its closest open facility. It is a well-known NP-hard problem \cite{Kariv}, with numerous 
applications in location science \cite{Tansel} and clustering \cite{Rao71,Vin69}.

In \cite{Resende04}, Resende and Werneck proposed a state-of-the-art multistart hybrid heuristic for the $p$-median problem, that combined 
elements of several traditional metaheuristics to find near-optimal solutions to this problem. Figure \ref{fig:pseudo_hyb} summarizes 
this algorithm. Each iteration of this algorithm consists of the randomized construction of a solution (line 4), which is then submitted 
to local search (line 5). After this, a solution is chosen from the pool of elite solutions, made with some of the best solutions found in 
previous iterations (line 6), and is combined with the solution obtained by the local search through a process called 
path-relinking \cite{Glover} (lines 7-10). Furthermore, after all iterations are completed, this algorithm executes the second phase, 
called post-optimization, in which elite solutions are combined with each other (line 13), and the best solution found after the 
post-optimization phase execution is taken as result.

\begin{figure}[hbt]
\centering
\framebox [130mm][l] {
\begin{minipage} {130mm}
%\footnotesize
{\bf procedure} {\tt Hybrid\_Heuristic}($seed$,$maxit$,{\it elitesize})
\newline \hsm 1.\hso \textbf{Randomize}($seed$);
\newline \hsm 2.\hso \textbf{Init}($elite\_set$,{\it elitesize});
\newline \hsm 3.\hso \textbf{for} $it \leftarrow 1$ \textbf{to} $maxit$ \textbf{do}
\newline \hsm 4.\hso \hsi $S \leftarrow $ {\tt Construction\_p\_Median}();
\newline \hsm 5.\hso \hsi $S \leftarrow $ {\tt Local\_Search\_p\_Median}($S$);
\newline \hsm 6.\hso \hsi $S' \leftarrow$ {\tt Select}($elite\_set$);
\newline \hsm 7.\hso \hsi \textbf{if}($S' \not = $ $\varnothing$)
\newline \hsm 8.\hso \hsi \hsi $S' \leftarrow $ {\tt Path\_Relinking}($S$,$S'$);
\newline \hsm 9.\hso \hsi \hsi {\tt Update\_Elite}($elite\_set$,$S'$);
\newline \hsm 10.\hso \hsi \textbf{end if}
\newline \hsm 11.\hso \hsi {\tt Update\_Elite}($elite\_set$,$S$);
\newline \hsm 12.\hso \textbf{end for};
\newline \hsm 13.\hso $S \leftarrow $\textbf{Post\_Optimization}($elite\_set$);
\newline \hsm 14.\hso \textbf{return} $S$;
\end{minipage}
}
\caption{Pseudo-code of the hybrid heuristic.}
\label{fig:pseudo_hyb}
\end{figure}

The hybrid heuristic was compared with VNS (Variable Neighborhood Search) \cite{HM97}, 
VNDS (Variable Neighborhood Decomposition Search) \cite{HMP01}, 
LOPT (Local Optimization) \cite{Taillard03}, DEC (Decomposition Procedure) \cite{Taillard03}, 
LSH (Lagrangean Surrogate Heuristic) \cite{SL00}, 
and CGLS (Column Generation with Lagrangean Surrogate Relaxation)~\cite{SL00}. 
Empirical results on instances from the literature attested the robustness of the hybrid 
algorithm, which performed at least as well as other methods, and often better in terms of 
both running time and solution quality. In all cases the solutions obtained by hybrid heuristic 
were within 0.1\% of the best known upper bounds.

In the next subsections, we describe the main concepts and the structure of the hybrid heuristic.

\section{Construction Phase} 
Figure \ref{construction} summarizes the construction method, which is based on the so-called
standard greedy algorithm \cite{CFN77,Whitaker83}. This algorithm starts with the empty solution and 
adds facilities, one at a time, choosing the most profitable in each iteration. The standard strategy
is deterministic because it finds the same solution in all iterations. The construction algorithm 
of \cite{Resende04} is similar to this standard approach, but, instead of selecting the best 
among all possible options, it only considers $q < m$ possible insertions (line 5), 
chosen uniformly at random, in each iteration. The most profitable among these options is selected (line 6). 
The number $q = \lceil \log_2(m/p) \rceil$ is chosen small enough to reduce the running time of the 
algorithm (when compared to the standard greedy) and to ensure a fair degree of randomization. 

\begin{figure}[hbt]
\centering
\framebox [130mm][l] {
\begin{minipage} {130mm}
%\footnotesize
{\bf procedure} {\tt Construction\_p\_Median}()
\newline \hsm 1.\hso \textbf{Init}($n$,$m$,$p$);
\newline \hsm 2.\hso $q$ $\leftarrow$ $\lceil$ $\log_2(m/p)$ $\rceil$;
\newline \hsm 3.\hso $S$ $\leftarrow$ $\varnothing$;
\newline \hsm 4.\hso \textbf{for} $it \leftarrow 1$ \textbf{to} $p$ \textbf{do}
\newline \hsm 5.\hso \hsi $RCL$ $\leftarrow$ \textbf{Create\_RCL}($q$); 
\newline \hsm 6.\hso \hsi $u \leftarrow $ {\tt Best\_Possibility}($RCL$);
\newline \hsm 7.\hso \hsi $S \leftarrow S \cup \{u\}$;
\newline \hsm 8.\hso \textbf{end for};
\newline \hsm 9.\hso \textbf{return} $S$;
\end{minipage}
}
\caption{Pseudo-code of the construction phase.}
\label{construction}
\end{figure}

\section{Local Search}
The adopted local search procedure for the $p$-median problem, originally proposed by
Teitz and Bart \cite{TB68}, is based on swapping facilities. 
Given an initial solution~$S$, the procedure determines, for each facility $f \not \in S$, 
which facility $g \in S$ (if any) would improve the solution the most if $f$ and $g$ 
were interchanged (i.e., if $f$ were opened and $g$ closed). If there is one such 
improving move, $f$ and $g$ are interchanged. The procedure continues until no 
improving interchange can be made, in which case a local minimum has been found.

\section{Path-Relinking}
Path-relinking was originally proposed by Glover~\cite{Glover} as an intensification strategy exploring trajectories 
connecting elite solutions obtained by tabu search or scatter search~\cite{Glo00a,GloLagMar03}. 
The procedure starts by computing the symmetric difference between the two solutions, 
i.e., the set of moves needed to reach the solution target $S_t$ from solution source $S_s$. The current solution $S$ is 
initialized with $S_s$ and a path of solutions is generated linking $S_s$ and $S_t$. At each step, the procedure examines 
all moves from the current solution $S$ and selects the one which results in the least cost solution. The best move is 
made and the set of available moves is updated. If necessary, the best solution along the path under construction is updated. 
The procedure terminates when $S_t$ is reached and the best solution found after the path-relinking phase execution is taken as 
result.

\section{Post-Optimization}
The post-optimization phase in the hybrid heuristic combines the solutions of the pool of elite 
solutions to obtain even better ones. Each solution in the pool is combined with each other by 
path-relinking. The solutions generated by this process are added to a new pool of elite solutions, 
representing a new generation. The post-optimization algorithm proceeds, executing on the
new generation, until it creates a generation that does not improve upon the previous one. 

\chapter{Data Mining Hybrid Heuristic}
\label{sec:3}
In this chapter, we propose a new version of the
hybrid heuristic (HH), presented in the previous chapter,
which incorporates a data mining process, called DM-HH, 
to solve the $p$-median problem. 
The basic concept of incorporating a data mining process 
is that patterns found in high quality solutions obtained in
earlier iterations of the HH strategy
can be used to conduct and improve the search process.

The DM-HH procedure is composed of two phases.
The first one 
% is called the elite set generation phase, which 
consists of executing $maxit/2$ pure HH iterations to obtain a 
set of different solutions. 
The~$d$ best solutions from this set of solutions compose 
the elite set for mining.

After this first phase, the data mining process is applied. 
It is responsible for extracting a set of patterns from the 
elite set. The patterns to be mined are sets of elements that 
frequently appear in solutions from the elite set. This extraction
of patterns characterizes a frequent itemset mining application \cite{Han}.
A frequent itemset mined with support $s$\% represents a set
of elements that occur in $s$\% of the elite solutions.

Next, the second phase
%, called hybrid phase, 
is performed. In this part, another $maxit/2$ slightly different 
HH iterations are executed. In these $maxit/2$ iterations, 
an adapted construction phase starts building a solution guided by a mined 
pattern selected from the set of mined patterns. Initially, all elements of the selected pattern are 
inserted into the partial solution, from which a complete solution will 
be built executing the standard construction procedure. This way, all constructed 
solutions will contain the elements of the selected pattern.

The pseudo-code of the DM-HH for the $p$-median problem is illustrated in Figure~\ref{fig:pseudo_hyb_hh}.
In lines~2 and~3, the elite set of the original heuristic and the elite set for mining are initialized with the empty set. 
The loop from line~4 to line~15 corresponds to the first phase of the strategy, in which pure HH is performed for $maxit/2$ 
iterations. The original construction method is executed in line~5, followed by the local search 
method in line~6. In line~7, a solution is chosen from the pool of elite solutions of the original 
approach to be combined, in line~9, using the path-relinking process with the solution obtained by the local search. 
In lines 10 to 14, the elite set of the original algorithm and the elite set for mining, 
composed of $d$ solutions, are updated with the solution obtained by the path-relinking 
process and with the solution obtained by the local search.
In line~16, the data mining procedure extracts $t$ patterns from the elite set, which are
inserted in decreasing order of pattern size in the set of patterns.
The loop from line~17 to line~27 corresponds to the second phase of the strategy. 
In line~18, one pattern is picked from the set of patterns in a round-robin way.
Then the adapted construction procedure is performed in line~19, using the selected pattern as a starting point. 
In line~20, the local search is executed. After this, a solution is chosen from the pool of elite solutions of the original 
approach to be combined using the path-relinking with the solution obtained by the local search (lines~21 to 26). 
After all iterations are completed, this algorithm executes the post-optimization in line~28 and the best solution 
found after the post-optimization phase is taken as result.

\begin{figure}[hbt]
\centering
\framebox [130mm][l] {
\begin{minipage} {130mm}
%\footnotesize
{\bf procedure} {\tt DM\_HH}($seed$,$maxit$,{\it elitesize},$t$)
\newline \hsm 1.\hso \textbf{Randomize}($seed$);
\newline \hsm 2.\hso $elite\_set \leftarrow \varnothing $;
\newline \hsm 3.\hso $elite\_set\_DM \leftarrow \varnothing $;
\newline \hsm 4.\hso \textbf{for} $it \leftarrow 1$ \textbf{to} $maxit/2$ \textbf{do}
\newline \hsm 5.\hso \hsi $S \leftarrow $ {\tt Construction\_p\_Median}();
\newline \hsm 6.\hso \hsi $S \leftarrow $ {\tt Local\_Search\_p\_Median}($S$);
\newline \hsm 7.\hso \hsi $S' \leftarrow$ {\tt Select}($elite\_set$);
\newline \hsm 8.\hso \hsi \textbf{if}($S' \not = $ $\varnothing$)
\newline \hsm 9.\hso \hsi \hsi $S' \leftarrow $ {\tt Path\_Relinking}($S$,$S'$);
\newline \hsm 10.\hso \hsi \hsi {\tt Update\_Elite}($elite\_set$,$S'$);
\newline \hsm 11.\hso \hsi \hsi {\tt Update\_Elite}($elite\_set\_DM$,$S'$);
\newline \hsm 12.\hso \hsi \textbf{end if}
\newline \hsm 13.\hso \hsi {\tt Update\_Elite}($elite\_set$,$S$);
\newline \hsm 14.\hso \hsi {\tt Update\_Elite}($elite\_set\_DM$,$S$);
\newline \hsm 15.\hso \textbf{end for};
\newline \hsm 16.\hso $patterns\_set \leftarrow $ {\tt Mine}($elite\_set\_DM$, $t$);
\newline \hsm 17.\hso \textbf{for} $it \leftarrow 1$ \textbf{to} $maxit/2$ \textbf{do}
\newline \hsm 18.\hso \hsi $pattern \leftarrow $ {\tt Select\_Next\_Largest\_Pattern}($patterns\_set$);
\newline \hsm 19.\hso \hsi $S \leftarrow $ {\tt Adapted\_Construction\_p\_Median}($pattern$);
\newline \hsm 20.\hso \hsi $S \leftarrow $ {\tt Local\_Search\_p\_Median}($S$);
\newline \hsm 21.\hso \hsi $S' \leftarrow$ {\tt Select}($elite\_set$);
\newline \hsm 22.\hso \hsi \textbf{if}($S' \not = $ $\varnothing$)
\newline \hsm 23.\hso \hsi \hsi $S' \leftarrow $ {\tt Path\_Relinking}($S$,$S'$);
\newline \hsm 24.\hso \hsi \hsi {\tt Update\_Elite}($elite\_set$,$S'$);
\newline \hsm 25.\hso \hsi \textbf{end if}
\newline \hsm 26.\hso \hsi {\tt Update\_Elite}($elite\_set$,$S$);
\newline \hsm 27.\hso \textbf{end for};
\newline \hsm 28.\hso $S \leftarrow $\textbf{Post\_Optimization}($elite\_set$);
\newline \hsm 29.\hso \textbf{return} $S$;
\end{minipage}
}
\caption{Pseudo-code of the DM-HH}
\label{fig:pseudo_hyb_hh}
\end{figure}


In Figure~\ref{fig:pseudo_hyb_constr}, the pseudo-code of the adapted construction 
is illustrated. It is quite similar to the code described in 
Figure~\ref{construction} with the difference that, instead of beginning 
the solution with an empty set, in line~3, it starts with all elements of the pattern 
supplied as a parameter. 
In the loop from line~4 to line~8, the solution is completed using
the original construction method.

\begin{figure}[hbt]
\centering
\framebox [130mm][l] {
\begin{minipage} {130mm}
%\footnotesize
{\bf procedure} {\tt Adapted\_Construction\_p\_Median}($pattern$)
\newline \hsm 1.\hso \textbf{Init}($n$,$m$,$p$);
\newline \hsm 2.\hso $q$ $\leftarrow$ $\lceil \log_2(m/p) \rceil$;
\newline \hsm 3.\hso $S$ $\leftarrow$ $pattern$;
\newline \hsm 4.\hso \textbf{for} $it \leftarrow $$|pattern|+1$ to $p$ \textbf{do}
\newline \hsm 5.\hso \hsi $RCL$ $\leftarrow$ \textbf{Create\_RCL}($q$); 
\newline \hsm 6.\hso \hsi $u \leftarrow $ {\tt Best\_Possibility}($RCL$);
\newline \hsm 7.\hso \hsi $S \leftarrow S \cup \{u\}$;
\newline \hsm 8.\hso \textbf{end for};
\newline \hsm 9.\hso \textbf{return} $S$;
\end{minipage}
}
\caption{Pseudo-code of the adapted construction}
\label{fig:pseudo_hyb_constr}
\end{figure}

The extraction of patterns from the elite set, which
is activated in line 16 of the pseudo-code presented in 
Figure~\ref{fig:pseudo_hyb_hh}, corresponds to the
well-known frequent itemset mining (FIM) task. 
The FIM problem can be defined as follows.

Let $I = \{i_{1}, i_{2}, \ldots, i_{n}\}$ be a set of items. 
A transaction is a subset of $I$ and a dataset $D$ is a set of transactions.
A frequent itemset $F$, with support $s$, is a subset of $I$ which 
occurs in at least $s\%$ of the transactions in $D$.  
The FIM problem consists of extracting all frequent itemsets from
a dataset $D$ with a minimum support specified as a parameter.
During the last two decades, many algorithms have been proposed to
efficiently mine frequent itemsets \cite{Agrawal,Goethals,Han00,Orlando}.

In this work, the useful patterns to be mined are sets of elements that
commonly appear in sub-optimal solutions of the $p$-median problem. 
This is a typical frequent itemset mining application, where
the set of items is the set of potential locations.
Each transaction of the dataset represents a sub-optimal solution of the elite set. 
A frequent itemset mined from the elite set with support $s\%$ represents a set
of locations that occur in $s\%$ of the elite solutions.

A frequent itemset is called maximal if it has no superset that is also 
frequent. In order to avoid mining frequent itemsets which are
subsets of one another, we decided to extract only maximal frequent itemsets. 
To execute this task, we adopted the FPmax* algorithm~\cite{Grahnel},
available at \texttt{http://fimi.cs.helsinki.fi}.


\chapter{Computational Experiments}
\label{sec:4}
In this chapter, the computational results obtained for HH and DM-HH are presented.
The strategies were evaluated using three classes of instances. The first class, named ORLIB,
consists of 40 instances and was taken from the OR-Library \cite{Beasley}.
Each instance is a different graph with a corresponding value for $p$.
The number of nodes (customers) varies from 100 to 900, and the value of $p$ ranges from 5 to 200.
The optimal values are known for these 40 instances. In the OR-Library, these
40 instances are identified by {\it pmed01} to {\it pmed40}.

Instances of the second class, named TSP, are sets of points on the plane. 
Originally proposed for the traveling salesman 
problem, they are available at the TSPLIB \cite{Reinelt}. Every point is considered 
both a potential facility and a customer, and the cost of assigning customer $c$ to 
facility $f$ is simply the Euclidean distance between the points representing $c$ and 
$f$ (i.e., the costs are real values). From the TSP class, we considered the $FL1400$ instances,
with 1400 nodes and with several different values for $p$ (number of facilities to open).

The third class we study is named RW. Originally proposed in \cite{ResWer03a}, it
corresponds to completely random distance matrices. In every case, the number of potential
facilities ($m$) is equal to the number of customers ($n$). The distance between each facility
and each customer has an integer value taken uniformly at random from the interval $[1, n]$. 
Six different values of $n$ were considered: $100$, $250$, $500$, $1000$, $1500$, and $2000$. 
In each case, several values of $p$ were tested.

The algorithms were implemented in C++ and compiled
with g++ (GCC) 4.2.3. The tests were performed on a 
2.4 GHz Intel Core 2 Quad CPU Q6600 with 3 Gbytes of RAM,
running Linux Kernel 2.6.24.

Both HH and DM-HH were run 9 times with
a different random seed in each run.
Each strategy executed 500 iterations.
After having conducted some tuning experiments, 
we set some parameter values.
The size of the elite set for mining ($d$) 
and the size of the set of patterns ($t$) were set to 10.
And a set of facilities was considered a pattern if it was
present in at least two of the elite solutions.

When executed for the 40 instances from the ORLIB class,
both HH and DM-HH reached the optimal solution in all 9 runs.
Table~\ref{tab:ORLIBTime} presents the results related to execution time of both strategies.
In this table, the first column presents the name, the number of customers and 
the value of $p$ of the working instances, the second and fourth columns show 
the average execution time (in seconds) of HH and DM-HH, obtained for 9 runs, 
the third and fifth columns present the standard deviation value of these execution times.
Smaller times are bold-faced.
The sixth column shows the percentage difference between the HH and DM-HH average times
in relation to the HH average time. In the last line, 
the average of the percentage differences is reported.
We can observe that DM-HH was always faster than HH and
that the standard deviations are quite small.
On average, DM-HH was 25.06\% faster than the HH strategy for the ORLIB instances.

\begin{table}[htbp]
\caption{Time of HH and DM-HH for ORLIB instances}
\scriptsize
% \small
\centering
\begin{tabular}{lrrrrr}
\\
\hline 
\noalign{\smallskip}
& \multicolumn{2}{c}{HH} & \multicolumn{2}{c}{DM-HH}\\
\noalign{\smallskip}
Name/Cust/\textit{p} & Time(s) & SD & Time(s) & SD & \% \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
pmed01/100/5 &       0.62 &       0.04 & {\bf 0.61} &       0.04 &       1.08 \\
pmed02/100/10 &       0.51 &       0.03 & {\bf 0.41} &       0.02 &      20.35 \\
pmed03/100/10 &       0.51 &       0.02 & {\bf 0.39} &       0.02 &      23.14 \\
pmed04/100/20 &       0.42 &       0.02 & {\bf 0.34} &       0.07 &      17.65 \\
pmed05/100/33 &       0.40 &       0.03 & {\bf 0.28} &       0.02 &      29.13 \\
pmed06/200/5 &       2.26 &       0.15 & {\bf 2.23} &       0.09 &       1.38 \\
pmed07/200/10 &       1.53 &       0.04 & {\bf 1.08} &       0.11 &      29.36 \\
pmed08/200/20 &       1.19 &       0.02 & {\bf 0.84} &       0.05 &      29.53 \\
pmed09/200/40 &       1.14 &       0.02 & {\bf 0.80} &       0.02 &      30.10 \\
pmed10/200/67 &       1.11 &       0.03 & {\bf 0.79} &       0.02 &      28.41 \\
pmed11/300/5 &       3.86 &       0.13 & {\bf 3.18} &       0.12 &      17.67 \\
pmed12/300/10 &       3.07 &       0.12 & {\bf 2.96} &       0.10 &       3.40 \\
pmed13/300/30 &       2.11 &       0.07 & {\bf 1.46} &       0.06 &      30.89 \\
pmed14/300/60 &       2.14 &       0.03 & {\bf 1.45} &       0.04 &      32.47 \\
pmed15/300/100 &       2.43 &       0.04 & {\bf 1.72} &       0.11 &      29.07 \\
pmed16/400/5 &       7.89 &       0.28 & {\bf 6.76} &       0.08 &      14.28 \\
pmed17/400/10 &       5.65 &       0.11 & {\bf 4.28} &       0.08 &      24.29 \\
pmed18/400/40 &       3.73 &       0.28 & {\bf 2.42} &       0.08 &      35.32 \\
pmed19/400/80 &       3.68 &       0.05 & {\bf 2.49} &       0.05 &      32.39 \\
pmed20/400/133 &       4.20 &       0.11 & {\bf 3.03} &       0.05 &      27.75 \\
pmed21/500/5 &      11.11 &       0.34 & {\bf 10.95} &       0.35 &       1.45 \\
pmed22/500/10 &       9.02 &       0.15 & {\bf 6.97} &       0.15 &      22.70 \\
pmed23/500/50 &       4.93 &       0.20 & {\bf 3.22} &       0.18 &      34.69 \\
pmed24/500/100 &       5.40 &       0.08 & {\bf 3.73} &       0.06 &      30.93 \\
pmed25/500/167 &       6.54 &       0.21 & {\bf 4.76} &       0.09 &      27.22 \\
pmed26/600/5 &      18.05 &       0.25 & {\bf 13.54} &       0.47 &      24.99 \\
pmed27/600/10 &      14.69 &       0.45 & {\bf 11.62} &       0.86 &      20.90 \\
pmed28/600/60 &       7.01 &       0.16 & {\bf 5.26} &     0.20 &      24.96 \\
pmed29/600/120 &       8.38 &       0.32 & {\bf 5.76} &       0.16 &      31.26 \\
pmed30/600/200 &      10.89 &       0.32 & {\bf 7.67} &       0.12 &      29.57 \\
pmed31/700/5 &      26.32 &       0.29 & {\bf 19.76} &       0.58 &      24.92 \\
pmed32/700/10 &      17.86 &       0.51 & {\bf 11.62} &       0.34 &      34.94 \\
pmed33/700/70 &      10.43 &       0.26 & {\bf 6.72} &       0.22 &      35.57 \\
pmed34/700/140 &      12.44 &       0.83 & {\bf 8.26} &       0.16 &      33.60 \\
pmed35/800/5 &      33.73 &       0.77 & {\bf 27.61} &       0.51 &      18.14 \\
pmed36/800/10 &      24.87 &       0.47 & {\bf 19.09} &       0.34 &      23.24 \\
pmed37/800/80 &      14.54 &       0.24 & {\bf 9.18} &       0.59 &      36.86 \\
pmed38/900/5 &      51.08 &       0.74 & {\bf 40.21} &       0.52 &      21.28 \\
pmed39/900/10 &      28.90 &       0.53 & {\bf 19.56} &       0.32 &      32.32 \\
pmed40/900/90 &      19.79 &       0.40 & {\bf 12.80} &       0.34 &      35.32 \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 Average &  &  &  &  &  25.06 \\
\noalign{\smallskip}
\hline
\end{tabular}
\label{tab:ORLIBTime}
\end{table}

There are two main reasons for the faster behavior of DM-HH.
First, the computational effort of the adapted construction phase is smaller
than the original HH construction, since the elements from a pattern are initially 
fixed in the solution. Then a smaller number of elements must be processed and
inserted into the constructed solution.
Second, and most important, the use of patterns leads to the construction
of better solutions which will be input for the local search. This results
in less computational effort required to converge to a local optimal solution.
In the next section, we will further investigate and analyze this behavior.

When executed for the 45 instances from the RW class,
both HH and DM-HH reached the best known solutions in all 9 runs
for 23 instances. For the other 22 instances, they obtained slightly
different solutions.
In Table~\ref{tab:RWCost}, 
the results related to the quality of the obtained solutions are shown.
The first column presents the class name, the number of customers 
and the value of $p$ of the working instances, 
the second one shows the best known value for this instance, 
the third and fifth columns present the deviation value of the best cost obtained by HH 
and DM-HH related to the best known value, and the fourth and sixth 
columns present the deviation value of the average cost obtained by  
both strategies.

\begin{table}[htbp]
\caption{HH and DM-HH for RW instances}
\scriptsize
% \small
\centering
\begin{tabular}{lrrrrr}
\\
\hline 
\noalign{\smallskip}
& & \multicolumn{2}{c}{HH} & \multicolumn{2}{c}{DM-HH}\\
\noalign{\smallskip}
Class/Cust/\textit{p} & Best Known & Best & Avg & Best & Avg\\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 RW/100/10 &        530 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/100/20 &        277 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/100/30 &        213 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/100/40 &        187 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/100/50 &        172 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/250/10 &       3691 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/250/25 &       1360 &    0.000 & {\bf 0.065} &    0.000 &    0.131 \\
 RW/250/50 &        713 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/250/75 &        523 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/250/100 &        444 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/250/125 &        411 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/500/10 &      16108 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/500/25 &       5681 &    0.000 & {\bf 0.016} &    0.000 &    0.086 \\
 RW/500/50 &       2626 &    0.000 & {\bf 0.131} &    0.000 &    0.161 \\
 RW/500/75 &       1757 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/500/100 &       1379 &    0.000 & {\bf 0.056} &    0.000 &    0.064 \\
 RW/500/150 &       1024 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/500/200 &        893 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/500/250 &        833 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/1000/10 &      67811 &    0.000 & {\bf 0.000} &    0.000 &    0.107 \\
 RW/1000/25 &      24896 &    0.108 &    0.162 & {\bf 0.000} & {\bf 0.096} \\
 RW/1000/50 &      11274 &    0.053 &    0.316 & {\bf 0.000} & {\bf 0.203} \\
 RW/1000/75 &       7135 &    0.477 &    0.662 & {\bf 0.000} & {\bf 0.442} \\
 RW/1000/100 &       5218 & {\bf 0.000} &    0.290 &    0.038 & {\bf 0.200} \\
 RW/1000/200 &       2704 &    0.000 & {\bf 0.033} &    0.000 &    0.049 \\
 RW/1000/300 &       2018 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/1000/400 &       1734 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/1000/500 &       1614 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/1500/10 &     160327 &    0.000 &    0.003 &    0.000 & {\bf 0.000} \\
 RW/1500/25 &      59374 &    0.125 &    0.246 & {\bf 0.000} & {\bf 0.207} \\
 RW/1500/50 &      26912 &    0.305 &    0.598 & {\bf 0.000} & {\bf 0.469} \\
 RW/1500/75 &      16921 & {\bf 0.000} &    0.470 &    0.018 & {\bf 0.188} \\
 RW/1500/100 &      12243 &    0.335 &    0.573 & {\bf 0.000} & {\bf 0.324} \\
 RW/1500/250 &       4761 & {\bf 0.000} &    0.173 &    0.042 & {\bf 0.142} \\
 RW/1500/500 &       2867 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/1500/750 &       2422 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/2000/10 &     293073 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/2000/25 &     109481 &    0.068 &    0.593 & {\bf 0.000} & {\bf 0.241} \\
 RW/2000/50 &      50113 & {\bf 0.000} &    0.583 &    0.010 & {\bf 0.411} \\
 RW/2000/75 &      31463 &    0.372 &    0.760 & {\bf 0.000} & {\bf 0.527} \\
 RW/2000/100 &      22514 &    0.355 &    0.794 & {\bf 0.000} & {\bf 0.432} \\
 RW/2000/250 &       8204 &    0.098 & {\bf 0.274} & {\bf 0.000} &    0.288 \\
 RW/2000/500 &       4479 &    0.022 &    0.057 & {\bf 0.000} & {\bf 0.042} \\
 RW/2000/750 &       3560 &    0.000 &    0.000 &    0.000 &    0.000 \\
 RW/2000/1000 &       3225 &    0.000 &    0.000 &    0.000 &    0.000 \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
\multicolumn{2}{c}{Average} &    0.051 &    0.152 & {\bf 0.002} & {\bf 0.107} \\
\noalign{\smallskip}
\hline
\end{tabular}
\label{tab:RWCost}
\end{table}


\begin{table}[!tbp]
\caption{Time of HH and DM-HH for RW instances}
\scriptsize
% \small
\centering
\begin{tabular}{lrrrrr}
\\
\hline 
\noalign{\smallskip}
& \multicolumn{2}{c}{HH} & \multicolumn{2}{c}{DM-HH}\\
\noalign{\smallskip}
Class/Cust/\textit{p} & Time(s) & SD & Time(s) & SD & \% \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 RW/100/10 &       0.99 &       0.04 & {\bf 0.76} &       0.04 &      22.66 \\
 RW/100/20 &       0.62 &       0.08 & {\bf 0.42} &       0.02 &      31.29 \\
 RW/100/30 &       0.44 &       0.02 & {\bf 0.32} &       0.02 &      28.46 \\
 RW/100/40 &       0.40 &       0.03 & {\bf 0.31} &       0.02 &      23.76 \\
 RW/100/50 &       0.33 &       0.02 & {\bf 0.28} &       0.02 &      16.94 \\
 RW/250/10 &       7.61 &       0.28 & {\bf 6.00} &       0.17 &      21.08 \\
 RW/250/25 &       3.89 &       0.05 & {\bf 2.65} &       0.05 &      31.74 \\
 RW/250/50 &       2.33 &       0.07 & {\bf 1.57} &       0.04 &      32.35 \\
 RW/250/75 &       1.84 &       0.04 & {\bf 1.21} &       0.03 &      34.08 \\
 RW/250/100 &       1.65 &       0.03 & {\bf 1.15} &       0.04 &      30.22 \\
 RW/250/125 &       1.40 &       0.05 & {\bf 1.06} &       0.03 &      23.89 \\
 RW/500/10 &      35.91 &       0.67 & {\bf 30.44} &       0.57 &      15.24 \\
 RW/500/25 &      20.02 &       0.43 & {\bf 14.72} &       0.94 &      26.45 \\
 RW/500/50 &      10.26 &       0.20 & {\bf 7.16} &       0.25 &      30.21 \\
 RW/500/75 &       7.76 &       0.12 & {\bf 5.12} &       0.64 &      34.00 \\
 RW/500/100 &       7.23 &       0.70 & {\bf 4.61} &       0.16 &      36.21 \\
 RW/500/150 &       6.60 &       0.39 & {\bf 4.08} &       0.07 &      38.14 \\
 RW/500/200 &       6.56 &       0.25 & {\bf 4.50} &       0.24 &      31.42 \\
 RW/500/250 &       5.19 &       0.12 & {\bf 3.91} &       0.15 &      24.73 \\
 RW/1000/10 &     181.50 &      35.39 & {\bf 125.97} &       2.90 &      30.59 \\
 RW/1000/25 &     116.02 &      19.43 & {\bf 83.25} &       3.48 &      28.25 \\
 RW/1000/50 &      61.50 &       0.72 & {\bf 47.53} &       2.20 &      22.70 \\
 RW/1000/75 &      45.49 &       1.70 & {\bf 32.43} &       1.35 &      28.70 \\
 RW/1000/100 &      40.52 &       1.61 & {\bf 26.71} &       1.12 &      34.08 \\
 RW/1000/200 &      37.90 &       0.58 & {\bf 24.29} &       1.29 &      35.89 \\
 RW/1000/300 &      36.68 &       0.17 & {\bf 23.64} &       0.33 &      35.56 \\
 RW/1000/400 &      40.83 &       2.88 & {\bf 26.33} &       0.24 &      35.51 \\
 RW/1000/500 &      30.69 &       0.27 & {\bf 21.56} &       0.29 &      29.73 \\
 RW/1500/10 &     438.32 &       7.73 & {\bf 358.80} &       6.90 &      18.14 \\
 RW/1500/25 &     268.78 &       7.33 & {\bf 213.17} &       9.10 &      20.69 \\
 RW/1500/50 &     165.73 &       3.28 & {\bf 127.17} &       6.46 &      23.26 \\
 RW/1500/75 &     114.82 &       0.98 & {\bf 90.74} &      13.65 &      20.98 \\
 RW/1500/100 &      99.12 &      10.74 & {\bf 70.30} &       3.15 &      29.07 \\
 RW/1500/250 &      85.34 &       0.92 & {\bf 54.70} &       4.39 &      35.90 \\
 RW/1500/500 &      89.93 &       0.21 & {\bf 57.23} &       0.33 &      36.36 \\
 RW/1500/750 &      72.81 &       0.20 & {\bf 51.88} &       0.26 &      28.75 \\
 RW/2000/10 &     830.56 &      35.49 & {\bf 725.40} &      28.33 &      12.66 \\
 RW/2000/25 &     523.63 &      11.75 & {\bf 433.63} &      15.92 &      17.19 \\
 RW/2000/50 &     360.63 &      12.40 & {\bf 278.68} &      15.15 &      22.73 \\
 RW/2000/75 &     240.66 &       5.66 & {\bf 180.77} &       6.36 &      24.88 \\
 RW/2000/100 &     189.47 &       4.75 & {\bf 142.00} &       6.59 &      25.05 \\
 RW/2000/250 &     148.36 &      11.37 & {\bf 94.12} &       7.86 &      36.56 \\
 RW/2000/500 &     153.68 &       3.30 & {\bf 95.02} &       2.26 &      38.17 \\
 RW/2000/750 &     182.84 &      22.41 & {\bf 115.27} &       2.39 &      36.96 \\
 RW/2000/1000 &     132.88 &       0.29 & {\bf 92.63} &       3.23 &      30.29 \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 Average &  &  &  &  &  28.26 \\
\noalign{\smallskip}
\hline
\end{tabular}
\label{tab:RWTime}
\end{table}

The deviation value is computed as follows:
\begin{equation} \label{e1.1}
   dev = \frac{(HeuristicCost-BestCost)}{BestCost} \times 100,
\end{equation}
where $HeuristicCost$ is the (best or average) cost obtained
by the heuristic technique and the $BestCost$ is the optimal or
best known value for the working instance. 

The smallest values, i.e., the better results, are bold-faced.
The last line of these tables presents the average values of each column.
Out of the 22 instances for which HH and DM-HH presented different results,
HH reached the best known value in all 9 runs for 10 instances, whereas DM-HH
reached this result for 18 instances (for these instances the Best value is zero).
The DM-HH strategy found 11 better results for best values and 4 were found by HH.
Considering the average results, DM-HH found 15 better values and HH found 7.
These results show that the DM-HH strategy was able 
to improve slightly the results obtained by HH for the RW class.

In terms of computational time, we can observe in Table~\ref{tab:RWTime}
that, again, the DM-HH strategy was faster than HH. On average, DM-HH was
28.26\% faster.  

Table~\ref{tab:FLCost} presents the results related to the quality 
of the solutions obtained by HH and DM-HH when evaluated for the 
18 instances from the FL1400 class.
Both strategies reached the best known solutions in all 9 runs
for just 3 instances. For the other 15 instances, they obtained slightly
different solutions.
The HH strategy found 6 better results for best values and just one was found by DM-HH.
Considering the average results, HH found 7 better values and DM-HH found 4.
Differently from ORLIB and RW classes, these results show that 
the HH strategy, for the FL1400 class, was able to obtain slightly better results than DM-HH.

\begin{table}[!tbp]
\caption{HH and DM-HH for FL1400 instances}
%\scriptsize
% \small
\centering
\begin{tabular}{lrrrrr}
\\
\hline 
\noalign{\smallskip}
& & \multicolumn{2}{c}{HH} & \multicolumn{2}{c}{DM-HH}\\
\noalign{\smallskip}
Class/Cust/\textit{p} & Best Known & Best & Avg & Best & Avg\\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 FL/1400/10 &  101249.47 &    0.000 &    0.000 &    0.000 &    0.000 \\
 FL/1400/20 &   57857.55 &    0.001 &    0.001 &    0.001 &    0.001 \\
 FL/1400/30 &   44013.48 &    0.000 &    0.000 &    0.000 &    0.000 \\
 FL/1400/40 &   35002.52 &    0.000 &    0.000 &    0.000 &    0.000 \\
 FL/1400/50 &   29089.78 &    0.002 &    0.002 &    0.002 &    0.002 \\
 FL/1400/60 &   25161.12 &    0.000 &    0.002 &    0.000 & {\bf 0.000} \\
 FL/1400/70 &   22125.53 &    0.002 &    0.002 &    0.002 &    0.002 \\
 FL/1400/80 &   19870.85 &    0.000 & {\bf 0.001} &    0.000 &    0.012 \\
 FL/1400/90 &   17987.94 &    0.004 &    0.004 &    0.004 &    0.004 \\
 FL/1400/100 &    16551.2 &    0.006 &    0.019 &    0.006 & {\bf 0.014} \\
 FL/1400/150 &   12026.43 & {\bf 0.000} &    0.029 &    0.000 & {\bf 0.018} \\
 FL/1400/200 &    9357.74 &    0.011 & {\bf 0.028} & {\bf 0.000} &    0.028 \\
 FL/1400/250 &     7739.8 & {\bf 0.000} & {\bf 0.032} &    0.023 &    0.057 \\
 FL/1400/300 &    6620.92 & {\bf 0.001} & {\bf 0.048} &    0.014 &    0.067 \\
 FL/1400/350 &    5720.88 & {\bf 0.000} & {\bf 0.048} &    0.019 &    0.076 \\
 FL/1400/400 &    5006.75 &    0.000 &    0.013 &    0.000 & {\bf 0.009} \\
 FL/1400/450 &    4468.31 & {\bf 0.000} & {\bf 0.062} &    0.037 &    0.095 \\
 FL/1400/500 &    4046.16 & {\bf 0.000} & {\bf 0.039} &    0.001 &    0.049 \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
\multicolumn{2}{c}{Average}   & {\bf 0.001} & {\bf 0.018} &    0.006 &    0.024 \\
\noalign{\smallskip}
\hline
\end{tabular}
\label{tab:FLCost}
\end{table}

However, for the time analysis, DM-HH always improved the HH performance.
In Table~\ref{tab:FLTime}, we observe that, once more, the DM-HH strategy 
was faster than HH. This time, DM-HH was 30.03\% faster.  

\begin{table}[!tbp]
\caption{Time of HH and DM-HH for FL1400 instances}
%\scriptsize
% \small
\centering
\begin{tabular}{lrrrrr}
\\
\hline 
\noalign{\smallskip}
& \multicolumn{2}{c}{HH} & \multicolumn{2}{c}{DM-HH}\\
\noalign{\smallskip}
Class/Cust/\textit{p} & Time(s) & SD & Time(s) & SD & \% \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 FL/1400/10 &     155.05 &       5.51 & {\bf 120.78} &       3.78 &      22.10 \\
 FL/1400/20 &     103.13 &       3.41 & {\bf 70.79} &       1.97 &      31.36 \\
 FL/1400/30 &     105.19 &       2.67 & {\bf 65.67} &       1.42 &      37.57 \\
 FL/1400/40 &      93.92 &       2.17 & {\bf 64.76} &       1.99 &      31.05 \\
 FL/1400/50 &      77.66 &       1.29 & {\bf 49.60} &       1.04 &      36.13 \\
 FL/1400/60 &      76.28 &       1.37 & {\bf 54.31} &       2.17 &      28.80 \\
 FL/1400/70 &      65.84 &       1.20 & {\bf 40.97} &       1.00 &      37.78 \\
 FL/1400/80 &      66.35 &       1.85 & {\bf 42.61} &       1.45 &      35.77 \\
 FL/1400/90 &      61.96 &       2.05 & {\bf 38.34} &       1.53 &      38.13 \\
 FL/1400/100 &      62.62 &       1.87 & {\bf 46.11} &       0.61 &      26.36 \\
 FL/1400/150 &      63.00 &       1.35 & {\bf 51.59} &       1.69 &      18.12 \\
 FL/1400/200 &      61.79 &       0.78 & {\bf 43.41} &       3.19 &      29.75 \\
 FL/1400/250 &      70.98 &       2.24 & {\bf 47.72} &       3.17 &      32.78 \\
 FL/1400/300 &      75.40 &       2.35 & {\bf 52.50} &       2.71 &      30.37 \\
 FL/1400/350 &      77.13 &       2.30 & {\bf 57.20} &       3.49 &      25.83 \\
 FL/1400/400 &      77.62 &       1.81 & {\bf 54.95} &       3.87 &      29.20 \\
 FL/1400/450 &      84.72 &       2.39 & {\bf 65.67} &       2.17 &      22.48 \\
 FL/1400/500 &      93.48 &       5.76 & {\bf 68.27} &       3.07 &      26.97 \\
\noalign{\smallskip}
\hline
\noalign{\smallskip}
 Average &  &  &  &  &  30.03 \\
\noalign{\smallskip}
\hline
\end{tabular}
\label{tab:FLTime}
\end{table}

\chapter{Strategies Behavior Analysis}
\label{sec:5}
In this chapter, we present some additional analysis of computational experiments 
performed to illustrate the behavior of both strategies. 

Figures \ref{fig:hh1} and \ref{fig:dm1} present
the behavior of the construction, local search and path-relinking phases, in terms
of the cost values obtained, by HH and DM-HH
through the execution of 500 iterations, for the specific 
instance rw1000-p25.

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{custopit-hh.pdf}
\caption{One execution of HH for rw1000-p25}
\label{fig:hh1}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{custopit-dm.pdf}
\caption{One execution of DM-HH for rw1000-p25}
\label{fig:dm1}
\end{figure}

These figures show that the local search always 
reduces the cost of the solution obtained by the construction phase and the path-relinking phase is able to decrease 
the cost of the solutions obtained by the local search phase in both heuristics HH and DM-HH. 

In Figure \ref{fig:hh1}, we observe that the
behavior of the construction, local search and path-relinking phases performed in HH looks the 
same through all iterations. 

Figure \ref{fig:dm1} shows that the DM-HH strategy provides an improvement
in the quality of the solutions reached by the construction, local search and path-relinking phases after iteration 250, where DM-HH starts to use the patterns found by the data mining procedure.

Table~\ref{tabavcost} shows the average costs of the solutions obtained by the construction phase, and after the local search and path-relinking phases in the first 250 iterations and in the last 250 iterations, i.e., before using the patterns generated by the data mining procedure and using these patterns.  
All phases present better cost values after using the patterns generated by the data mining procedure.
From these decreasing values, we can deduce the benefit from
executing the data mining procedure not only for the construction phase but also for the local search and path-relinking phases.

\begin{table}[ht]
\caption{Average cost values} 
%\scriptsize
\centering
\begin{tabular}{rrrr}
\\
\hline
& Construction & Local search & Path-relinking\\
\hline
First 250 iterations &  33675.70 &  26555.27 & 25846.06 \\
Next 250 iterations &  29007.49 &  25352.95 & 25276.90 \\
\hline
\end{tabular}  
\label{tabavcost}
\end{table}

Figures \ref{fig:hht1} and \ref{fig:dmt1} show
the behavior of the construction, local search and path-relinking phases, for both strategies HH and DM-HH in terms
of the computational time, through the execution of 500 iterations, for the same instance rw1000-p25.

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{tempopit-hh.pdf}
\caption{One execution of HH for rw1000-p25}
\label{fig:hht1}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{tempopit-dm.pdf}
\caption{One execution of DM-HH for rw1000-p25}
\label{fig:dmt1}
\end{figure}

Table~\ref{tabavtime} presents the average computational times used to execute the construction, local search and path-relinking phases in the first 250 iterations and in the last 250 iterations.  
We can clearly see that computational times of all phases dropped substantially after starting to use the patterns generated by the data mining procedure. 
The construction phase demands less computational time because it starts from a solution partially built using
the obtained patterns. The necessary effort required by the local
search procedure to find a local optimum decreases due to the better solutions
provided by the construction phase. 
As the solutions generated after the local search procedure present better cost in the iterations which use the data mining patterns, they are
more similar to the solutions in the path-relinking pool and the path-relinking procedure takes less time to execute.

\begin{table}[ht]
\caption{Average computational times} 
%\scriptsize
\centering
\begin{tabular}{rrrr}
\\
\hline
 & Construction & Local search & Path-relinking\\
\hline
First 250 iterations &  7.66 &  101.82 & 105.45 \\
Next 250 iterations &  3.95 &   51.95 & 82.83  \\
\hline
\end{tabular} 
\label{tabavtime} 
\end{table}

Another experiment was performed to evaluate the time required 
for HH and DM-HH to achieve a target solution value. 
Each strategy was run 100 times (with different random seeds), until a target
solution was reached for a specific instance. 
In each run, if the target value was not found in 500 iterations, then the post-optimization was performed until the target value was found or the elite set used for the post-optimization procedure was not updated.
  
The instance rw1000-p25 was used as the test case, and three targets were analyzed: 
an easy target (value 24964), an intermediate (value 24948), and a more 
difficult one (value 24923).

Figures \ref{fig:tempoalvoitfacil}, \ref{fig:tempoalvoitmedio} and \ref{fig:tempoalvoitdificil} show, 
for each target, the evaluation of the strategies. 
For each seed, the time in which the target was reached is plotted. 
DM-HH was able to find the easy target in all executions, the intermediate target in all except one execution and the difficult target in all except 4 executions, while HH was not able to find the easy target in 3 executions, the intermediate target in 23 executions and the difficult target in 33 executions.

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{tempoalvoit-rw1000-p25-facil.pdf}
\caption{Analysis of convergence to an easy target for instance rw1000-p25.}
\label{fig:tempoalvoitfacil}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{tempoalvoit-rw1000-p25-medio.pdf}
\caption{Analysis of convergence to an intermediate target for instance rw1000-p25.}
\label{fig:tempoalvoitmedio}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{tempoalvoit-rw1000-p25-dificil.pdf}
\caption{Analysis of convergence to a difficult target for instance rw1000-p25.}
\label{fig:tempoalvoitdificil}
\end{figure}

We can observe that in almost all executions, for
the three targets, the DM-HH reached the target before the HH.
Also, DM-HH was able to find the targets more often than HH.

The results of these experiments evidenced that the incorporation of 
the data-mining procedure into the original HH heuristic was able to improve 
substantially its efficiency, 
in terms of the time required to achieve a target solution.

Figures~\ref{fig:tttfacil}, \ref{fig:tttmedio} and \ref{fig:tttdificil} 
show another comparison between HH and DM-HH strategies,
based on \emph{time-to-target} (TTT) plots~\cite{Aiex}, which are
used to analyze the behavior of randomized algorithms.

A TTT plot is generated, initially, by executing an algorithm several 
times and measuring the time 
required to reach a solution at least as good as a target solution.
We executed each strategy a hundred times. 
Then, the $i$-th sorted running time $t_i$ is associated with a 
probability $p_i=(i-1/2)/100$ and the points
$z_i = (t_i,p_i)$, for $i = 1,\dots,100$, are plotted.
Each plotted point indicates the probability (vertical axis) for the strategy
to achieve the target solution in the indicated time (horizontal axis).

The plots presented in Figures~\ref{fig:tttfacil}, \ref{fig:tttmedio} and \ref{fig:tttdificil}
were generated by the execution of HH and DM-HH, 
for instance rw1000-p25, using the same three target solution values used in the 
previous experiment.

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{ttt-plot_rw1000-p25_facil.pdf}
\caption{Time-to-target plot for an easy target.}
\label{fig:tttfacil}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{ttt-plot_rw1000-p25_medio.pdf}
\caption{Time-to-target plot for an intermediate target.}
\label{fig:tttmedio}
\end{figure}

\begin{figure}[ht]
\centering
\includegraphics[width=.70\textwidth]{ttt-plot_rw1000-p25_dificil.pdf}
\caption{Time-to-target plot for a difficult target.}
\label{fig:tttdificil}
\end{figure}

For the easy target, we observe in Figure~\ref{fig:tttfacil} that HH and DM-HH
present similar behaviors until about 50 seconds when the probability for DM-HH to
find the target value starts to be greater than for HH. This happens because, until the data mining procedure 
is  executed in DM-HH, both strategies obtain the same solution in each iteration, but DM-HH starts to find the target value faster when the patterns are used.

For both intermediate and difficult targets, Figures~\ref{fig:tttmedio} and \ref{fig:tttdificil}
respectively, we observe that DM-HH behaves better than HH.
These plots indicate that DM-HH is able to reach difficult 
solutions faster than HH. 

The analysis performed in this section shows that the new data
mining version of the hybrid heuristic was able to reach good quality
solutions much faster than the original strategy.
It also demonstrates that a sophisticated heuristic like HH, which is improved with
a memory-based intensification mechanism, like the path-relinking technique, can
benefit from the incorporation of a data mining procedure.

\chapter{Conclusions}
\label{sec:6}
In previous works, the DM-GRASP strategy, 
a hybrid version of the GRASP metaheuristic which 
incorporates a data mining procedure, was proposed and developed.
DM-GRASP has been successfully applied to solve 
different combinatorial optimization problems. 
This proposal was based on the hypothesis that patterns extracted from 
sub-optimal obtained solutions could
guide the search for better ones in less computational time.

The aim of this work was to introduce
a data mining procedure into a state-of-the-art heuristic 
for a specific problem in order to provide evidence that, 
when a technique is able to reach the optimal solution, 
or a near-optimal solution with little chance of improvements,
the mined patterns could be used to guide the search for the 
optimal or near optimal solutions in less computational time.

We then developed the DM-HH, a data mining version of a hybrid 
and state-of-the-art multistart heuristic to solve the p-median problem.
Computational experiments, conducted on a set of
instances from the literature, showed that the new version 
of the hybrid heuristic was able to reach optimal and 
near-optimal solutions, on average, 27.32\% faster than the 
original strategy, which represents significant savings
on execution times.

A secondary contribution of this work was to show that not only the traditional
GRASP metaheuristic but also other more sophisticated heuristics, improved with
a memory-based intensification mechanism, like the path-relinking technique,
could benefit from the incorporation of a data mining procedure.

These encouraging results motivate us, as future work, 
to try to introduce into other metaheuristics, like tabu search and
genetic algorithms, the idea of extracting 
patterns from sub-optimal solutions using data mining techniques and 
exploring them in search procedures.
We believe that other metaheuristics and many combinatorial optimization
problems can benefit from the incorporation of data mining techniques. 


%\begin{acknowledgements}
%If you'd like to thank anyone, place your comments here
%and remove the percent signs.
%\end{acknowledgements}

% BibTeX users please use one of
%\bibliographystyle{spbasic}      % basic style, author-year citations
%\bibliographystyle{spmpsci}      % mathematics and physical sciences
%\bibliographystyle{spphys}       % APS-like style for physics
%\bibliography{}   % name your BibTeX data base

% Non-BibTeX users please use

\begin{thebibliography}{1}
\bibitem{Agrawal}
R. Agrawal and R. Srikant,
{\em Fast algorithms for mining association rules},
Proceedings of the Very Large Data Bases Conference, pp.~487--499 (1994).

\bibitem{Aiex}
R. Aiex, M. G. C. Resende, and C. Ribeiro,
{\em {TTT} plots: a perl program to create time-to-target plots},
Optimization Letters 4, pp.~355--366 (2007).

\bibitem{Beasley}
J. E. Beasley,  
{\em A note on solving large p-median problems}, 
European Journal of Operational Research 21, pp.~270--273 (1985).

% \bibitem{Bongartz}
% I. Bongartz, P. H. Calamai, and A. R. Conn,   
% {\em A projection method for $l_{p}$ norm location-allocation problems}, 
%  Mathematical Programming 66, pp.~283--312 (1994).

\bibitem{CFN77}
G. Cornu\'ejols, M.~L. Fisher, and G.~L. Nemhauser, 
{\em Location of Bank Accounts to Optimize Float: An Analytical Study of Exact and Approximate Algorithms}, 
Management Science 23, pp.~789--810 (1977).

\bibitem{Feo89}
T.~A. Feo and M.~G.~C. Resende,  
{\em A probabilistic heuristic for a computationally difficult set covering problem}, 
Operations Research Letters 8, pp.~67--71 (1989).

\bibitem{Feo95}
T.~A. Feo and M.~G.~C. Resende,
{\em Greedy randomized adaptive search procedures}, 
Journal of Global Optimization 6, pp.~109--133 (1995).

\bibitem{Festa1}
P. Festa and M.~G.~C. Resende,
{\em An annotated bibliography of GRASP - Part I: Algorithms}, 
International Transactions in Operational Research 16, pp.~1--24 (2009).

\bibitem{Festa2}
P. Festa and M.~G.~C. Resende,
{\em An annotated bibliography of GRASP - Part II: Applications}, 
International Transactions in Operational Research 16, pp.~131--172 (2009).

\bibitem{Fleurent}
C. Fleurent and F. Glover,
{\em Improved Constructive Multistart Strategies for the Quadratic 
Assignment Problem Using Adaptive Memory},
INFORMS J. on Computing 2, pp.~198--204 (1999).

% \bibitem{Gamal}
% M.~D.~H. Gamal and S. Salhi,
% {\em A cellular heuristic for the multisource Weber problem},
% Computers \& Operations Research 30, pp.~1609--1624 (2003).

\bibitem{Glo00a}
F. Glover, 
{\em Multi-start and strategic oscillation methods -- Principles to exploit adaptive memory},
Computing Tools for Modeling, Optimization and Simulation: Interfaces in Computer Science and Operations Research, 
Kluwer, pp.~1--24 (2000).

\bibitem{Glover}
F. Glover, M. Laguna, and R. Mart\'i, 
{\em Fundamentals of scatter search and path-relinking},
Control and Cybernetics 29, pp.~653--684 (2000).

\bibitem{GloLagMar03}
F. Glover, M. Laguna, and R. Mart\'\i,
{\em Scatter search and path relinking: Advances and applications},
Handbook of Metaheuristics, Kluwer, pp.~1--35 (2003).

\bibitem{Goethals}
B. Goethals and M. J. Zaki,
{\em Advances in Frequent Itemset Mining Implementations: Introduction to FIMI-03},
Proceedings of the IEEE ICDM Workshop on Frequent Itemset Mining Implementations (2003).

\bibitem{Grahnel}
G. Grahne and J. Zhu,
{\em Efficiently using prefix-trees in mining frequent item-sets},
Proceedings of the IEEE ICDM Workshop on Frequent Itemset Mining Implementations (2003).

\bibitem{Han00}
J. Han, J. Pei, and Y. Yin,
{\em Mining frequent patterns without candidate generation},
Proceedings of the ACM SIGMOD International Conference on Management of Data, pp.~1--12 (2000).

\bibitem{Han}
J. Han and M. Kamber,
{\em Data Mining: Concepts and Techniques},
2nd Ed., Morgan Kaufmann Publishers (2006).

\bibitem{HM97}
P. Hansen and N. Mladenovi\'c,
{\em Variable Neighborhood Search for the p-Median}, 
Location Science 5, pp.~207--226 (1997).

\bibitem{HMP01}
P. Hansen, N. Mladenovi\'c, and D. Perez-Brito, 
{\em Variable Neighborhood Decomposition Search},
Journal of Heuristics 7, pp.~335--350 (2001).

\bibitem{Kariv}
O. Kariv and L. Hakimi,
{\em An algorithmic approach to network location problems, Part II: The p-medians},
SIAM Journal of Applied Mathematics 37, pp.~539--560 (1979).

\bibitem{Lin}
S. Lin and B.W. Kernighan, 
{\em An effective heuristic algorithm for the traveling salesman problem},
Operations Research 21, pp.~498--516 (1973).

\bibitem{Lodi}
A. Lodi, K. Allemand, and T. M. Liebling,
{\em An evolutionary heuristic for quadratic 0-1 programming},
European Journal of Operational Research 119, pp.~662--670 (1999).

% \bibitem{Mladenovic}
% N. Mladenovi\'{c}, J. Brimberg, P. Hansen and  Jos\'{e} A. Moreno-P\'{e}rez, 
% {\em The p-median problem: A survey of metaheuristic approaches},
% European Journal of Operational Research 179, pp.~927--939 (2007).

\bibitem{Orlando}
S. Orlando, P. Palmerini, and R. Perego,
{\em Adaptive and resource-aware mining of frequent sets},
Proceedings of the IEEE International Conference on Data Mining, pp.~338--345 (2002).

\bibitem{Osman96}
I. Osman and G. Laporte, 
{\em Metaheuristics: A bibliography},
Annals of Operations Research 63, pp.~513--623 (1996).

\bibitem{Rao71}
M.R. Rao,
{\em Cluster analysis and mathematical programming},
Journal of the American Statistical Association 66, pp.~622--626 (1971).


\bibitem{Reinelt}
G. Reinelt,
{\em TSPLIB: A traveling salesman problem library},
ORSA Journal on Computing 3, pp.~376--384 (1991), 
{\em http://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/}.

% \bibitem{Resende}
% M.~G.~C. Resende and C.~C. Ribeiro,
% {\em Greedy randomized adaptive search procedures},
% Handbook of Metaheuristics, Kluwer (2003).

\bibitem{Resende04}
M.~G.~C. Resende and R.~F. Werneck,
{\em A hybrid heuristic for the p-median problem},
Journal of Heuristics 10, pp.~59--88 (2004).

\bibitem{ResWer03a}
M.~G.~C. Resende and R.~F. Werneck,
{\em On the implementation of a swap-based local search procedure for the $p$-median problem}, 
Proceedings of the Fifth Workshop on Algorithm Engineering and Experiments -- ALENEX03, pp.~119--127 (2003).

\bibitem{Ribeiro04}
M.~H.~F. Ribeiro, V.~F. Trindade, A. Plastino, and S.~L. Martins, 
{\em Hybridization of GRASP metaheuristic with data mining techniques}, 
Proceedings of the ECAI Workshop on Hybrid Metaheuristics,
pp.~69--78 (2004).

\bibitem{Ribeiro06}
M.~H.~F. Ribeiro, V.~F. Trindade, A. Plastino, and S.~L. Martins,
{\em Hybridization of GRASP metaheuristic with data mining techniques},
Journal of Mathematical Modeling and Algorithms 5, pp.~23--41 (2006).

% \bibitem{Ribeiro09}
% C. C. Ribeiro, I. Rosseti and R. Vallejos,
% {\em On the use of run time distributions to evaluate and compare 
% stochastic local search algorithms},
% Proceedings of the Engineering Stochastic Local Search Algorithms Workshop,
% Lecture Notes in Computer Science 5752, pp.~16--30 (2009).

\bibitem{Salhi06}
S. Salhi,
{\em Heuristic Search: The Science of Tomorrow},
OR48 Keynote Papers, Operational Research Society, pp.~38--58 (2006).

\bibitem{Santos05}
L.~F. Santos, M.~H.~F. Ribeiro, A.~Plastino, and S.~L. Martins,
{\em A hybrid GRASP with data mining for the maximum diversity problem}, 
Proceedings of the  International Workshop on Hybrid Metaheuristics,
LNCS 3636, pp.~116--127 (2005).

\bibitem{Santos06}
L.~F. Santos, C.~V. Albuquerque, S.~L. Martins, and A. Plastino,
{\em A hybrid GRASP with data mining for efficient server replication for reliable multicast}, 
Proceedings of the IEEE GLOBECOM Conference (2006).

\bibitem{Santos08}
L.~F. Santos, S.~L. Martins, and A.~Plastino,
{\em Applications of the DM-GRASP heuristic: A survey},
International Transactions in Operational Research 15, pp.~387--416 (2008).

\bibitem{SL00}
E.~L.~F. Senne and L.~A.~N. Lorena, 
{\em Lagrangean/Surrogate Heuristics for p-Median Problems},
Computing Tools for Modeling, Optimization and Simulation: Interfaces in Computer Science and Operations Research, 
M. Laguna and J.~L. Gonz\'alez-Velarde (eds.), Kluwer, pp. 115--130 (2000).

\bibitem{Taillard03}
E.~D. Taillard, 
{\em Heuristic Methods for Large Centroid Clustering Problems},
Journal of Heuristics 9, pp.~51--74 (2003).

\bibitem{Talbi}
E.~G. Talbi, 
{\em A taxonomy of hybrid metaheuristics}, 
Journal of Heuristics 8, pp.~541--564 (2002).

\bibitem{Tansel}
B.~C. Tansel, R.~L. Francis, and T.~J. Lowe. 
{\em Location on networks: A survey},
Management Science 29, pp.~482--511 (1983).

\bibitem{TB68}
M.~B. Teitz and P. Bart, 
{\em Heuristic Methods for Estimating the Generalized Vertex Median of a Weighted Graph}, 
Operations Research 16, pp.~955--961 (1968).

\bibitem{Vin69}
H.D. Vinod, 
{\em Integer programming and the theory of groups},
Journal of the American Statistical Association 64, pp.~506--519 (1969).

\bibitem{Whitaker83}
R. Whitaker, 
{\em A Fast Algorithm for the Greedy Interchange of Large-Scale Clustering and Median Location Problems},
INFOR 21, pp.~95--108 (1983).

\bibitem{Witten}
I.~H. Witten and E. Frank, 
{\em Data Mining: Practical Machine Learning Tools and Techniques}. 
2nd Ed., Morgan Kaufmann Publishers (2005).
\end{thebibliography}

\end{document}
% end of file template.tex

