\documentclass[conference]{IEEEtran}
\usepackage{cite} %For improved citations
\usepackage[pdftex]{graphicx}
\graphicspath{{./images/}}
\usepackage[cmex10]{amsmath}
\usepackage{array}
\usepackage{mdwmath}
\usepackage{mdwtab}
\usepackage{eqparbox}
\usepackage[caption=false]{caption}
\usepackage[font=footnotesize]{subfig}
\usepackage{fixltx2e}
\usepackage{url}
\usepackage{ctable}

% My renew commands
\newcommand{\vlan}{\textsc{VGrid}}
\newcommand{\lids}{\emph{Lids}}
\newcommand{\vlanbench}{VBench}
\newcommand{\eq}{\vlanbench EQ}
\newcommand{\ms}{\vlanbench MS}

\newcommand{\homo}{\ensuremath{S_H}}
\newcommand{\cano}{\ensuremath{S_C}}
\newcommand{\cpu}{\ensuremath{S_C^{cpu}}}
\newcommand{\bdw}{\ensuremath{S_C^{\mathit{inter}\beta}}}
\newcommand{\lat}{\ensuremath{S_C^{\mathit{inter}\lambda}}}

\hyphenation{net-works} % Correct hyphenations

\begin{document}

\title{\vlan: an emulator of heterogeneous computational grids}
\author{\IEEEauthorblockN{Basile Clout and Eric Aubanel}
  \IEEEauthorblockA{Faculty of Computer Science\\
    University of New Brunswick\\
    Fredericton, NB\\
    Canada, E3B5A3\\
    \{basile.clout, aubanel\}@unb.ca} 
}

\maketitle
\pagestyle{plain}

%Abstract
\begin{abstract}
  Heterogeneous distributed computing is found in a variety of fields
  including scientific computing, Internet and mobile
  devices. Computational grids focusing primarily on
  computationally-intensive operations have emerged as a new infrastructure
  for high performance computing. Specific algorithms such as
  scheduling, load balancing and data redistribution have been devised
  to overcome the limitations of these systems and take full advantage
  of their processing power. However, experimental validation and
  fine-tuning of such algorithms require multiple heterogeneous
  platforms and configurations. We present \vlan, a computational grid
  emulator based on the heterogeneous emulator Wrekavoc. \vlan\
  reshapes the virtual topology of a homogeneous cluster, degrades the
  performance of the processors and modifies the characteristics of
  the network links in an accurate, independent and reproducible
  way. We demonstrate its utility using two parallel matrix-vector programs and a series of four emulated grids.
\end{abstract}


\section{Introduction}
\label{sec:intro}


Computational grids are usually composed of heterogeneous computing
and communication resources, and the performance of these resources
may vary greatly. In order to harness the computational power of these
systems while managing their increasing complexity, algorithms from
research fields as varied as dynamic load balancing \cite{challenges}
and mesh partitioning \cite{pagridL}, are being adapted to or specially
devised for computational grids. Furthermore, experimental evaluation
is required to tune and compare the performance of these
algorithms. However, thorough testing of a distributed algorithm on
real-world large-scale platforms is not realistic, as it requires a
fully functional implementation with controllable
heterogeneous resource configurations.

Heterogeneous distributed environment simulators such as
\cite{simgrid} or \cite{bricks} allow for reproducible experiments on
a large sample of heterogeneous computational grids and experimental
conditions. Although these tools are convenient for rapid prototyping
and validation of distributed algorithms, they fail to correctly
describe the complex interactions between software stack and hardware,
including library implementations, operating system specific features
and hardware optimizations.

Despite not having the flexibility and scalability of a simulator, the
approach taken in Wrekavoc \cite{Wrekavoc} overcomes some of their
limitations. Wrekavoc independently degrades the performance of the
processors and network links of a homogeneous cluster in an accurate
and reproducible way. Wrekavoc takes advantage of the \emph{tc} Linux
kernel subsystem and \emph{netem}, a network emulator for
\emph{tc}. The resulting heterogeneous platform exhibits heterogeneous
processor and link characteristics while keeping all the complexity of
the original cluster. However, Wrekavoc does not take into account the
topology of a platform, practically neglecting the effect of
congestion or reordering. Large-scale network emulators such as
Modelnet \cite{modelnet} and WANem \cite{wanem} provide such
capability by allowing the user to evaluate distributed networked
systems in realistic Wide Area Network or Internet over a LAN
environment. Nevertheless, some machines have to be dedicated to the
traffic emulation. Similarly, Virtual LAN (IEEE 802.1Q, \cite{vlan})
is a technology allowing the creation of virtual distinct networks
from an original LAN, but requires dedicated hardware.

In this context, we have developed \vlan, a computational grid
emulator. \vlan\ extends the concepts implemented in Wrekavoc while
adding a topology emulator specifically designed to emulate
computational grids. \vlan\ takes advantage of the Linux kernel
features and does not require dedicated or complementary hardware.


\section{Overview of \vlan}
\label{sec:overview}

\vlan\ is composed of a set of Python and Perl scripts aiming at
emulating, testing and monitoring the characteristics of a
heterogeneous computational grid. It makes heavy use of subsystems
available in recent Linux kernels ($>$ 2.6.16).

\vlan\ creates virtual topologies made of interconnected clusters
(Fig. \ref{fig:topo}). Let $S=S(P,L)$ be the processor graph
corresponding to a heterogeneous computational grid with $n$
clusters. $P$ is the set of processors and $L$ the set of network
links of this platform. A cluster $C_{i<n}(P_{i<n}, L_{i<n}) \subset
S$ is a fully connected subgraph of $S$. $P_{i}$ is the set of
processors in $C_{i}$ and $L_{i}$ the set of links of $L$ that
connects two processors of $P_i$. We call ``routers'' the nodes of
$C_i$ connected to one or more nodes from another cluster. For each
cluster $C_i$, \vlan\ homogeneously degrades the computing performance
of the processors in $P_i$, as well as the available bandwidth and
latency of the links in $L_i$. Finally, \vlan\ modifies the bandwidth
and latency of the inter-cluster links.

\begin{figure}[!t]
  \centering
  \includegraphics[width=2.8in]{grid3.pdf}
  \caption{\vlan\ model of a computational grid}
  \label{fig:topo}
\end{figure}



The description of a heterogeneous computational grid includes three
parts. First, the topology of the heterogeneous grid is described in
terms of interconnected nodes and clusters. One cluster may be
connected to several other clusters and may contain multiple virtual
routers themselves connected to multiple routers from other
clusters. Two optional parts define the bandwidth and latency of
intra- and intercluster links, and the percentage of original CPU
power allowed to processes belonging to a user for each
cluster. \vlan\ locally writes 6 independent scripts for each node:
three for setting the topology, processor performance and network
links characteristics, and the others for deleting the
configuration. The scripts are sent to each node and run locally. The
configuration can be modified or reset by running one or more delete
scripts.

\vlan\ also monitors the behavior of the emulated grids with nmap
\cite{nmap}, iperf \cite{iperf} and ping. \vlan\ can graphically
represent in 3D the grid using Graphviz \cite{graphviz} and VTK
\cite{vtk}.


\section{Architecture}
\label{sec:architecture}

\subsection{Topology}
\label{sec:topo}



\begin{figure}[!t]
  \centering  
  \includegraphics[width=2.8in]{grids/homo.pdf} 
  \caption{\lids: \homo}
  \label{fig:lids}
\end{figure}

In order to emulate the virtual topology corresponding to the graph
$S=S(P,L)$ on a homogeneous cluster represented by $C=C(P,L_C)$, the
routing tables of the nodes in $P$ are modified to set static
routes. An entry for each node of the emulated grid is added to the
routing table. This entry specifies the next hop in the shortest path
from the local node $p$ to the destination node $q$, as well as a tag
corresponding to the cluster $C_q$ the destination node belongs
to. The first step in the determination of the next hop in the
shortest path between two hosts $p$ and $q$ of the emulated grid is
the calculation of the shortest path $(C_p, C_q)$ in terms of clusters
with a Floyd-Warshall algorithm. By default, each link is uniformly
weighted. If the destination node $q$ is not in the emulated grid, no
particular routing is performed and the default route is used. If $p$
is a router and is directly connected to the next hop cluster in
$(C_p, C_q)$, the packet is addressed to the corresponding router
 ($C_3$ in Fig. \ref{fig:topo}). In the other cases, the packet is
addressed to the router of $C_p$ directly connected to the next hop
cluster. Furthermore, a router may send an ICMP redirect message to
the sender if it knows a better route to the destination. Therefore we
disable this option for all routers.


\subsection{Processor performance}
\label{sec:local_proc}

The processor performance is controlled by \texttt{cpulim}, the
CPU-lim implementation of Wrekavoc. This ``CPU limiter'' supervises
the execution of the processes belonging to selected
users. \texttt{cpulim} suspends the execution of processes having
consumed more than the required fraction of CPU time (with SIGSTOP
and SIGCONT). Experiments have shown that this method is the most
flexible and accurate way to control the execution of
processes. Because this supervision only affects a given user, it does
not modify the behavior of kernel daemons such as the network stack.


\subsection{Link bandwidth and latency}
\label{sec:local_links}

The Linux Quality of Service subsystem and its userspace tool
\emph{tc} (for Traffic Controller) provide a system of queues and
rules that allows for a flexible shaping and policing of network
packets. Independent kernel modules implement additional features: for
instance, netem (for network emulation) and HTB \cite{htb} (for
hierarchical token bucket) provide latency and bandwidth shaping
capabilities. Packets are sent at a constant rate and delay, dropped
if necessary, and bursts in traffic are smoothed. Wrekavoc adds two
sets of \emph{tc} rules for each pair of nodes in the grid in order to
both shape on egress and police the traffic on ingress. \vlan\
considerably simplifies these rules. Each packet being sent through an
interface is independently analysed. The classification and the
resulting shaping of the packet is performed by matching the cluster
of the next hop node. For example, a simple node (not a virtual
router) has only one rule: ``send packet at the bandwidth and latency
of the local cluster''. As a side effect, a packet being sent to a 
node that may be part of the same physical network but does not
belong to any virtual cluster will not be slowed down nor rejected.


\begin{figure*}[!t]
  \centering
  \subfloat[]{
    \includegraphics[width=3.5in]{test_vlan/latency/lats.pdf} 
    \label{fig:accuracy_latency_a}}%
  \subfloat[]{%
    \includegraphics[width=3.5in]{test_vlan/latency/latg.pdf} 
    \label{fig:accuracy_latency_b}}%
  \hfill
  \subfloat[]{%
    \includegraphics[width=3.5in]{test_vlan/udp/udp.pdf}
    \label{fig:accuracy_bandwidth}}%
  \subfloat[]{%
    \includegraphics[width=3.5in]{test_vlan/cpu/cpu.pdf}
    \label{fig:accuracy_cpu}}%
  \caption{Experimental accuracy of \vlan\ emulations: \footnotesize{(a) low
    link latencies ($<10ms$), (b) high link latencies ($>10ms$), (c)
    link bandwidth, (d) processor performance}}
  \label{fig:accuracy}
\end{figure*}


\section{\vlan\ experiments}
\label{sec:experiments}


All experiments are performed on the homogeneous cluster \lids\ 
(Fig. \ref{fig:lids}). \lids\ consists of a switched fast Ethernet
network with a nominal data rate of 100MBits/s interconnecting 9
Compaq AlphaServer DS10. Experimentally, the links have a latency of
$\lambda=0.1ms$ and a bandwidth of $95.6MBits/s$ (measured with
UDP). Each node has one 617MHz Alpha chip and between 258 and 385 MB
of physical memory. All nodes run Debian GNU/Linux 4.0 with a custom
2.6.21.1 kernel and the MPICH2 library. Eight of these nodes are
considered to be part of the emulated grid, the ninth being the
monitoring front node.


\section{Quality of the emulation}
\label{sec:res_quality}

\subsection{Accuracy}
\label{sec:accuracy}
The latency $\lambda$ of a link is measured by sending one-byte
packets between the two end nodes (Fig. \ref{fig:accuracy_latency_a}
and Fig. \ref{fig:accuracy_latency_b}). The overhead observed when
$\lambda<1ms$ is due to the limitation of the frequency of the CPU
clock (HZ) in kernel $<$ 2.6.23. HZ affects the granularity with which
the netem module is able to delay packets. On \lids\ where $HZ=1024$,
the delay increments are of approximately 1ms. Experimentally, the
observed latency is always slightly higher (less than 1ms) than the
configured latency. This limitation of netem does not impact the
overall accuracy of the link latency.

The maximum available bandwidth for UDP is measured with Iperf
(Fig. \ref{fig:accuracy_bandwidth}). Measurements with TCP on short
periods of time are affected by the TCP slow-start congestion control,
but show comparable results.

The degradation of the CPU performance is estimated by measuring the
execution time of a small computationally intensive program before and
after degradation (Fig. \ref{fig:accuracy_cpu}). The performance
overhead of CPU-lim is almost 40\% (between 0 and 0.1\% of
degradation). However, the relative performance (zeroed at 0.1\% of
degradation, Fig. \ref{fig:accuracy_cpu}) is correct. The loss of
accuracy in the absolute speed of a CPU is not significant as we are
interested in the emulation of different kinds of heterogeneity where
the relative performance between nodes is central.



\subsection{Independence}
\label{sec:independence}

The latency and bandwidth of a link are measured when the processor
performance of its endpoints and some of its characteristics are
degraded (Fig. \ref{fig:independency}). Neither latency nor bandwidth
of a link are significantly influenced by the degradation of the
processor performance. Similarly, the latency of a link can be set
independently of the bandwidth. However, large latencies ($>130$ms) have
an important impact on the bandwidth as measured by Iperf.

\begin{figure*}[!t]
  \centering
  \subfloat[]{
    \includegraphics[width=3.4in]{test_vlan/dependencies/depcpubdw.pdf} 
    \label{fig:independency_cpu_lat}}
  \subfloat[]{
    \includegraphics[width=3.4in]{test_vlan/dependencies/depcpulat.pdf} 
    \label{fig:independency_cpu_bdw}}
  \subfloat[]{
    \includegraphics[width=3.4in]{test_vlan/dependencies/depbdw.pdf} 
    \label{fig:independency_lat}}
  \subfloat[]{
    \includegraphics[width=3.4in]{test_vlan/dependencies/deplat.pdf} 
    \label{fig:independency_bdw}}
  \caption{Independence of the emulated characteristics of the grid: \footnotesize{
    (a) bandwidth as a function of processor degradation, (b) latency
    as a function of processor degradation, (c) latency as a function of
    bandwidth degradation and (d) bandwidth as a function of latency degradation}}
  \label{fig:independency}
\end{figure*}


\subsection{Reproducibility}
\label{sec:repro}

The distributions of the work times of 100 executions of \ms\ in case
(b) for different heterogeneous computational grids (see Section
\ref{sec:res_behavior} below) are represented by a boxplot in
Fig. \ref{fig:repro}. The distributions are very
localized. Therefore, the emulated experimental conditions are highly
reproducible.

\begin{figure}[!t]
  \centering  
  \includegraphics[width=3.5in]{test_vlan/wrekabench2/repro/reprof.pdf} 
  \caption{Boxplots of \ms\ on five \vlan\ emulations}
  \label{fig:repro}
\end{figure}


\section{Run-time behavior}
\label{sec:res_behavior}

We emulate the computational grids of Fig. \ref{fig:grids}. Each grid
emphasizes a different kind of heterogeneity. The heterogeneous grids
$S_C$ are composed of two clusters $C_1$ and $C_2$ with 4 processors
each.  

\begin{figure*}[!t]
  \centering
  \subfloat[\cano]{
    \includegraphics[width=3.4in]{/grids/cano8.pdf} 
    \label{fig:grids_topo}}
  \subfloat[\cpu]{
    \includegraphics[width=3.4in]{/grids/cano_cpu8.pdf} 
    \label{fig:grids_cpu}}
  \hfill
  \subfloat[\bdw]{
    \includegraphics[width=3.4in]{/grids/cano_inter8.pdf} 
    \label{fig:grids_bdw}}
  \subfloat[\lat]{
    \includegraphics[width=3.4in]{/grids/cano_interlat8.pdf} 
    \label{fig:grids_lat}}
  \caption{Emulated heterogeneous computational grids: \footnotesize{(a)
      modification of the topology, (b) Topology of \cano\ with
      degradation of the processor performance in one cluster, (c)
      Topology of \cano\ with inter-cluster bandwidth degradation and
      (d) Topology of \cano\ with inter-cluster latency degradation}}
  \label{fig:grids}
\end{figure*}


In order to study the experimental behavior of these emulated
platforms, we implement \vlanbench, a modified parallel row-wise
matrix-vector multiplication. An empty loop of size $n_l$ is appended
to each row-vector multiplication in order to add a fixed
computational complexity to the algorithm. The ratio of communication
to work time (execution time including computation $t_{comp}$ and
communication $t_{comm}$ times, excluding IO) is $r$. In this way,
we can create computational ($r \approx 0$) and communication ($r \approx
1$) intensive parallel applications. The ratios $r$ are given for three values of $n_l$ in Table \ref{tab:r}. This table also gives the runtimes of the sequential version of the algorithm.

\eq\ shares the blocks of rows equally among the $|P|$ available
processors. If the matrix contains $1000$ rows and $|P|=8$, each
processor computes $l=125$ scalars of the result vector. On the other
hand, \ms\ uses a dynamic master-slave algorithm. The master node
distributes blocks of 5 rows to the idle workers. When a worker
finishes its computation, it sends the result back to the master and
waits for further data. Although only $|P|-1$ processors are effectively
working on the matrix-vector multiplication, \ms\ takes
advantage of the heterogeneous performance of the platform.

% \begin{table}[!t]
%   \center
%   \caption{\vlanbench\ configurations}
%   \begin{minipage}[b]{3in}
%     \center
%     \begin{tabular}{cccc} \hlx{hv}
%       & $n_l$ & $r$\footnote{Measured with \eq\ on \homo} & interpretation \\ \hlx{hv}
%       (a) & 0 & 97.2 & $t_{comm} \gg t_{comp}$ \\
%       (b) & $1.2 \times 10^6$ & 29.04 & $t_{comm} \approx t_{comp}$\\
%       (c) & $10 \times 10^6$ & 4.72 & $t_{comm} \ll t_{comp}$ \\ \hlx{hv}    
%     \end{tabular}
%     \label{tab:r}
%   \end{minipage}
% \end{table}

\ctable[ 
caption=\vlanbench\ configurations and sequential runtimes,
label=tab:r, 
pos=!t,
center,
]
{@{} lcccc @{}}
{ \tnote{r measured with \eq\ on \homo}
}{ 
\toprule 
case & $n_l$&$t_{seq}(s)$ & $r_{eq/\homo}$\tmark[a] & interpretation \\ \midrule
   (a) & 0 & 0.142& 97.2 & $t_{comm} \gg t_{comp}$ \\
   (b) & $1.2 \times 10^6$& 11.9 & 29.04 & $t_{comm} \approx t_{comp}$ \\
   (c) & $10 \times 10^6$& 98.1 & 4.72 & $t_{comm} \ll t_{comp}$ \\ \bottomrule 
} 


\eq\ and \ms\ are executed on the emulated grids with a random
floating point $1000 \times 1000$ matrix and a vector of size $1000$
for the three different values of $n_l$ given in Table \ref{tab:r}. The master process in \ms\ is mapped to processor 1 (Fig. \ref{fig:grids}).

\ctable[
caption=Comparison of \vlan\ work times,
label=tab:work_times,
pos=!t,
star,
]{@{} lcccccccccccc @{}}{
\tnote[a]{Work time, in $s$}
\tnote[b]{ratio $r=\frac{t_{comm}}{t_{comp}+t_{comm}}$ in \%}
}{
  \toprule
    &\multicolumn{4}{c}{(a) $r \approx 1$} & % t_{comm} \gg t_{comp}
    \multicolumn{4}{c}{(b) $r \approx 0.3$} % $t_{comm} \approx t_{comp}
    &\multicolumn{4}{c}{(c) $r \approx 0$} \\ \cmidrule(lr){2-5} % $t_{comm} \ll t_{comp}
    \cmidrule(lr){6-9} \cmidrule(lr){10-13}
    &\multicolumn{2}{c}{\eq} & \multicolumn{2}{c}{\ms}
    &\multicolumn{2}{c}{\eq} & \multicolumn{2}{c}{\ms}
    &\multicolumn{2}{c}{\eq} & \multicolumn{2}{c}{\ms} \\
    \cmidrule(lr){2-3} \cmidrule(lr){4-5} \cmidrule(lr){6-7}
    \cmidrule(lr){8-9} \cmidrule(lr){10-11} \cmidrule(lr){12-13}  
    Grid & $t$\tmark[a] & $r$\tmark[b] & $t$ & $r$ & $t$ & $r$ &
    $t$ & $r$ & $t$ & $r$ & $t$ & $r$\\
    \midrule
\homo & 0.631 & \emph{97.20} & 0.689 & \emph{97.14} & 2.100 & \emph{29.04} & 1.854 & \emph{6.09} & 12.87 & \emph{4.72} & 14.36 & \emph{0.95} \\
\cano & 0.656 & \emph{97.15} & 0.698 & \emph{95.99} & 2.124 & \emph{29.69} & 1.919 & \emph{6.70} & 12.93 & \emph{4.90} & 14.51 & \emph{1.71} \\
\cpu & 0.672 & \emph{96.67} & 0.703 & \emph{94.64} & 4.442 & \emph{14.94} & 3.427 & \emph{4.29} & 76.65 & \emph{0.85} & 34.06 & \emph{0.41} \\
\bdw & 3.641 & \emph{99.5} & 0.955 & \emph{95.59} & 5.107 & \emph{70.73} & 2.787 & \emph{12.20} & 15.92 & \emph{22.74} & 14.91 & \emph{1.12} \\
\lat & 1.979 & \emph{97.97} & 1.234 & \emph{96.79} & 3.384 & \emph{55.20} & 2.641 & \emph{18.71} & 14.16 & \emph{12.89} & 16.42 & \emph{4.17} \\
\bottomrule
}


\begin{figure*}[!t]
  \centering
  \subfloat[$r \approx 1$]{
    \includegraphics[width=2.2in]{test_vlan/wrekabench2/template_result_r1.pdf} 
    \label{fig:parallel_exps_results_a}}
  \subfloat[$r \approx 0.3$]{
    \includegraphics[width=2.2in]{test_vlan/wrekabench2/template_result_r05.pdf} 
    \label{fig:parallel_exps_results_c}}
  \subfloat[$r \approx 0$]{
    \includegraphics[width=2.2in]{test_vlan/wrekabench2/template_result_r0.pdf} 
    \label{fig:parallel_exps_results_b}}
  \caption{Comparison of \vlan\ work times}
  \label{fig:bench_worktimes}
\end{figure*}


\begin{figure*}[!t]
  \centering
  \subfloat[$r \approx 1$]{
    \includegraphics[width=2.2in]{test_vlan/wrekabench2/template_r1.pdf} 
    \label{fig:grid_ms_a}}
  \subfloat[$r \approx 0.3$]{
    \includegraphics[width=2.2in]{test_vlan/wrekabench2/template_r05.pdf} 
    \label{fig:grid_ms_b}}
  \subfloat[$r \approx 0$]{
    \includegraphics[width=2.2in]{test_vlan/wrekabench2/template_r0.pdf} 
    \label{fig:grid_ms_c}}
  \caption{Block distribution in \vlan}
  \label{fig:bench_blocks}
\end{figure*}


% On the homogeneous cluster \homo, \eq\ with (b) and (c) configurations
% exhibit a linear speedup, while in (a) the communications are too slow
% to get any speedup out of the 8 nodes. Thanks to the overlapping of
% communications and computations in \ms, \ms\ in case (b) is better
% than \eq\ even if only 7 nodes are really working. The work times
% increases greatly in (b) and (c) on \cpu. On (a) however, the
% performance decrease is low as the computational load is insignificant
% before the communications. For this kind of heterogeneity, \ms\ is
% clearly much faster. The distribution of the blocks on \cpu\ shows
% that powerful processors perform more work that slower ones. The work
% times increase greatly on \bdw\ and \lat\ in cases (a) and (b) for
% \eq\ because of the importance of the communication. In the same way,
% \ms\ performs adequately as nodes closer to the master carry out more
% work.


\subsection{Work times}
\label{sec:work_times}

Fig. \ref{fig:bench_worktimes} and Table \ref{tab:work_times} compare
the work times and ratios $r$ of \eq\ and \ms\ for various $n_l$ and
heterogeneous grids. On the homogeneous cluster \homo, \eq\ with the
(b) and (c) configurations show an almost linear speedup. For (a)
however, the communication load is too important to get any speedup
out of the 8 nodes. Because in \ms\ the computational work is shared
between 7 nodes only, \ms\ in (a) and (c) is slower than \eq. Still,
in (b) \ms\ is better than \eq. This is due to the overlapping of
communication and computation in the master-slave algorithm. This effect is not
significant in (c), where the communication times are
negligible. Having said that, \ms\ in (c) reaches a linear speedup (for the 7 slave processors).

The work times are slightly bigger in \cano\ than in \homo\ where the
communication pattern involves slightly more communications between
processors from different clusters.

The impact of the processor performance decrease in \cpu\ is low
(albeit present) in (a) because of the low computational
load. But the work time of \eq\ is doubled in (b) and increased
six-fold in (c). The fact that the latter ratio is not equal to five is due to the overhead of CPU-lim; the work times in cluster $C_1$ are all close to 15.4 s, which is five times less than the work times in $C_2$. The ratios $r$ are smaller, demonstrating the impact
of the processor performance decrease on the parallel application
execution. In these cases, the \ms\ algorithm is clearly much faster
even with one less working processor because the work is dynamically
shared.

On \bdw\ and \lat, the work times increase greatly in (a) and more
slightly in (b) and (c). At the same time, the ratios $r$ are
generally bigger, demonstrating the impact of the link performance
decrease on the parallel application execution. The application is
also more sensitive to the degradation of the bandwidth (grid
\bdw). Indeed, the messages exchanged between processors are big
enough for the bandwidth to have a significant impact on the
application. \ms\ performs generally better than \eq\ because of the
overlapping of communications and computations. Nevertheless, it is not
always true since the \ms\ algorithm involves more exchanges of messages
between processors. This effect is more sensitive to the latency, and
is as expected noticeable on \lat\ in (a) and (c).


\subsection{Block decomposition in \ms}
\label{sec:block_decomposition}

Fig. \ref{fig:bench_blocks} shows the distribution of the blocks among
the processors performed by the \ms\ algorithm. Altogether, on the
heterogeneous grids, the processors in $C_1$ receive more work than
those in $C_2$. This is due to the topology of the network: the nodes
closer to the master can poll the master faster. The heterogeneity of
the grids increases this effect. Besides, the routers of the network,
processors 0 and 4, generally receive a load smaller than or equal to
that of the other processors in the same cluster. Indeed, these nodes are also busy
forwarding packets between processors.

For an application where the computational load is not significant
(Figure \ref{fig:grid_ms_a}), the nodes of $C_1$ receive a lot more
work than those in $C_2$ when the inter-cluster link is degraded (\bdw
and \lat). However, the impact of the computational performance of the
grid on the dynamic allocation of rows is low (\cpu). The situation
is reversed in Figure \ref{fig:grid_ms_c}, where the communication are
negligible compared to the computation time. However, when both the
communication and computation requirements of the application are
significant (Figure \ref{fig:grid_ms_b}), both the processor
performance (\cpu) and the network (\bdw and \lat) heterogeneity of
the grids influence the distribution of the load.

Overall, the emulated platforms perform as expected. These
observations demonstrate the utility of \vlan.


\section{Conclusion}
\label{sec:conclusion}

We have developed a tool to emulate, control and monitor a heterogeneous
computational grid. \vlan\ extends the main concepts implemented in
Wrekavoc to computational grids, and adds a virtual topology
emulator. \vlan\ works on the routing tables and processor performance
of the nodes as well as on the latency and bandwidth of the network links. It
is designed to be fast, portable and easy-to-use, while guaranteeing the
accuracy and reproducibility of the emulation.  

Future work on \vlan\ should focus on the emulation of dynamic grids,
where the routes between hosts are automatically determined by the
characteristics of the network. Realistic emulation of computational
grids would also have to take into account congestion and reordering
issues.


%Acknowledgements(Fundings, ...)
\section*{Acknowledgment}
This work was funded by a Discovery grant of the Natural Sciences and Engineering Research Council of Canada.

%Biblio (to replace by a thebibliography environment before submitting)

%\IEEEtriggeratref{2} %For last page column alignment
\bibliographystyle{IEEEtran}
%\bibliography{IEEEabrv,./mybiblio}

% Generated by IEEEtran.bst, version: 1.12 (2007/01/11)
\begin{thebibliography}{13}
\providecommand{\url}[1]{#1}
\csname url@samestyle\endcsname
\providecommand{\newblock}{\relax}
\providecommand{\bibinfo}[2]{#2}
\providecommand{\BIBentrySTDinterwordspacing}{\spaceskip=0pt\relax}
\providecommand{\BIBentryALTinterwordstretchfactor}{4}
\providecommand{\BIBentryALTinterwordspacing}{\spaceskip=\fontdimen2\font plus
\BIBentryALTinterwordstretchfactor\fontdimen3\font minus
  \fontdimen4\font\relax}
\providecommand{\BIBforeignlanguage}[2]{{%
\expandafter\ifx\csname l@#1\endcsname\relax
\typeout{** WARNING: IEEEtran.bst: No hyphenation pattern has been}%
\typeout{** loaded for the language `#1'. Using the pattern for}%
\typeout{** the default language instead.}%
\else
\language=\csname l@#1\endcsname
\fi
#2}}
\providecommand{\BIBdecl}{\relax}
\BIBdecl

\bibitem{challenges}
K.~Devine, B.~Hendrickson, E.~Boman, M.~S. John, and C.~Vaughan, ``Design of
  dynamic load-balancing tools for parallel applications,'' in \emph{ICS '00:
  Proceedings of the 14th international conference on Supercomputing}.\hskip
  1em plus 0.5em minus 0.4em\relax New York, NY, USA: ACM, 2000, pp. 110--118.

\bibitem{pagridL}
X.~Wu and E.~Aubanel, ``Incorporating latency in heterogeneous graph
  partitioning,'' \emph{Workshop on Parallel and Distributed Scientific and
  Engineering Computing (PDSEC), in Proc. 21st Intl. Parallel and Distributed
  Processing Symposium}, pp. 25--29, March 2007.

\bibitem{simgrid}
L.~M. A.~Legrand and H.~Casanova, ``Scheduling distributed applications: the
  simgrid simulation framework,'' \emph{CCGRID}, pp. 138--145, 2003.

\bibitem{bricks}
A.~Takefusa, S.~Matsuoka, K.~Aida, H.~Nakada, and U.~Nagashima, ``Overview of a
  performance evaluation system for global computing \newpage scheduling algorithms,''
  in \emph{HPDC '99: Proceedings of the 8th IEEE  International Symposium on
  High Performance Distributed Computing}.\hskip 1em plus 0.5em minus
  0.4em\relax Washington, DC, USA: IEEE Computer Society, 1999, p.~11.

\bibitem{Wrekavoc}
L.-C. Canon and E.~Jeannot, ``Wrekavoc: a tool for emulating heterogeneity,''
  \emph{Parallel and distributed Processing Symposium, 2006. IPDPS 2006 20th
  International}, pp. 0--0, April 2006.

\bibitem{modelnet}
D.~Gupta, K.~Yocum, M.~McNett, A.~C. Snoeren, G.~M. voelker, and A.~Vahdat,
  ``To infinity and beyond: Time-warped network emulation,'' in
  \emph{Proceedings of the 3rd ACM/USENIX Symposium on Networked Systems Design
  and Implementation (NSDI)}, May 2006.

\bibitem{wanem}
\BIBentryALTinterwordspacing
T.~C. Services, \emph{WANem 1.1 Wide Area Network Emulator}, TCS Innovations
  Lab - Performance Engineering Research Center, 10 2007. [Online]. Available:
  \url{http://wanem.sourceforge.net}
\BIBentrySTDinterwordspacing

\bibitem{vlan}
\BIBentryALTinterwordspacing
IEEE, ``IEEE standard for local and metropolitan area networks -- virtual
  bridged local area networks, 802.1q,'' IEEE Computer Society, Tech. Rep.,
  2005. [Online]. Available:
  \url{http://standards.ieee.org/getieee802/download/802.1Q-2005.pdf}
\BIBentrySTDinterwordspacing

\bibitem{nmap}
\BIBentryALTinterwordspacing
Fyodor, ``Nmap reference guide,'' Insecure.org, Tech. Rep., 2007. [Online].
  Available: \url{http://nmap.org/man/}
\BIBentrySTDinterwordspacing

\bibitem{iperf}
\BIBentryALTinterwordspacing
M.~Gates, A.~Tirumala, J.~Dugan, and K.~Gibbs, ``Iperf user docs,'' NLANR,
  Tech. Rep., 2003. [Online]. Available:
  \url{http://dast.nlanr.net/Projects/Iperf}
\BIBentrySTDinterwordspacing

\bibitem{graphviz}
E.~R. Gansner and S.~C. North, ``An open graph visualization system and its
  applications to software engineering,'' \emph{Softw. Pract. Exper.}, vol.~30,
  no.~11, pp. 1203--1233, 2000.

\bibitem{vtk}
W.~J. Schroeder, K.~M. Martin, and W.~E. Lorensen, ``The design and
  implementation of an object-oriented toolkit for 3d graphics and
  visualization,'' in \emph{VIS '96: Proceedings of the 7th conference on
  Visualization '96}.\hskip 1em plus 0.5em minus 0.4em\relax Los Alamitos, CA,
  USA: IEEE Computer Society Press, 1996, pp. 93--ff.

\bibitem{htb}
\BIBentryALTinterwordspacing
M.~Devera, ``Htb linux queuing discipline manual - user guide,'' M. Devera web
  site, Tech. Rep., 2002. [Online]. Available:
  \url{http://luxik.cdi.cz/~devik/qos/htb/manual/userg.htm}
\BIBentrySTDinterwordspacing

\end{thebibliography}

\end{document}



%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End: 
