\RequirePackage{fix-cm}
\documentclass[smallextended]{svjour3}       % onecolumn (second format)
%\documentclass[twocolumn]{svjour3}          % twocolumn
%\documentclass[review]{svjour3}
%\RequirePackage{fix-cm} % duplicate load removed: fix-cm is already loaded above, before \documentclass
\smartqed  % flush right qed marks, e.g. at end of proof
\usepackage{graphicx}
\usepackage{lineno,hyperref}
%\usepackage{graphicx} % duplicate load removed: graphicx is already loaded above
\usepackage{amsmath, amssymb, amscd}
\usepackage{multirow}
\usepackage[lined,linesnumbered,ruled]{algorithm2e}
\usepackage{subfig}
%\usepackage[]{color} % removed: superseded by xcolor (loaded below with the [table] option); loading both can cause option clashes
\usepackage{setspace}
\usepackage{threeparttable}
\usepackage{fancybox}
\usepackage{pifont}
\usepackage{framed}
\usepackage{epigraph}
\usepackage{calc}
\usepackage{cite}
\usepackage[table]{xcolor}

%\usepackage{datetime}
%
%\newcommand{\HRule}{\rule{\linewidth}{0.5mm}}
%
% %The following definition helps to indent the paragraph
% \newenvironment{myindentpar}[1]%
% {\begin{list}{}%
%          {\setlength{\leftmargin}{#1}}%
%          \item[]%
% }
% {\end{list}}
%
%\newtheorem{example}{Example}
%\newtheorem{theorem}{Theorem}
%\newtheorem{fact}{Fact}
%\newtheorem{corollary}{Corollary}
\newtheorem{Lemma}{Lemma}
\newtheorem{Assumption}{Assumption}
\newtheorem{Property}{Property}
\newtheorem{Definition}{Definition}
\newtheorem{Observation}{Observation}
\nolinenumbers


%\SetEndCharOfAlgoLine{}
%\DontPrintSemicolon

%\journal{Journal of \LaTeX\ Templates}
\graphicspath{{./figures/}}

%Preamble for Definitions
%-----------------------------------------------------------------
\newcommand{\equals}{\stackrel{\mathrm{def}}{=}}
\newcommand{\PerCoreAnalysis}{\operatorname{PerCoreAnalysis}}
\newcommand{\SystemAnalysis}{\operatorname{SystemAnalysis}}
\newcommand{\status}{\operatorname{status}}
\newcommand{\stepindex}{\operatorname{stepnum}}
\newcommand{\Success}{\operatorname{success}}
\newcommand{\Failure}{\operatorname{failure}}
\newcommand{\core}{\operatorname{core}}
\newcommand{\capacity}{\operatorname{capacity}}
%\renewcommand{\algorithmicrequire}{\textbf{Input:}}
%\renewcommand{\algorithmicensure}{\textbf{Output:}}
%\renewcommand{\algorithmiccomment}[1]{\textbf{\emph{// #1}}}
% \pagenumbering{arabic}
%\newcommand{\TRUE}{\operatorname{TRUE}}
\newcommand{\every}{\operatorname{each}}
%\newcommand{\FALSE}{\operatorname{FALSE}}
\newcommand{\AllRiConverge}{\operatorname{RiModified}}
\newcommand{\AllCoreConverge}{\operatorname{KeepLooping}}
\newcommand{\TR}{\operatorname{TR}}
\newcommand{\TBR}{\operatorname{TBR}}
\newcommand{\NBR}{\operatorname{NBR}}
\newcommand{\CIN}{\operatorname{carry\_in}}
\newcommand{\AGAP}{\operatorname{a\_gap}}
\newcommand{\MAXREQUESTS}{\operatorname{maxreq}}
\newcommand{\TOTAL}{\operatorname{total}}
\newcommand{\RES}{\operatorname{res}}
\newcommand{\LP}{\operatorname{lp}}
\newcommand{\HP}{\operatorname{hp}}
\newcommand{\BR}{\operatorname{BR}}
\newcommand{\BL}{\operatorname{BL}}
\newcommand{\BP}{\operatorname{BP}}
\newcommand{\ARL}{\operatorname{ARL}}
\newcommand{\ARH}{\operatorname{ARH}}
\newcommand{\paths}{\operatorname{paths}}
\newcommand{\head}{\operatorname{head}}
\newcommand{\smiddle}{\operatorname{body}}
\newcommand{\tail}{\operatorname{tail}}
\newcommand{\dsr}{\operatorname{dsr}}
%\newcommand{\equals}{\stackrel{\operatorname{def}}{=}}
\newcommand{\MREQUESTS}{\operatorname{maxreq1}}
\newcommand{\NReq}{\operatorname{NReq}}
\newcommand{\PCRP}{\operatorname{PCRP}}


\newtheorem{Example}{Example}
\DeclareMathOperator*{\argmax}{argmax} % starred form places limits below in display math (amsmath)

\newcommand{\todo}[1]{{\color{red}todo: #1\\}}

\newcommand{\todoReply}[1]{{\color{blue}Ans: #1\\}} 
\newcommand{\Treqprof}[1]{\mathbb{T}_{#1}}
\newcommand{\SamplingRegionSize}[1]{L^{\operatorname{reg-size}}_{#1}}
\newcommand{\Regions}[1]{\mathbb{G}_{#1}}
\newcommand{\Requests}[1]{\mathbb{R}_{#1}}
\newcommand{\NbReqPerRegion}[2]{\eta_{#1, #2}}
\newcommand{\NbReqPerTask}[1]{\eta_{(#1)}}
\newcommand{\request}[2]{\operatorname{req}_{#1, #2}}
%\newcommand{\reqrel}[2]{\operatorname{rel}_{#1, #2}}
%\newcommand{\reqserv}[2]{\operatorname{serv}_{#1, #2}}
\newcommand{\reqrel}[2]{\operatorname{rel}_{#1, #2}}
\newcommand{\reqserv}[2]{\operatorname{srv}_{#1, #2}}
\newcommand{\BusAvailability}[1]{\mathbb{B}_{#1}}
\newcommand{\Tmin}[2]{\mathcal{T}^{\min}_{#1}(#2)}
\newcommand{\Tmax}[2]{\mathcal{T}^{\max}_{#1}(#2)}
\newcommand{\Assignment}[2]{\sigma_{#1}(#2)}
\newcommand{\SetOfConstraints}[1]{\mathbb{C}_{#1}}
\newcommand{\LBslot}[2]{\operatorname{LBslot}_{#1, #2}}
\newcommand{\UBslot}[2]{\operatorname{UBslot}_{#1, #2}}
\newcommand{\LBTime}[2]{\operatorname{LBTime}_{#1, #2}}
\newcommand{\UBTime}[2]{\operatorname{UBTime}_{#1, #2}}
\newcommand{\TaskLBslot}[1]{\operatorname{LBslot}_{#1}}
\newcommand{\TaskUBslot}[1]{\operatorname{UBslot}_{#1}}
\newcommand{\TaskLBTime}[1]{\operatorname{LBTime}_{#1}}
\newcommand{\TaskUBTime}[1]{\operatorname{UBTime}_{#1}}
%\newcommand{\tsum}{\operatorname{tsum}}
\newcommand{\CumulDelay}[1]{D(#1)}
\newcommand{\AllAssignments}[3]{\operatorname{FBS}_{#1}^{#2}(#3)}
\newcommand{\AllAssignmentSlot}[3]{\operatorname{FBS}_{#1}^{#2}(#3)}
\newcommand{\GroupAssign}[4]{A^{#2, #4}_{#1}(#3)}
\newcommand{\Delay}[4]{D^{#2, #4}_{#1}(#3)}
\newcommand{\SDelay}[3]{D^{#2}_{#1}(#3)}
\newcommand{\Sreqrel}[4]{r^{#2, #4, #3}_{#1}}
\newcommand{\Sreqserv}[4]{s^{#2, #4, #3}_{#1}}
\newcommand{\Assign}[5]{\sigma^{#2, #4, #3}_{#1}(#5)}
\newcommand{\ConstantOne}{\mathcal{K}_{k+1}^{\min}}
\newcommand{\ConstantTwo}{\mathcal{K}_{k+1}^{\max}}

\newcommand{\AssignmentOne}[2]{\sigma_{#1}(#2)}
\newcommand{\AssignmentTwo}[2]{\sigma_{#1}'(#2)}
\newcommand{\DelayOne}[2]{\mathcal{D}_{#1}(#2)}
\newcommand{\DelayTwo}[2]{\mathcal{D}_{#1}'(#2)}
\newcommand{\reqrelOne}[2]{\operatorname{rel}_{#1,#2}}
\newcommand{\reqrelTwo}[2]{\operatorname{rel}_{#1,#2}'}
\newcommand{\reqservOne}[2]{\operatorname{srv}_{#1,#2}}
\newcommand{\reqservTwo}[2]{\operatorname{srv}_{#1,#2}'}


\newcommand{\frameSize}{\ensuremath{f}} % plain math italic; \textit is a text-mode command and should not appear inside math
\newcommand{\slots}{\ensuremath{\phi}}
\newcommand{\MaxRegDelay}{{\operatorname{MaxRegDelay}}}

\newcommand{\SlotSet}[1]{\mathbb{S}_{#1}} 
\newcommand{\Mapping}{\mathbb{M}}

\newcommand{\UBslotNew}[1]{\operatorname{UBslot}_{#1}}
\newcommand{\Cell}[2]{\operatorname{cell}(#1, #2)}
\newcommand{\CellElement}[2]{e_{#1, #2}}
\newcommand{\finalResult}{\operatorname{maxDelay}}

\newcommand{\paragraphSpace}{\hspace{1mm}}

%-----------------------------------------------------------------


\bibliographystyle{spmpsci}

\begin{document}
\title{A Framework for Memory Contention Analysis in Multi-Core Platforms}

\author{Dakshina Dasari     \and      Vincent Nelis \and          Benny Akesson %etc.
}


\institute{   Dakshina  Dasari \at
              CISTER-Research Unit \\
              %Tel.: +123-45-678910\\
              %Fax: +123-45-678910\\
              \email{dandi@isep.ipp.pt}           %  \\
%             \emph{Present address:} of F. Author  %  if needed
           \and
           Vincent Nelis \at
              CISTER-Research Unit \\
              \email{nelis@isep.ipp.pt}           %  \\
              \and 
              Benny Akesson \at
              Czech Technical University in Prague  \\
              \email{kessoben@fel.cvut.cz}
}

\date{Received: date / Accepted: date}
% The correct dates will be entered by the editor


\maketitle


\begin{abstract}
The last decade has witnessed a major shift towards the deployment of
embedded applications on multi-core platforms. However, real-time
applications have not
been able to fully benefit from this transition, as the computational
gains offered by multi-cores are often offset by performance degradation due to shared
resources, such as main memory. To efficiently use multi-core
platforms for real-time systems, it is hence essential to tightly
bound the interference when accessing shared resources. 
Although there has been much recent work in this area, a
remaining key problem is to address the diversity of memory arbiters in the analysis
to make it applicable to a wide range of systems.

This work addresses this problem of diverse arbiters by proposing a general framework to
compute the maximum interference caused by the shared memory bus and
its impact on the execution time of the tasks running on the cores,
considering different bus arbiters. Our novel approach clearly
demarcates the arbiter-dependent and independent stages in the
analysis of these upper bounds. The arbiter-dependent phase takes as inputs, the
arbiter and the task memory-traffic pattern and produces a
model of the availability of the bus to a given task. Then, based on
the availability of the bus, the arbiter-independent phase determines
the worst-case request-release scenario that maximizes the interference
experienced by the tasks due to the contention for the bus. 
We show that the framework can deal with diverse arbiters by
applying it to a memory bus shared by a
fixed-priority arbiter, a time-division multiplexing (TDM) arbiter, and an
unspecified work-conserving arbiter using applications from the MediaBench test
suite. We also experimentally evaluate the quality of the analysis by comparison
with a state-of-the-art TDM analysis approach and consistently showing 
a considerable reduction in maximum interference.
\end{abstract}


\nolinenumbers

\section{Introduction}

Embedded systems are increasingly based on multi-core
platforms to cater to increasing performance demands while satisfying power constraints~\cite{Kollig09,Berkel09,Benini12DATE,nowotsch2012leveraging}. These platforms reduce cost by sharing resources, such as buses and external memories between the applications executing on the cores.
Many embedded applications have been successfully deployed on these platforms and are harnessing the
benefits of their computational capabilities.  However, system designers are unable to leverage the entire potential provided by these platforms to deploy hard real-time applications, for which
upper bounds on worst-case execution times (WCET) must be determined at design time and deadlines must be strictly met at run time.
Although techniques to determine the WCET of tasks executing on a single-core architecture~\cite{wcet-summary} exist,
there are still many open issues in a multi-core setting due to the resource sharing between the cores~\cite{Dasari13SIES}. 
This paradigm of resource sharing does not adhere to the temporal and spatial isolation of components desired by the system designers,
because it results in \emph{contention} between tasks executing asynchronously on different cores, which in turn further complicates the process of computing the WCETs of the tasks. 
Additionally, resource sharing also introduces a circular dependence between WCET and inter-core interference which complicates the WCET analysis process. 
This problem is important since memory-intensive 
tasks are stalled for considerable time during data transfers between the cores and the memory~\cite{nowotsch2012leveraging}. 
Failure to capture this contention at design time results in
non-conservative bounds, while pessimistic analyses may result in substantial over-estimation and lead to under-utilized resources. A particular challenge is that the contention is heavily dependent on the arbitration policy of the memory bus, which ranges from work-conserving priority-based policies in high-performance soft real-time systems to
non-work-conserving time-division-multiplexing (TDM) for critical systems that require robust partitioning.

Existing work addresses the problem of deriving upper bounds on memory bus contention, but the analysis is
tightly coupled to a particular arbitration policy, such as
TDM~\cite{Rosen07_rtss07,Chatto,Timon,Schra2010,Schra2011,UnifiedWCET:2014} or
non-specified work-conserving arbiters~\cite{Icess11,Ernst}, and a generic framework to handle different 
arbitration mechanisms does not exist. As a result, a change of memory arbiter currently implies
adopting a new analysis with different inputs and assumptions.

This article addresses this problem by proposing a general framework for
memory bus contention analysis that addresses the range of arbitration
policies in multi-core systems.
The three main contributions of this work are: 
1) A model that captures the best-case and worst-case availability
of the shared memory bus. This model can be applied to a range of arbitration policies in a streamlined manner, and we
demonstrate its flexibility by applying it to two very different cases, being
non-work-conserving TDM and work-conserving fixed-priority arbitration. We also show how round-robin arbitration and unspecified work-conserving arbiters can be captured as special-cases of these two arbiters.
2) An algorithm that uses the proposed bus model and leverages the task request-profiles to compute the maximum memory bus contention that a given task can incur.
3) A method to tighten the computed bounds and increase the efficiency and scalability of the
algorithm by splitting the task request-profile into multiple smaller sampling regions.
%
We experimentally evaluate the proposed approach by
applying it to a multi-core system providing access
to an external DRAM via a shared bus. The flexibility of the framework is demonstrated by applying it to different arbiters on a set of applications from the MediaBench benchmark~\cite{lee1997mediabench}. 
The tightness of the WCET bounds is also evaluated for different sample region sizes.
Lastly, our results are compared to a state-of-the-art approach~\cite{SchranWRT}, showing that our framework provides
tighter bounds for TDM arbitration.

The rest of this article is organized as
follows. Section~\ref{sec:model} presents the system model, followed
by an overview of our four-step approach in
Section~\ref{sec:probdef}. The different steps of our approach are
then discussed in detail, starting with our novel bus availability model
in Section~\ref{sec:bus_availability}. We then proceed by showing how
to capture the worst-case interference caused by the shared memory bus in Sections~\ref{sec:wc_delay}
and~\ref{sec:wc_assignment}. This is followed by a method to
improve the accuracy of the analysis and reduce its computation time in
Section~\ref{sec:complexity_reduction}. 
Related work is discussed in Section~\ref{sec:related_work}, before we 
experimentally evaluate the approach in Section~\ref{sec:experiments}.
The article is concluded in
Section~\ref{sec:conclusion}.


\section{System Model}
\label{sec:model}

First, we present the platform model, followed by a characterization
of the tasks and their corresponding request profiles. We then explain
the assumptions on the task scheduler, before formulating the exact problem studied in this work.

\subsection{Platform Model}
\label{ssec:platform_model}

The considered multicore platform contains $m$ cores denoted by $\pi_1, \pi_2, \ldots, \pi_m$. 
It is assumed that the cores do not share cache memory or that all levels of shared cache are disabled or partitioned. 
This assumption of a private/partitioned cache aligns with the recommendations for certification of hard real-time systems~\cite{IEC61508}. A cache miss in the last-level cache results in a memory request to the shared DRAM.
The cores are assumed to be fully timing compositional~\cite{Wilhelm}, such as the ARM7, and stall on every memory request. These assumptions are consistent with current state-of-the-art approaches~\cite{Jian,SchranWRT,Schra2010,Rosen07_rtss07} for bus contention analysis.

The cores communicate with the memory through a shared memory bus.
The bus controller grants access to the bus in units of \emph{bus slots}, where each slot is of constant length $\TR$ that corresponds to an upper bound on the time to serve a memory request, expressed in clock cycles at the frequency of the processor. Contention between the cores is resolved by the memory bus arbitration policy, which depends on the considered platform. Fixed-priority arbitration is typically used in systems with diverse response time requirements, TDM in systems that require robust partitioning between applications, and round robin when a simple notion of fairness between applications executing on different cores is required.

\subsection{Task Model}
\label{ssec:task_model}

The applications are modeled by a set, $\tau$, of sporadic and constrained-deadline tasks in which a task $\tau_i \in \tau$ is characterized by three parameters: $C_i$, $T_i$, and $D_i \leq T_i$. The parameter $C_i$ denotes an upper bound on the execution time of task $\tau_i$ when it executes uninterrupted in \emph{isolation}, i.e., with no contention on the shared memory bus; $T_i$ denotes the minimum interval between two consecutive activations of $\tau_i$ (the period) and $D_i$ is the deadline of the task. In other words, every task $\tau_i$ releases a (potentially infinite) sequence of jobs, each such job must execute for at most $C_i$ time units within $D_i$ time units from its release and two successive jobs of the same task are released at least $T_i$ time units apart.

The parameter $C_i$ can be computed by well-known techniques in WCET analysis~\cite{wcet-summary}. This work focuses on computing $C_i'$, which denotes an upper bound on the execution time when $\tau_i$ executes \emph{with} contention on the memory bus, i.e., when co-scheduled tasks are running on the other cores. Clearly, the value of $C_i'$ is not an inherent property of $\tau_i$ but depends on the arbitration policy of the memory bus and on the memory request pattern of the tasks executing concurrently on the other cores during $\tau_i$'s execution. Note that the proposed application model is very general, as it only assumes sporadic constrained-deadline tasks executing in parallel on cores and accessing a shared memory. As such, the work applies to many application domains of real-time systems, e.g., automotive and avionics.

\subsection{Request and Region Modeling}
\label{ssec:task_request_profiles}

We proceed by introducing the notations required for modeling the memory traffic generated by the tasks. To gain a deeper insight into the request distribution, the execution time span of the job is divided into \emph{sampling regions} or sampling intervals.
The entire execution of each job of task $\tau_i$ is divided into $x_i = C_i/\SamplingRegionSize{i}$ sequential temporal sampling regions, where $\SamplingRegionSize{i}$ is the duration (in time units, e.g., processor cycles) of each region. 
For brevity, we shall use ``task'' (instead of job of a task) to refer to the execution instance in the rest of the document.

For each job of task $\tau_i$, the maximum number of memory requests issued \emph{within} each region is captured by executing the task a significant number of times over different inputs and taking the maximum value. Some measurement-based analysis techniques have been proposed to generate test data that would target good code coverage~\cite{measurement}. Figure~\ref{fig:reqmodel} depicts this task segmentation. It illustrates the same task run three times over different sets of inputs. During the first run, the number of requests issued in each region is recorded and depicted as a yellow box. In that run, the task completes within the fourth region. The green and orange boxes represent two other runs of the same task that complete during the third and sixth region, respectively. The red boxes are the maximum values observed in all runs in each region. Note that the flow-control path leading to the maximum number of memory requests in a region may not be the one that results in the maximum execution time of the application, which introduces some pessimism in our analysis. It is key to understand that the regions defined in our analysis are a sequence of equidistant sampling \emph{points in time} and not the start and end points of a function or any other chunk of code commonly referred to as basic blocks in the literature.

This measurement-based method returns a set $\Regions{i} = \{\NbReqPerRegion{i}{1}, \NbReqPerRegion{i}{2}, \ldots, \NbReqPerRegion{i}{x_i}\}$, where each $\NbReqPerRegion{i}{g}$ ($g \in [1 \dots x_i]$) is the observed maximum number of requests that task $\tau_i$ can generate \emph{within} its $g$'th sampling region. 
Note that $\sum_{g=1}^{x_i} \NbReqPerRegion{i}{g}$ denotes the upper bound on the maximum number of requests that task $\tau_i$ can generate during the entire execution of one of its jobs and, for simplicity, we sometimes use the notation $\NbReqPerTask{i}$ to denote this value, i.e., $\NbReqPerTask{i} \equals \sum_{g=1}^{x_i} \NbReqPerRegion{i}{g}$.

\begin{figure}[htb]
\centering
\includegraphics[width=1.0\columnwidth]{sampling_regions.jpg}
\caption{Illustration of task-region profiles, each with length $\SamplingRegionSize{i}$ time units} 
\label{fig:reqmodel}
\end{figure}


We next denote by $\Requests{i} = \{\request{i}{1}, \request{i}{2}, \ldots, \request{i}{\NbReqPerTask{i}}\}$, the set of all requests that $\tau_i$ can generate during its execution. Each request $\request{i}{k}$ is modeled by the tuple $\left\langle \reqrel{i}{k}, \reqserv{i}{k} \right\rangle$, where $\reqrel{i}{k}$ and $\reqserv{i}{k}$ denote the release and service time of request $\request{i}{k}$ during $\tau_i$'s execution, respectively. Together, these values enable the \emph{cumulative delay} of $\tau_{i}$ due to shared
resource accesses to be computed as
$\sum_{k=1}^{\NbReqPerTask{i}} (\reqserv{i}{k}- \reqrel{i}{k})$.
Obviously, the exact values of $\reqrel{i}{k}$ and $\reqserv{i}{k}$ are not known before run time.

\subsection{Scheduler Specification}

We consider a partitioned scheme of task assignment in which each task is assigned to a core at design time and is not allowed to migrate from its assigned core to another one at run time, i.e. a fully partitioned non-migrative scheduling scheme. Regarding the scheduling policy on each core, we consider a non-preemptive scheduler and hence do not deal with cache-related and task-switching overheads. 
We make the non-work-conserving assumption as follows: whenever a task completes earlier than its WCET (say on its assigned CPU $\pi_p$), the scheduler idles the core $\pi_p$ up to the theoretical WCET of the task (which requires an additional timer mechanism for the scheduler). This assumption is made to ensure that the number of bus requests within a time window computed at design time is not higher
at run time due to early completion of a task and the subsequent early execution of the following tasks. 


\subsection{Problem Statement}

After specifying the model for the platform, the tasks, and their
requests release and service pattern, we are now ready to state the problem addressed in this
work. Given: 
\begin{enumerate}
\item a multi-core platform conforming to the model described in Section~\ref{ssec:platform_model}, 
\item a set of tasks and their WCET $C_i$ \emph{in isolation}, as described in Section~\ref{ssec:task_model}, and 
\item the region-profiles of all these tasks, described in Section~\ref{ssec:task_request_profiles}, 
\end{enumerate}
the problem is to compute the WCET $C_i'$ of $\tau_i$ when $\tau_i$ executes concurrently with other
tasks. This implies finding a tight upper bound
on the cumulative delay incurred by 
all memory requests of $\tau_i$ considering the contention for the memory bus.
For each task $\tau_j$, we must choose a region length $\SamplingRegionSize{j}$ and obtain as described in Section~\ref{ssec:task_request_profiles}, the set of region-profiles $\Regions{j} = \{\NbReqPerRegion{j}{1}, \NbReqPerRegion{j}{2}, \ldots, \NbReqPerRegion{j}{x_j}\}$. Then, 
we must compute the release times and service times, $\reqrel{j}{k}$ and $\reqserv{j}{k}$, $\forall \request{j}{k} \in \Requests{j}$, under different arbitration policies, that result in the \emph{maximum cumulative delay}
$\DelayOne{i}{\NbReqPerTask{i}} = \sum_{k=1}^{\NbReqPerTask{i}} (\reqserv{i}{k}- \reqrel{i}{k})$.


%----------------------------------------------------------------------
\section{Overview}
\label{sec:probdef}

We proceed by giving
a high-level overview of the proposed analysis framework.
The analysis is presented in four main steps, illustrated in Figure~\ref{fig:unify}, 
to gradually build up complexity. The four steps are briefly
summarized in this section, while the following sections
present each step in detail.

\begin{figure}[htb]
\centering
\includegraphics[width=0.9\columnwidth]{unify-crop.pdf}
\caption{The main steps of the general analysis framework.} 
\label{fig:unify}
\end{figure}

\subsubsection*{Step 1: Modeling the availability of the bus}

Given that several tasks are co-scheduled on different cores and contend for the same shared bus, a given task $\tau_i$ may not get access to the bus immediately after generating a request. That is, when a task $\tau_i$ generates a memory request and therefore requests access to the bus, the bus may or may not be immediately available to serve that request. Section~\ref{sec:bus_availability} shows how we propose to model the availability of the bus to a given task $\tau_i$ using a generic model $\BusAvailability{i} = \left\langle \Tmin{i}{}, \Tmax{i}{} \right\rangle$. The functions $\Tmin{i}{j}$ and $\Tmax{i}{j}$ are an abstraction of the shared resource that
represent the earliest and latest instants at which the bus is available to $\tau_i$ for the $j^{th}$ time, as stated in Definitions~\ref{def:tmin} and~\ref{def:tmax}, respectively. From now on, we refer to the bus slots that are available to the task $\tau_i$ as \emph{the free bus slots} of $\tau_i$. We also re-state that each bus-slot is of duration $\TR$ and refers to an upper bound on the time to make one memory access. Hence, we assume a discrete time-line (in units of $\TR$) in which a request can be serviced only starting at the beginning of a time slot. 

\begin{Definition}
\label{def:tmin}
The function $\Tmin{i}{j}$ represents the earliest time-instant at which the bus may be available to task $\tau_i$ for the $j^{th}$ time, or in other words, the earliest time-instant of the $j^{th}$ free bus slot of $\tau_i$.
\end{Definition}

\begin{Definition}
\label{def:tmax}
The function $\Tmax{i}{j}$ represents the latest time-instant at which the bus may be available to task $\tau_i$ for the $j^{th}$ time, or in other words, the latest time-instant of the $j^{th}$ free bus slot of $\tau_i$. 
\end{Definition}

The order in which the bus slots are granted to the tasks depends on the arbitration mechanism. It hence follows that the two functions $\Tmin{i}{}$ and $\Tmax{i}{}$ also depend on the arbitration mechanism, as shown in Figure~\ref{fig:unify}. As we shall see in the next section, for some arbitration policies like Time-Division Multiplexing, the availability of the bus modeled by $\Tmin{i}{}$ and $\Tmax{i}{}$ is independent from the traffic generated from the other cores, while for many other arbitration mechanisms, such as fixed-priority scheduling, the interfering requests do influence the time at which the analyzed task $\tau_i$ can access the bus and hence play an important role in the computation of the functions $\Tmin{i}{}$ and $\Tmax{i}{}$. 

Figure~\ref{fig:tmintmaxexample} illustrates the earliest and the latest instants at which a given slot is available to task $\tau_i$. 
As seen in the figure, the earliest instant at which slot 1 may be available to task $\tau_i$, $\Tmin{i}{1}$, is at time $0$, meaning that either there are no pending requests from the other tasks that are co-scheduled on the interfering cores in $\bar{\pi}(i)$, or the task $\tau_i$ is executing in isolation and thus there is no contention on the bus. 
In contrast, we have $\Tmax{i}{1} = 5$ in this example, which means that $\tau_i$ may have to wait at most 4 slots before getting access to the bus.
Similarly, the availability for the subsequent slots is depicted in the figure.
Note that this illustrative example is not representative of any particular arbitration mechanism. 
%Section~\ref{sec:bus_availability} explains the computation of these functions for two different arbitration policies.
 

\begin{figure}[htb]
\centering
\includegraphics[width=1.0\columnwidth]{tmintmaxexample-crop.pdf}
\caption{Illustration of the functions $\Tmin{i}{}$ and $\Tmax{i}{}$}
\label{fig:tmintmaxexample}
\end{figure} 

\subsubsection*{Step 2: Computing the maximum cumulative delay}

Given the bus availability model $\BusAvailability{i} = \left\langle \Tmin{i}{}, \Tmax{i}{} \right\rangle$ of task $\tau_i$, 
we propose an algorithm to compute the maximum cumulative delay incurred by memory requests of $\tau_i$ considering contention on the shared bus.
To achieve this, we define the two concepts of \emph{request-to-slot assignment} and \emph{request-set mapping} as defined below.

\begin{Definition}
\label{def:rts_assignment}
A request-to-slot assignment in the context of a \emph{single request} $\request{i}{k}$ is denoted by $\Assignment{i}{k}$
and defines that the $k^{th}$ request generated by $\tau_i$, i.e. $\request{i}{k}$, is served in the $\Assignment{i}{k}^{th}$ bus slot available to task $\tau_i$, i.e. in the $\Assignment{i}{k}^{th}$ free bus slot of $\tau_i$.
\end{Definition}


\begin{Definition}
\label{def:rs_mapping}
For a given task $\tau_i$, a request-set mapping  \\ $\Mapping_i$ = $\{\Assignment{i}{1}, \Assignment{i}{2}, \ldots, \Assignment{i}{\NbReqPerTask{i}}\}$ defines that  $\ \forall k \in \Requests{i}$: $\request{i}{k}$ is assigned to the $\Assignment{i}{k}^{th}$ free bus slot of $\tau_i$. 
\end{Definition}

Given these definitions, we further divide this step into two phases; the first phase focuses on the maximum delay incurred by a single request and the second focuses on the maximum cumulative delay incurred by a set of consecutive requests.

\begin{description}
\item[Phase 1.] Given the bus availability model $\BusAvailability{i} = \left\langle \Tmin{i}{}, \Tmax{i}{} \right\rangle$ of task $\tau_i$, its $k^{th}$ request $\request{i}{k}$ and a request-to-slot assignment $\Assignment{i}{k}$ for that request, we compute the maximum delay that $\request{i}{k}$ can incur by computing a lower-bound on its release time, $\reqrel{i}{k}$, and an upper-bound on its service time, $\reqserv{i}{k}$, with the objective of maximizing its waiting time (i.e., $\reqserv{i}{k}-\reqrel{i}{k}$).
\item[Phase 2.] In the second phase, given the bus availability model represented as $\BusAvailability{i} = \left\langle \Tmin{i}{}, \Tmax{i}{} \right\rangle$ of task $\tau_i$ and a request-set mapping $\Mapping_i$ = $\{\Assignment{i}{1}, \Assignment{i}{2}, \ldots, \Assignment{i}{\NbReqPerTask{i}}\}$ for all its requests, we compute the overall maximum cumulative delay that can be incurred by these requests.
\end{description}
%As in the previous phase, assuming a given request-set mapping $\Mapping_i$, we find the release time and service time ($\reqrel{i}{k}$ and $\reqserv{i}{k}$), for all the corresponding requests with the objective of maximizing the cumulative delay of all these requests.
This proposed analysis to compute the maximum cumulative delay for a given request-set mapping is presented in Section~\ref{sec:wc_delay}.
 
\subsubsection*{Step 3: Finding the worst-case request-set mapping}

While the previous step provides an algorithm to compute the maximum cumulative delay for a \emph{given request-set mapping}, the
goal of this third step is to find a request-set mapping for which the maximum cumulative delay is the largest among all feasible mappings.
We propose an algorithm in Section~\ref{sec:wc_assignment} to determine such a request-set mapping for all the requests of a given task $\tau_i$. Our technique first computes an upper bound, $\TaskUBslot{i}$, on the number of free bus slots that can possibly be used by task $\tau_i$.
This upper bound gives us a conservative range $[1,\TaskUBslot{i}]$ of free bus slots within which all the requests of the analyzed task $\tau_i$ will be served. Note that this number of free bus slots, $\TaskUBslot{i}$, may be much greater than the number $\NbReqPerTask{i}$ of requests
to be served. 

A naive approach to maximize the cumulative delay incurred by the (at most) $\NbReqPerTask{i}$ requests of $\tau_i$ is to apply brute force, i.e. all the request-set mappings are explored and a maximum cumulative delay is computed for each of them using the method proposed in Step~2. At the end, only the largest cumulative delay from all mappings is returned. However, such a method does not scale and is computationally inefficient due to the exhaustive exploration of all the possible mappings.  
We significantly reduce the computation time of the proposed analysis by eliminating the request-set mappings that cannot possibly lead to the worst-case delays at an early stage of the analysis. 
 

\subsubsection*{Step 4: Tightening the analysis using sampling regions}

Having shown how to determine the worst request-set mapping in Step~3, and bounding the maximum cumulative delay for that mapping using the technique explained in Step~2, there is further scope for tightening the analysis by exploiting the 
information about the maximum number of requests in each of the constituent regions of the analyzed task. The region-based analysis limits the range of the potential free bus slots used by a set of requests. For example, if the $k^{th}$ request of $\tau_i$ is generated in the $g$'th region, then it \emph{cannot} be served in the $j^{th}$ free bus slot of $\tau_i$ if $\Tmax{i}{j} < \SamplingRegionSize{i} \times (g-1)$, where $\SamplingRegionSize{i}$ is the size of the sampling interval. 
From these constraints, we define a range $[\LBslot{i}{g}, \UBslot{i}{g}]$ 
for each region $g$, representing the first and last free bus slots in which requests from region $g$ can possibly be served. These bounds are employed by the proposed algorithm to tighten the analysis by defining a request-set mapping for each individual region. The maximum delays incurred  by the requests of each region are computed successively and the overall WCET is subsequently computed. 
This process is described in detail in Section~\ref{sec:complexity_reduction}.


\section{Modeling the Availability of the Bus}
\label{sec:bus_availability}

The memory bus is a shared resource, which means that any access to it by a given task may be deferred because of concurrent accesses from other tasks. 
To estimate the overall delay that can be incurred by a task due to the contention for a shared bus, a basic approach could be to first derive an upper bound on the delay that a \emph{single} access may incur. This upper bound is computed by constructing a worst-case scenario in which every competing task gathers all its accesses to the bus within the shortest possible time window. This creates a burst of accesses all concentrated in time and occurring exactly when the request from the analyzed task is released, thereby inducing the maximum delay for this request. Then, the overall delay that a \emph{sequence}
 of requests may suffer is computed by assuming that each access to the shared bus incurs this precomputed maximum delay. 
This assumption leads to overly conservative estimates, since the other tasks keep progressing in their execution, alternating between computation and memory fetch phases, and hence do not congest the memory bus at all times. 

We propose an alternative approach which bases its computation on a new modeling framework. Instead of computing a worst-case scenario for a single access to the shared bus and then considering that scenario for each and every request of the analyzed task, we model the overall availability of the bus to the analyzed task. Then, as the next step, we leverage this new model to derive an upper bound on the \emph{cumulative} delay that a \emph{sequence} of requests may incur. 

Our model captures the best-case and worst-case availability of the shared bus. It is based on the arbiter and coarse-grained memory access information provided by the task-region profiles. Specifically, for a given task $\tau_i$ under analysis and any positive integer $j$, we compute the two functions $\Tmin{i}{j}$ and $\Tmax{i}{j}$ that give the \emph{earliest} and \emph{latest} instants of the $j^{th}$ free bus slot of $\tau_i$. 
If $\tau_i$ is run in isolation, there are no competing requests for a work conserving bus, and the bus is always available to $\tau_i$. In such a case $\Tmin{i}{j} = \Tmax{i}{j} = (j-1) \cdot \TR$ for all $j > 0$. 
In other cases, when the task is in contention or the bus arbiter uses reservation of slots as in TDM, we have $\Tmin{i}{j} < \Tmax{i}{j}$. These two functions form what we call the \emph{bus availability model} $\BusAvailability{i} = \left\langle \Tmin{i}{}, \Tmax{i}{} \right\rangle$ of task $\tau_i$. This model can be 
computed for any predictable resource and a wide range of arbitration policies. Next, we demonstrate the computation of this bus model for two distinct cases: a non-work-conserving TDM arbiter and a work-conserving fixed-priority arbiter.

\subsection{Non-Work-Conserving TDM Arbitration}

A TDM arbiter works by periodically repeating a schedule, or frame,
with fixed size, $\frameSize$. Each core $\pi_{p}$ is allocated a
number of slots $\slots_{p}$ in the frame at design time, such that
$\sum_{\pi_{p}} \slots_{p} \leq \frameSize$. There are different
policies for distributing the slots allocated to a core within the TDM
frame, but here we consider the case where slots are assigned
contiguously for simplicity.  An example of a TDM frame, a contiguous
allocation, and some of the associated terminology is illustrated in
Figure~\ref{fig:tdm}. 

\begin{figure}[htb]
\centering
\includegraphics[width=0.55\columnwidth]{tdm_centralized.pdf}
\caption{TDM frame with 7 slots using a contiguous slot allocation.}
\label{fig:tdm}
\end{figure} 

We consider a non-work-conserving instance of the TDM arbiter, which
means that requests from a core are only scheduled during bus slots
allocated to that core. Empty slots or slots allocated to other cores
without pending requests are hence not utilized. This type of policy
makes the timing behavior of memory requests of tasks scheduled on
different cores completely independent. As a result, only the
configuration of the arbiter has to be considered when determining
$\Tmin{}{}$ and $\Tmax{}{}$. For non-work-conserving TDM arbitration with a contiguous slot
allocation, $\Tmin{}{}$ and $\Tmax{}{}$, for task $\tau_i$ assigned to core $\pi_p$ are derived according to
Equations~\eqref{eq:tmin} and \eqref{eq:tmax}, respectively. 

\begin{align}
\label{eq:tmin} \Tmin{i}{j} &= \Bigl(\Bigl\lfloor \frac{j - 1}{\slots_{p}} \Bigr\rfloor \times \frameSize + \bigl((j-1) \bmod \slots_{p}\bigr)\Bigr) \times \TR \\
\label{eq:tmax} \Tmax{i}{j} &= \Tmin{i}{j} + (\frameSize - \slots_{p} + 1) \times \TR 
\end{align}

The first term in the computation of $\Tmin{}{}$ in Equation~\eqref{eq:tmin}
corresponds to the minimum required number of full iterations of the TDM frame
to produce $j$ free slots for $\tau_{i}$ and the second term corresponds to the remaining number of
required slots after these iterations. 
The computation of $\Tmax{}{}$
is similar, except that it adds an additional $\frameSize - \slots_{p} + 1$
slots to account for releases with maximum misalignment with respect
to the set of contiguous slots allocated to the core in the TDM frame (including just missing its own last slot, i.e. the ``+1'').
Note that these equations also cover non-work-conserving round-robin
arbitration, since it is just a special case of TDM where $\frameSize$ equals
the number of cores sharing the bus and $\forall \pi_{p} \;
\slots_{p} = 1$.  Work-conserving versions of both these arbitration
policies can be derived by additionally considering the task-region profiles, although this is omitted for brevity.
Figure~\ref{fig:tdm} graphically illustrates the arrival times and
waiting times corresponding to $\Tmin{1}{1}$ and $\Tmax{1}{1}$. 
As seen in the figure, $\Tmin{1}{1} = 0$ is achieved for a request
that arrives just at the beginning of any of the two slots allocated to
its corresponding core and $\Tmax{1}{1} = 6$ for a request arriving just after
the last slot allocated to its core has been left idle.
For this particular arbitration policy, the best-case and worst-case
arrival with respect to the TDM frame is the same for any value
of $j$, although this does not hold in general.


\subsection{Work-Conserving Fixed-Priority Arbitration}

In the context of bus arbitration policies, one of the challenges with currently existing COTS-based multi-core systems is that the memory bus does not recognize/respect task priorities. This is because the bus is generally designed with the aim of enhancing the average-case performance and is not tailored for real-time systems. This can lead to a scenario similar to priority inversion in which requests from higher priority tasks are delayed by requests from lower-priority tasks on the bus. 
Although the scheduler enforces these priorities while allocating cores to tasks, these priorities are not passed over to the shared hardware resources like the memory bus, which have their own scheduling policies. 
This problem has been addressed in research by enabling priorities in priority-driven arbiters to be software programmable directly~\cite{Akesson09dsd} or indirectly by tagging each request with its priority~\cite{miao}. 
We assume in this section that the memory bus is designed according to any of these strategies.
Based on this, we design a bus-availability model for a fixed-priority arbiter. 

Assume that the analyzed task $\tau_i$ is scheduled on core $\pi_p$.
Despite the uncertainty of the arrival patterns of the requests, it is important to 
determine a lower and upper bound on the cumulative number of requests that tasks with higher priority than $\tau_i$ and scheduled on the interfering cores $\pi_q \neq \pi_p$
may inject into the bus. These bounds are denoted by the \emph{per-core request profiles} $\PCRP^{min}_q(i,t)$ and $\PCRP^{max}_q(i,t)$ functions, respectively, which can be computed as shown in~\cite{Icess12} with a pseudo-polynomial time complexity of $O(C_{\max}^2)$, where $C_{\max}$ denotes the maximum WCET among tasks deployed on the given core.  

The computation of these functions, in summary, is akin to the bin packing problem, in which we need to pack the maximum (or minimum) number of (interfering) requests in a given interval of duration $t$ during which the analyzed task $\tau_i$ executes. 
To do so, the tasks of higher priority than $\tau_i$ that run on the interfering cores are ordered by request densities and then packed within an interval of time $t$ in such a manner that the minimum (or maximum) number of requests is derived, while respecting the task arrival rates. 
With this information, we derive the corresponding earliest and latest times at which free slots are available to the analyzed task $\tau_i$ assigned to core $\pi_p$ according to Equations~\eqref{eq:TminGen} and~\eqref{eq:TmaxGen}, respectively. Just like the computation of $\Tmax{}{}$ for TDM in Equation~\eqref{eq:tmax}, Equation~\eqref{eq:TmaxGen} adds an extra $\TR$ to capture the possible situation where the task just misses the last free slot before experiencing its worst-case interference pattern.

\begin{align}
\label{eq:TminGen}\Tmin{i}{j} &= \min_{t \ge 0} \Bigl\{ t \Bigm| t - \Bigl(\sum_{\pi_q \neq \pi_p} \PCRP_q^{\min}(i,t) \times \TR\Bigr) = (j-1) \times \TR \Bigr\} \\
\label{eq:TmaxGen}\Tmax{i}{j} &= \min_{t \ge 0} \Bigl\{ t \Bigm| t - \Bigl(\sum_{\pi_q \neq \pi_p} \PCRP_q^{\max}(i,t) \times \TR\Bigr) = (j-1) \times \TR \Bigr\} + \TR  
\end{align}

From the perspective of the analyzed task $\tau_i$ executing on core $\pi_p$, the memory bus can be viewed as a resource with two alternating phases: a busy phase, in which it serves the requests from the other cores and an idle phase, in which it is available to $\tau_i$. 
Equation~\eqref{eq:TmaxGen} can be interpreted as follows: 
The other cores will issue $\sum_{\pi_q \neq \pi_p} \PCRP_q^{\max}(i,t)$ requests and utilize the corresponding number of bus slots, each of length $\TR$. 
The analyzed task can only serve its $j^{th}$ request after $\sum_{\pi_q \neq \pi_p} \PCRP_q^{\max}(i,t)$ requests of the interfering tasks are served and then a free slot is available.
In the worst case,  the request will be released just after the beginning of the free slot. Hence the next slot will be available for the request of the analyzed task after a time $\TR$. 
Given this, when in isolation, $\sum_{\pi_q \neq \pi_p} \PCRP_q^{\min}(i,t) =0$ and  $\sum_{\pi_q \neq \pi_p} \PCRP_q^{\max}(i,t) =0$
and hence $\Tmin{i}{1}=0$ and $\Tmax{i}{1} = \TR$. 
We pre-compute and store values of $\Tmax{i}{}$ and $\Tmin{i}{}$ for all $j$ as long as the resulting $t \leq D_i$.       



Just like our TDM model can be used to capture the special case of round-robin arbitration,
the unspecified work-conserving arbiter presented in~\cite{Ernst,Cister}
is just a special case of the fixed-priority arbiter presented in this
section. The unspecified work-conserving arbiter was defined in the
context of COTS systems, where the arbitration mechanism is not always specified. 
However, it is still possible to analyze the system if it can be assumed that
the arbiter is work-conserving, which is reasonable for example in the context of 
commercially-available memory controllers that are designed to optimize average performance. Capturing this arbiter
with this model only requires a slight modification to the PCRP functions to make every task believe
that it has the lowest priority in the system, which is the same way it was captured in the original publications. Although this is likely to result in a very pessimistic estimation of the worst-case delay, 
it enables conservative analysis of the system.



As seen in this section, the $\Tmin{i}{j}$ and $\Tmax{i}{j}$ functions are arbitration dependent and can be computed for different arbiters (TDM, round-robin, fixed-priority and the unspecified work-conserving arbiter). 
These functions serve as an input to the next steps of the proposed framework that compute the increased execution time based on the model. In contrast, the methods described in the following sections are independent of the arbitration mechanism.

\section{Finding the Maximum Delay for a Request-set Mapping}
\label{sec:wc_delay}

We have presented a model that captures the availability of the memory bus to a given task and demonstrated its use for
two very different arbitration mechanisms and highlighted additional arbiters that are supported as special cases of these. This section continues by first describing a method to compute the maximum waiting time of a request $\request{i}{k}$, given a request-to-slot assignment $\Assignment{i}{k}$ for that request, and the bus availability model $\BusAvailability{i}$. The same rationale is then extended to compute the cumulative waiting time for a sequence of requests of a given task $\tau_i$.


\subsection{Maximum Delay for a Single Request}
\label{sec:maxdelay_single}

For a given request $\request{i}{k}$ and its request-to-slot assignment $\Assignment{i}{k}$, the key idea to maximize its waiting time is to release that request as early as possible and delay its service time as much as possible.
In other words, for a given request $\request{i}{k}$ and a request-to-slot assignment $\Assignment{i}{k}$, we need to determine a \emph{lower} bound on its release time and an \emph{upper} bound on its service time and
then compute the resulting waiting time. This is done in Lemmas~\ref{lem:releaseLB} and~\ref{lem:UBreqserv}, respectively.

\begin{lemma}[A lower bound on the release time of a request]
\label{lem:releaseLB}
For any task $\tau_i \in \tau$ and for all $k > 1$, let $\request{i}{k-1}$ and $\request{i}{k}$ be two consecutive requests generated by $\tau_i$. For given request-to-slot assignments $\Assignment{i}{k-1}$ and $\Assignment{i}{k}$, if request $\request{i}{k-1}$ has been served at time $\reqserv{i}{k-1}$ in the $\Assignment{i}{k-1}$'th free bus slot then it holds that the release time $\reqrel{i}{k}$ of $\request{i}{k}$ is such that
\begin{equation}
\label{equ:LBreqrel}
\reqrel{i}{k} \geq \max(\Tmin{i}{\Assignment{i}{k} - 1} + 1 , \reqserv{i}{k-1} + (\Assignment{i}{k} - \Assignment{i}{k-1}) \times \TR)
\end{equation}
\end{lemma}

\begin{proof}
The lemma is based on two simple observations, corresponding to the two terms in Equation~\eqref{equ:LBreqrel}.
\begin{enumerate}
 \item If it is given that $\request{i}{k}$ is served in the $\Assignment{i}{k}$'th free bus slot of $\tau_i$ then its earliest release time is \emph{immediately after} the earliest time-instant at which the bus can be free for 
the $(\Assignment{i}{k}-1)$'th time. Otherwise, the request would have been served in the previous available free slot, $(\Assignment{i}{k}-1)$. 
Formally, this implies $\reqrel{i}{k} \geq \Tmin{i}{\Assignment{i}{k} - 1} + 1$.
\item Since we assume that a core stalls while its requests are being served, it follows that a request can only be released after the previous request from the same task has been served, i.e. $\reqrel{i}{k} \geq \reqserv{i}{k-1}$. In addition, for request $\request{i}{k}$ to be served in the $\Assignment{i}{k}$'th free bus slot of $\tau_i$, it must hold that $\request{i}{k}$ has missed all the intermediate free bus slots between the $\Assignment{i}{k-1}$'th and the $\Assignment{i}{k}$'th, i.e. 
%Otherwise, the request $\Assignment{i}{k}$ would correspond to an earlier free slot than given by the mapping. This gives us to the term 
$\reqrel{i}{k} \geq  \reqserv{i}{k-1} + (\Assignment{i}{k} - \Assignment{i}{k-1}) \times \TR$.
\end{enumerate}
 
In order to satisfy both conditions, the maximum of the resulting values is considered.\qed
\end{proof}

\begin{lemma}[An upper bound on the service time of a request]
\label{lem:UBreqserv}
For any task $\tau_i \in \tau$ and for all $k > 1$, if request $\request{i}{k}$ is served at time $\reqserv{i}{k}$ in the $\Assignment{i}{k}$'th free bus slot then it holds that
\begin{equation}
\label{equ:UBreqserv}
\reqserv{i}{k} \leq \min(\Tmax{i}{\Assignment{i}{k}}, \reqrel{i}{k} + \Tmax{i}{1})
\end{equation}
\end{lemma}

\begin{proof}
The latest time at which request $\request{i}{k}$ assigned to slot $\Assignment{i}{k}$ is served is $\Tmax{i}{\Assignment{i}{k}}$ (by definition). However, since $\Tmax{i}{1}$ is defined as the maximum delay that a request may suffer, the value of $\reqserv{i}{k}$ cannot be greater than $\reqrel{i}{k} + \Tmax{i}{1}$. Equation~\eqref{equ:UBreqserv} upholds these two conditions by considering the minimum of the respective values.\qed
\end{proof}

The maximum delay for servicing the given request $\request{i}{k}$ in slot $\Assignment{i}{k}$  is then given by the difference between the upper bound on its service time and the lower bound on its release time.


\subsection{Maximum Cumulative Delay for a Request-set Mapping}

In the previous section, we established a method to compute an upper bound on the delay of a single request assigned to a given free bus slot. 
Now, we extend this result to maximize the cumulative delay of a \emph{sequence} of $\NbReqPerTask{i}$ requests, given a request-set mapping $\Mapping_i = \{ \AssignmentOne{i}{1}, \ldots, \AssignmentOne{i}{\NbReqPerTask{i}} \}$ for that sequence. 
To maximize the cumulative delay for the mapping $\Mapping_i$, we compute the individual maximum delay for each request by applying Lemmas~\ref{lem:releaseLB} and~\ref{lem:UBreqserv}. Since the release time (and thus the delay) of a given request $\request{i}{k}$ depends on the service time $\reqserv{i}{k-1}$ of the previous one (see Equation~\eqref{equ:LBreqrel}), we start by computing the maximum delay of the first request $\request{i}{1}$ and iterate up to request $\request{i}{\NbReqPerTask{i}}$.
We show in Lemma~\ref{lem:wccd} that this iterative process leads to a worst-case cumulative delay. 
The lemma is proven by induction and case enumeration and is found in the appendix.
The main benefit of the lemma is that it establishes 
that the maximum cumulative delay of a request-set mapping can be computed in an iterative manner.
We exploit this in the next section as we present an algorithm to find the worst-case 
request-set mapping.

\begin{lemma}[Worst-case cumulative delay]
\label{lem:wccd} 
Let $\Mapping_i  = \{ \AssignmentOne{i}{1}, \ldots, \AssignmentOne{i}{\NbReqPerTask{i}} \}$ refer to a request-set mapping for the $\NbReqPerTask{i}$ requests of task $\tau_i$ and $\DelayOne{i}{k}$ the maximum cumulative delay for the first $k$ requests $\{\request{i}{1}, \request{i}{2},  \dots \request{i}{k}\}$, given this mapping $\Mapping_i$. The cumulative delay $\DelayOne{i}{\NbReqPerTask{i}} = \sum_{k=1}^{\NbReqPerTask{i}} (\reqserv{i}{k} - \reqrel{i}{k})$ of the $\NbReqPerTask{i}$ requests of $\tau_i$ is maximized for:
\newcommand{\WidestPart}{\ensuremath{\max( \Tmin{i}{\Assignment{i}{k} - 1} + 1, \reqserv{i}{k-1} + \Delta_k)}}%
\newcommand{\FixedSize}[1]{\makebox[\widthof{\WidestPart}][l]{\ensuremath{#1}}}%
\begin{align}
\reqrel{i}{k} &= 
   \begin{cases}
   \FixedSize{\Tmin{i}{\Assignment{i}{k} - 1} + 1}  \qquad \mbox{ if } k = 1\\
   \FixedSize{\max( \Tmin{i}{\Assignment{i}{k} - 1} + 1, \reqserv{i}{k-1} + \Delta_k)}   \label{equ:reqrelk}  \qquad \mbox{ otherwise}
   \end{cases} \\
   \reqserv{i}{k} &=\FixedSize{\min( \Tmax{i}{\Assignment{i}{k}}, \reqrel{i}{k} + \Tmax{i}{1} )}   \label{equ:reqservk}  
\end{align}
where $\Delta_k = (\Assignment{i}{k} - \Assignment{i}{k-1}) \times \TR$.
\end{lemma}
 
A detailed proof is presented in the Appendix; the interested reader is referred to it for the complete argument.

\section{Finding the Worst-case Request-set Mapping}
\label{sec:wc_assignment}

We have presented a bus availability model and shown how to leverage it to compute the maximum cumulative
delay for \emph{a given sequence of requests and a given request-set mapping}. This section proceeds by presenting how to efficiently determine
a request-set mapping for which the maximum cumulative delay is the highest among all possible request-set mappings, i.e. a worst-case request-set mapping. 
First, we present the basic algorithm to find this worst-case mapping. Then, we proceed by 
presenting how to eliminate, at an early stage of the computation, many intermediate mappings considered by the algorithm so that the computation time and the memory requirements are reduced.

\subsection{Algorithm Description}

This section proposes an algorithm to find the request-set mapping that maximizes the cumulative delay.
In order to eliminate unfeasible mappings that will \emph{provably} not contribute to the global maximum,
we start by presenting an important corollary of Lemma~\ref{lem:wccd} followed by a relevant observation, which eventually forms the basis of the algorithm.

\begin{corollary}[Dependency between worst-case cumulative delays]
\label{cor:dependency}
Let us assume a sequence of $k$ requests $\{ \request{i}{1}, \request{i}{2}, \ldots, \request{i}{k}\}$ from task $\tau_i$ and a given request-set mapping $\Mapping_i  = \{ \Assignment{i}{1}, \Assignment{i}{2}, \ldots, \Assignment{i}{k} \}$ for these requests. Let us denote by $\DelayOne{i}{k}$ the \emph{maximum} cumulative delay for these $k$ requests (computed using Lemma~\ref{lem:wccd}). Now, suppose that we extend the sequence with an extra request with index $(k+1)$ assigned to slot $h$, i.e. $\Assignment{i}{k+1} = h$ such that $h > \Assignment{i}{k}$. The maximum cumulative delay $\DelayOne{i}{k+1}$ for the $k+1$ requests can be obtained simply by adding to $\DelayOne{i}{k}$ the maximum delay for that last request $\request{i}{k+1}$. This maximum delay for $\request{i}{k+1}$ can be obtained by using Equations~\eqref{equ:reqrelk} and~\eqref{equ:reqservk}, where $\reqserv{i}{k}$ is the service time of the $k^{th}$ request that was obtained during the computation of $\DelayOne{i}{k}$.
\end{corollary}

\begin{proof}
The corollary is a direct consequence of Equations~\eqref{equ:reqrelk} and~\eqref{equ:reqservk}. When applying the method of computation of Lemma~\ref{lem:wccd} to the set of ($k+1$) requests $\{\request{i}{1}, \request{i}{2}, \ldots, \request{i}{k+1}\}$, the resulting cumulative delay $\DelayOne{i}{k}$ after the $k^{th}$ iteration is the same as the delay $\DelayOne{i}{k}$ obtained when applying this method to the set of $k$ requests $\{\request{i}{1}, \request{i}{2}, \ldots, \request{i}{k}\}$. In other words, the computation of the maximum cumulative delay for the first $k$ requests is independent of whether or not there is a ($k+1$)'th request in the input sequence.\qed
\end{proof}

\begin{Observation}
\label{obs:ob2}
If a sequence of $(k+1)$ consecutive requests of a task $\tau_i$ are served within the first and the $h$'th slot available to $\tau_i$, i.e. within the range $[1,h]$ of free bus slots of $\tau_i$, then the maximum cumulative delay for these $(k+1)$ requests is the maximum between the largest delay computed in the following two scenarios: 
\begin{enumerate}
\item The ($k+1$) requests are \emph{all} served within the range $[1,h-1]$ of free bus slots.
\item The first $k$ requests are served within the slots $[1,h-1]$ and the ($k+1$)'th request is served in slot $h$.
\end{enumerate}
This observation holds true as these two cases are \emph{mutually exclusive and jointly exhaustive}, which implies that any feasible assignment falls either in Case~1 or Case~2 and taking the maximum among the resulting delays is trivially safe.
\end{Observation}

Based on this corollary and observation, we construct an algorithm to compute $\DelayOne{i}{k}$ from $\DelayOne{i}{k-1}$, $\forall k$, which ultimately yields $\DelayOne{i}{\NbReqPerTask{i}}$.  The proposed algorithm is \emph{safe-by-construction} as it computes $\DelayOne{i}{\NbReqPerTask{i}}$ by investigating all possible assignments of these $\NbReqPerTask{i}$ requests to the free bus slots and only discards assignments that are proven unfeasible. The algorithm is shown in Algorithm~\ref{algo:MaxDelay} and we proceed by discussing it in detail.

\begin{algorithm}[h!]
\LinesNumbered
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\SetKw{Return}{return}
\Input{$\NbReqPerTask{i}$: no. of requests, $\UBslotNew{i}$: last available slot}
\Output{$\DelayOne{i}{\NbReqPerTask{i}}$: maximum cumulative delay incurred by $\tau_i$. \\}
%$\NbReqPerTask{i}$: maximum nb. of requests generated by $\tau_i$, \\
%$\UBslotNew$: upper-bound on the nb. of slots that $\tau_i$ can use.
%$\UBslotNew{i} \leftarrow ...$ \; 
Create a 2D array of $\NbReqPerTask{i}$ rows and $\UBslotNew{i}$ columns, where each $\Cell{k}{j}$ at row $k$ and column $j$ is a list of tuples $\CellElement{k}{j}$, as explained in the description. \label{algo:line_table_create} \;
Set every cell of this array to an empty list $\emptyset$ \;
%\tcc{for each request $k$ iterate in available slots $j$ }
\For{$k \leftarrow 1$ \KwTo $\NbReqPerTask{i}$}   {
	\For{$j \leftarrow k$ \KwTo $\UBslotNew{i} - (\NbReqPerTask{i} - k)$}{
		\eIf{$k = 1$}{
			\lIf{$j > 1$}{$\Cell{k}{j} \leftarrow \Cell{k}{j-1}$\;}
			$\reqrel{i}{k} \leftarrow \Tmin{i}{j - 1} + 1$\;
			\tcp{we assume $\Tmin{i}{0} = -1$}
			\If{$\reqrel{i}{k} < C_i$}{
				$\reqserv{i}{k} \leftarrow \min(\Tmax{i}{j}, \reqrel{i}{k} + \Tmax{i}{1})$\;			 
				$\DelayOne{i}{k} \leftarrow \reqserv{i}{k} - \reqrel{i}{k}$ \; %\lnl{algo:line_j_1_end}
				$\Cell{k}{j}.\operatorname{add}(\left\langle \DelayOne{i}{k} , j, \reqserv{i}{k} \right\rangle)$\;
			}
		}
		{ %\lnl{algo:line_kj_n_start}
			$\Cell{k}{j} \leftarrow \Cell{k}{j-1}$\; 
			\tcp{$\Cell{k}{j-1} = \emptyset $ if $j = k$} %\lnl{algo:line_kj_n_mid}
			\ForEach{$\CellElement{k-1}{j-1} \in \Cell{k-1}{j-1}$}{ 
				\tcp{$\CellElement{k-1}{j-1} = \left\langle \DelayOne{i}{k-1}, \Assignment{i}{k-1}, \reqserv{i}{k-1} \right\rangle$}
				$\reqrel{i}{k} \leftarrow \max( \Tmin{i}{j - 1} + 1, \reqserv{i}{k-1} + (j - \Assignment{i}{k-1}) \times \TR )$\;
				\If{$\reqrel{i}{k} < \reqserv{i}{k-1} + C_i$ {\bf and} $\reqrel{i}{k} < C_i + \DelayOne{i}{k-1}$}{
					$\reqserv{i}{k} \leftarrow \min( \Tmax{i}{j}, \reqrel{i}{k} + \Tmax{i}{1} )$\;
					$\DelayOne{i}{k} \leftarrow  \DelayOne{i}{k-1} + \reqserv{i}{k} - \reqrel{i}{k}$ \; %\lnl{algo:line_kj_n_end}
					$\Cell{k}{j}.\operatorname{add}(\left\langle \DelayOne{i}{k}, j, \reqserv{i}{k} \right\rangle)$\;
				}
			}
		} 
	}
} %\lnl{algo:line_return}
\tcp{Return the max value of the Delay among the list of tuples  stored in the topmost right corner cell, i.e. $ \Cell{\NbReqPerTask{i}}{\UBslotNew{i}} $ }
\ForAll{$\CellElement{\NbReqPerTask{i}}{\UBslotNew{i}} \in \Cell{\NbReqPerTask{i}}{\UBslotNew{i}}$}{
	\tcp{$\CellElement{\NbReqPerTask{i}}{\UBslotNew{i}} = \left\langle \DelayOne{i}{\NbReqPerTask{i}}, \UBslotNew{i}, \reqserv{i}{\NbReqPerTask{i}} \right\rangle$} 
	$\finalResult \leftarrow \max(\finalResult, \DelayOne{i}{\NbReqPerTask{i}})$\;
}
\Return $\finalResult$ \;
%$ \finalResult \leftarrow \max_{\CellElement{\NbReqPerTask{i}}{\UBslotNew{i}} \in \Cell{\NbReqPerTask{i}}{\UBslotNew{i}}} \DelayOne{i}{\NbReqPerTask{i}} $\;  
%\tcp{Return maxdelay}
\caption{MaxRegDelay($\NbReqPerTask{i}$, $\UBslotNew{i}$)}   
\label{algo:MaxDelay}
\end{algorithm}

The request-set mappings are captured in a two-dimensional array with $\NbReqPerTask{i}$ rows and $\UBslotNew{i}$ columns.
The input to the algorithm is the number $\NbReqPerTask{i}$ of requests of the analyzed task $\tau_i$, and an upper bound on the available slots in which the $\NbReqPerTask{i}$ requests may be served. 
Note that the variables $k$ and $j$ are used to refer to requests and slots, respectively.
Each $\Cell{k}{j}$ of this array holds a list of tuples $\CellElement{k}{j} = \left\langle \DelayOne{i}{k}, \Assignment{i}{k}, \reqservOne{i}{k} \right\rangle$, where each tuple $\CellElement{k}{j}$ in that list reflects a feasible request-set mapping of the first $k$ requests to $k$ free bus slots within the range $[1,j]$ of slots available to $\tau_i$. 
The members of this tuple denote:

\begin{itemize}
 \item The maximum delay $\DelayOne{i}{k}$ that can be obtained with the corresponding request-set mapping,
 \item The free bus slot in which the $k^{th}$ request has been served to reach that maximum delay $\DelayOne{i}{k}$, i.e. $\Assignment{i}{k} \in [k,j]$, and
 \item The corresponding time $\reqservOne{i}{k}$ at which that $k^{th}$ request has been served in that slot to obtain the delay $\DelayOne{i}{k}$.
\end{itemize}

The algorithm proceeds in a row-wise manner: it assigns the first request $\request{i}{1}$ to all feasible free bus slots and computes the maximum cumulative delay for each such assignment. Then, it proceeds to analyze the second request (next row of the array) and so on. For the first request and first free bus slot, the algorithm computes \emph{the} worst-case delay when the first request is assigned to that slot (Lines~7, 9, 10). To do so, it uses Lemma~\ref{lem:wccd} and adds the corresponding tuple $\CellElement{1}{1}$ to the list of $\Cell{1}{1}$ in Line~11. In this case, we have $\reqrel{i}{1} = 0$, $\reqserv{i}{k} = \Tmax{i}{1}$, and $\CellElement{1}{1} = \left\langle \Tmax{i}{1}, 1, \Tmax{i}{1} \right\rangle$. The list contains only this tuple. The if-statement of Line~8 aims at reducing the computation time of the algorithm by discarding all the request-to-slot assignments which impose on the first request $\request{i}{1}$ to be released after the task has run for $C_i$ time units, which is impossible. 

For $k=1$ and  $j>1$, the algorithm computes \emph{all} the maximum delays by considering every assignment of the first request, $\request{i}{1}$, to free bus slots $\leq j$. First,
the list of the current $\Cell{1}{j}$ is initialized to the list of the previous $\Cell{1}{j-1}$ (Line~6), thereby carrying on all the possible worst-case delays that were obtained when this first request was assigned to a previous free bus slot $< j$. Then, the algorithm addresses the case where the first request is assigned to the $j^{th}$ bus slot by making use of the equations of Lemma~\ref{lem:wccd} to compute $\reqrel{i}{1}$ and $\reqserv{i}{1}$ and appends the corresponding tuple $\CellElement{1}{j}$ to the list of $\Cell{1}{j}$ (Lines 7, 9, 10, and 11 again).

%stems from the fact that any two requests belonging to a task of length $C_i$ cannot have their release times separated by more than $C_i$ time units. The addition of the "if-statement" at Line~8 filters out a considerable number of unfeasible slot assignments for request $\request{i}{1}$, as this condition is violated when $j$ gets larger. It ensures that any partial solution in which the first request is released after the task has run for $C_i$ time units is immediately discarded, thereby pruning the search space by eliminating all solutions that start with this first erroneous request-to-slot assignment $\Assignment{i}{1} > C_i$ as soon as they are detected. 

When $k>1$ and $j\geq k$, the algorithm computes \emph{all} worst-case delays that can be obtained when the first $k$ requests of $\tau_i$ are assigned to any free bus slots within $[k,j]$. On Line~14, the algorithm initializes the list of $\Cell{k}{j}$ to the list of results obtained for the $\Cell{k}{j-1}$. Informally, this reflects Case~1 in Observation~\ref{obs:ob2}, which states that the worst-case cumulative delay of the first $k$ requests may be found in the set of maximum delays obtained when these $k$ requests are \emph{all} served \emph{before} the $j^{th}$ free bus slot. Then on Line~15, the algorithm inspects every maximum delay that has been obtained assuming that the first $k-1$ requests were served \emph{before} the $j^{th}$ free bus slot. For each of these delays $\DelayOne{i}{k-1}$, assuming that the $k^{th}$ request is now served in the $j^{th}$ free bus slot, Lines~16 and~18 compute the release and service time of that request $\request{i}{k}$ using the equations of Lemma~\ref{lem:wccd}, by referring to the corresponding request-to-slot assignment $\Assignment{i}{k-1}$ of the ($k-1$)'th request, as well as its service time $\reqserv{i}{k-1}$ in this free bus slot $\Assignment{i}{k-1}$. This reflects Case~2 in Observation~\ref{obs:ob2} as the maximum delay $\DelayOne{i}{k}$ for the first $k$ requests is computed assuming that request $\request{i}{k}$ is assigned to the $j^{th}$ free slot and the previous $k-1$ requests are served in the earlier bus slots. Note that the computation of the resulting maximum cumulative delay $\DelayOne{i}{k}$ in Line~19 is safe as explained in Corollary~\ref{cor:dependency}.  

Similarly to Line~8, the condition of Line~17 is used to filter out a host of unfeasible solutions. In short, the time interval between the release of the currently considered request $\request{i}{k}$ and the service time of the previous one cannot exceed the total execution requirement of the task \textbf{and} the current request cannot be released later than the maximum execution requirement of the task plus the maximum delay $\DelayOne{i}{k-1}$ that $\tau_i$ may incur till there due to interference with the first $(k-1)$ requests.

Note that $k$ spans from $1$ to $\NbReqPerTask{i}$, while $j$ takes all values within the range $[k, \UBslotNew{i} - (\NbReqPerTask{i} - k)]$. The reason for limiting the range of $j$ is because the $k^{th}$ request of $\tau_i$ cannot possibly be served in a free bus slot $< k$ (leading to a lower bound $j\geq k$) and the next $(\NbReqPerTask{i} - k)$ requests following $\request{i}{k}$ require at least $(\NbReqPerTask{i} - k)$ slots in order to be served (leading to the upper bound $j \leq \UBslotNew{i} - (\NbReqPerTask{i} - k)$). 



\subsection{Elimination of Unfeasible Request-set Mappings}
\label{sec:ListReduce}

Having proposed an algorithm to determine the worst-case request-set mapping, we proceed by improving its efficiency.
Algorithm~\ref{algo:MaxDelay} carries on all possible request-set mappings and their associated maximum delays, finally returning the one leading to the maximum cumulative delay. This section presents
two methods to identify which request-set mappings cannot lead to the worst-case cumulative delay and discard them at an
early stage of the computation. By pruning the solution space at each iteration, the set of candidate solutions is substantially reduced, thereby improving the scalability of the algorithm with respect to the number of requests and potential free bus slots.
Lemmas~\ref{lem:pruning_solution} and~\ref{lem:pruning_solution2} present the theoretical foundation for our pruning mechanisms. They establish two relations between
a pair of request-set mappings which, if satisfied, allow one of the mappings to be pruned without risk of discarding the mapping leading to the worst-case cumulative delay.
The proofs of the two lemmas are similar and are both based on case enumeration. However, for completeness, they are both provided in the appendix.
 
\begin{lemma}
\label{lem:pruning_solution}
Let $\Mapping_i = \{ \AssignmentOne{i}{1}, \ldots, \AssignmentOne{i}{k} \}$ refer to a request-set mapping for the first $k$ requests of task $\tau_i$. Let $\DelayOne{i}{k}$ be the maximum cumulative delay for these $k$ requests considering this mapping $\Mapping_i $, and let $\reqservOne{i}{k}$ be the \emph{absolute} time at which the $k^{th}$ request is served in a scenario leading to this delay $\DelayOne{i}{k}$. Similarly, let $\Mapping_i' = \{ \AssignmentTwo{i}{1}, \ldots, \AssignmentTwo{i}{k} \}$ denote another request-set mapping for the first $k$ requests of task $\tau_i$. Let $\DelayTwo{i}{k}$ be the maximum cumulative delay considering this mapping $\Mapping_i'$, and let $\reqservTwo{i}{k}$ be the \emph{absolute} time at which the $k^{th}$ request is served in a scenario leading to this delay $\DelayTwo{i}{k}$. If it holds that

\begin{eqnarray}
\label{equ:lem_assumption0} & \AssignmentOne{i}{k} & \leq \AssignmentTwo{i}{k} \\
\label{equ:lem_assumption1} \text{and } & \DelayOne{i}{k} & \leq \DelayTwo{i}{k} \\
\label{equ:lem_assumption2} \text{and } & \reqservOne{i}{k} + (\AssignmentTwo{i}{k} - \AssignmentOne{i}{k}) \times \TR & \geq \reqservTwo{i}{k}
\end{eqnarray}
then for all $h > \AssignmentTwo{i}{k}$, assigning an extra request $\request{i}{k+1}$ to the $h$'th free bus slot in both mappings $\Mapping_i$ and $\Mapping_i'$, i.e., $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$, leads to
\begin{eqnarray}
\label{equ:lem_obj0} & \AssignmentOne{i}{k+1} & = \AssignmentTwo{i}{k+1} \\
\label{equ:lem_obj1} \text{and } & \DelayOne{i}{k+1} & \leq \DelayTwo{i}{k+1} \\
\label{equ:lem_obj2} \text{and } & \reqservOne{i}{k+1} + (\AssignmentTwo{i}{k+1} - \AssignmentOne{i}{k+1}) \times \TR & \geq \reqservTwo{i}{k+1}
\end{eqnarray}
\end{lemma}

A detailed proof is presented in the Appendix; the interested reader is referred to it.
\begin{lemma}
\label{lem:pruning_solution2}
Under the same conditions as in Lemma~\ref{lem:pruning_solution}, if it holds that
% 
\begin{eqnarray}
\label{equ:lem2_assumption0} & \AssignmentOne{i}{k} & \leq \AssignmentTwo{i}{k} \\
\label{equ:lem2_assumption1} \text{and } & \DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) & \leq \DelayTwo{i}{k} \\
\label{equ:lem2_assumption2} \text{and } & \reqservOne{i}{k} + (\AssignmentTwo{i}{k} - \AssignmentOne{i}{k}) \times \TR & \leq \reqservTwo{i}{k}
\end{eqnarray}
% 
then for all $h > \AssignmentTwo{i}{k}$, assigning an extra request $\request{i}{k+1}$ to the $h$'th free bus slot in both mappings $\Mapping_i$ and $\Mapping_i'$, i.e., $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$, leads to
% 
\begin{eqnarray}
\label{equ:lem2_obj0} & \AssignmentOne{i}{k+1} & \leq \AssignmentTwo{i}{k+1} \\
\label{equ:lem2_obj1} \text{and } & \DelayOne{i}{k+1} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) & \leq \DelayTwo{i}{k+1} \\
\label{equ:lem2_obj2} \text{and } & \reqservOne{i}{k+1} + (\AssignmentTwo{i}{k+1} - \AssignmentOne{i}{k+1}) \times \TR & \leq \reqservTwo{i}{k+1}
\end{eqnarray}
% 
\end{lemma}

A detailed proof is presented in the Appendix; the interested reader is referred to it.

The vital inference from the expressions in Lemmas~\ref{lem:pruning_solution} and~\ref{lem:pruning_solution2} is that the maximum cumulative delay for the first ($k+1$) requests of $\tau_i$ is higher by using the mapping $\Mapping_i'$ for the first $k$ requests instead of the mapping $\Mapping_i$. Then, since Conditions~\eqref{equ:lem_obj0},~\eqref{equ:lem_obj1}, and~\eqref{equ:lem_obj2} are the same as Conditions~\eqref{equ:lem_assumption0},~\eqref{equ:lem_assumption1}, and~\eqref{equ:lem_assumption2} (and the corresponding relation holds for the conditions in Lemma~\ref{lem:pruning_solution2}), the lemmas continue to hold for all subsequent requests $> k+1$ until the last request of $\tau_i$. This means that $\Mapping_i$ can be safely omitted during the computation of Algorithm~\ref{algo:MaxDelay} as it cannot lead to the maximum cumulative delay.

%% This means that, given any positive number $k$ of requests and any pair of mappings $\Mapping_i$ and $\Mapping_i'$ of the first $k$ requests of a task $\tau_i$ , if Conditions~\eqref{equ:lem_obj0},~\eqref{equ:lem_obj1}, and~\eqref{equ:lem_obj2} are satisfied then for any $x > k$, there exists no mappings $\Mapping_i*$ of the first $x$ requests such that the three following statements are all true:
%% \begin{enumerate}
%% \item The mapping of the first $k$ requests follows $\Mapping_i$, 
%% \item Request $(k+1)$ is assigned to any free bus slot $h > \AssignmentTwo{i}{k}$, and 
%% \item The cumulative delay for the first $x$ requests is maximum. 
%% \end{enumerate} 
%% Indeed, according to the lemma, it is always possible to obtain a higher delay by replacing the mapping $\Mapping_i$ of the first $k$ requests in $\Mapping_i*$ for the mapping $\Mapping_i'$ (the mapping of the next $(x-k)$ requests remains unchanged).

In order to leverage the result of Lemmas~\ref{lem:pruning_solution} and~\ref{lem:pruning_solution2}, we implement a function ``$\operatorname{ListReduce}(\Cell{k}{j})$'' at the end of the first inner loop, i.e., ``for $j \leftarrow k$ to $\UBslotNew{i} - (\NbReqPerTask{i} - k)$'' in Algorithm~\ref{algo:MaxDelay}. This function makes sure that $\nexists$ two distinct tuples $\CellElement{k}{j}$ and $\CellElement{k}{j}'$ in the list of $\Cell{k}{j}$ such that the conditions in Lemmas~\ref{lem:pruning_solution} and~\ref{lem:pruning_solution2} hold. Each time such a pair of tuples is found, only the one with the highest cumulative delay is kept while the other is discarded.
This is a key addition to the algorithm that \emph{significantly} reduces the number of tuples in $\Cell{k}{j}$.

\section{Region-based Analysis}
\label{sec:complexity_reduction}

As seen in Section~\ref{ssec:task_request_profiles}, we can obtain more information on the distribution of the requests by dividing the execution of each task into a sequence of sampling regions.
For each region, we can derive an upper bound on the number of requests that can be issued by the task within that region. However, Algorithm~\ref{algo:MaxDelay} did not leverage this region-specific information and
used only coarse-grained information about the number of requests in the \emph{entire task}, represented by $\NbReqPerTask{i}$. In other words, Algorithm~\ref{algo:MaxDelay} views the input task $\tau_i$ as a single region that can issue up to $\NbReqPerTask{i}$ requests, which may result in a pessimistic upper bound.

In contrast, finer-grained regions with a bounded number of requests and duration allow the analysis to narrow down the range of slots to which the requests can be mapped.
This region-based analysis has the advantage of limiting the number of possible candidate slots that must be explored, which decreases the computation time and tightens the analysis. We proceed by elaborating on the theoretical foundations of the analysis, followed by a detailed description of the algorithm.
 

\subsection{Theoretical Foundation}

When a task is divided into regions and runs in conjunction with other tasks, the time at which each of its regions starts executing depends on the delays incurred by the requests issued in its previous regions. This raises questions about what the worst-case starting time of a region is. Lemma~\ref{lem:latest_finish_time} below expresses a relation that exists between the starting time of a region and the maximum delay that it can incur. In essence, it shows that any region that incurs the maximum delay by starting at a time $t_1$ cannot finish later than if it had started at its maximum starting time $t_2$.
This property enables a fine-tuned WCET analysis in which the distribution of requests across regions is exploited to obtain region-accurate estimates. 

\begin{lemma}
\label{lem:latest_finish_time}
Let $g$ be a region of a task $\tau_i$ that starts at time $t$ and finishes at time $f$ after incurring its maximum blocking delay $d$. It holds that any earlier starting time $t' < t$ for region $g$ results in a maximum finishing time $f' \leq f$.
\end{lemma}

\begin{proof}
The proof is obtained by contradiction. Let us assume two execution scenarios for region $g$. In Scenario~1, region $g$ starts executing at time $t$ and finishes at time $f$ after incurring its maximum blocking delay $d$ whereas in Scenario~2, it starts at time $t' < t$ and finishes at time $f' > f$ (it thus incurs a delay $d' > d$). That is, in Scenario~2 region $g$ starts its execution earlier and finishes later than in Scenario~1. We show by contradiction that Scenario~2 is impossible. To do so, two cases must be explored: In Scenario~2,
\begin{description}
\item[\textbf{Case 1:}] region $g$ releases its first request \textbf{at or after} time $t$.
\item[\textbf{Case 2:}] region $g$ releases its first request \textbf{before} time $t$.
\end{description}

\emph{Case 1.} In this case, the request-to-slot assignments that led to the blocking delay $d'$ in Scenario~2 can also be used in Scenario~1, since the available free slots are the same in both scenarios. This would result in a delay $d$ equal to $d'$ in Scenario~1 and since $t > t'$ we get $f > f'$, which contradicts the initial assumption that $f < f'$.

\emph{Case 2.} In this case, region $g$ releases its first request before time $t$. Figure~\ref{fig:latest_finish_time} illustrates the two scenarios in such a situation. An ``X'' represents the release of a request, a continuous line represents the execution of the region, and a dashed line is an interval of time during which the task stalls, waiting for its last request to be served. It is assumed in this illustration that region $g$ generates a maximum of $\NbReqPerRegion{i}{g} = 4$ requests.

\begin{figure}[htb]
\centering
\includegraphics*[width=\columnwidth]{scenarios.jpg}
\caption{Illustration of Scenarios 1, 2, and 3 used in the proof of Lemma~\ref{lem:latest_finish_time}}
\label{fig:latest_finish_time}
\end{figure}

%If $d' \leq d$, we have $f = t + \SamplingRegionSize{i} + d$ and $f' = t' + \SamplingRegionSize{i} + d'$ and since $t' < t$, it holds that $f' < f$, which contradicts our assumption that $f' > f$. Thus we must have $d' > d$. 
Suppose that in Scenario~2, region $g$ incurs the maximum delay of ($t - t'$) in the time-interval $[t', t]$, by releasing a single request at the very beginning of its execution. The delay incurred by this single request can even extend until time $t'' > t$, as depicted in Scenario~2 in Figure~\ref{fig:latest_finish_time}. This situation can easily be shown to be a worst case for Scenario~2 (with respect to its finishing time), as it generates the maximum delay with the fewest requests and it delays the actual workload of $\SamplingRegionSize{i}$ units of execution as much as possible. 

Now, let us denote by $\{ \Assignment{i}{2}, \ldots, \Assignment{i}{\NbReqPerRegion{i}{g}} \}$ the request-set mapping of the $(\NbReqPerRegion{i}{g} - 1)$ last requests of region $g$ in Scenario~2 (note that, unlike what is depicted in Figure~\ref{fig:latest_finish_time}, the mapping of these requests may be the same as in Scenario~1). We can create a third scenario, in which region $g$ starts its execution at time $t$ (as in Scenario~1) and such that its first request is released at the beginning of its execution, thereby incurring the same delay between $[t, t'']$ as in Scenario~2, and all the subsequent requests follow the same request-to-slot assignments as in Scenario~2, thereby incurring again the same delay as in Scenario~2. In this new Scenario~3, it thus holds that region $g$ starts at time $t$ and finishes at time $f'' = f' > f$, which contradicts our initial assumption defining $d$ as the maximum delay that region $g$ can incur when starting at time $t$. 

In short, we showed in this proof that for any scenario in which a given region $g$ starts before a time $t$, releases requests before that time $t$, and finishes at a time $f'$ (like Scenario~2 in our proof), we can create a corresponding scenario (like Scenario~3 here) in which region $g$ starts at time $t$ and finishes at time $f'$ as well. Therefore, if we compute the maximum delay for a given region and a given starting time, it gives us a maximum finishing time for that region that cannot be earlier than in a scenario where the region starts earlier, which proves the lemma.\qed
%\begin{eqnarray}
%f'' & = & t + \SamplingRegionSize{i} + (d' - (t - t')) \nonumber \\
%& = & \SamplingRegionSize{i} + d' + t' \nonumber \\
%& = & f' > f\nonumber
%\end{eqnarray}
\end{proof}
%free-bus-slot
The important inference from Lemma~\ref{lem:latest_finish_time} is that the WCET of a task (considering contention) can be determined by computing the worst-case finishing time $f_1$ of its first region, and then iterating over the subsequent regions, assuming for each region $g$, a starting time of $f_{g-1}$. The WCET of the entire task is then given by the worst-case finishing time of its last region. This is exploited in our algorithm for region-based analysis, presented next.



\subsection{Algorithm for Region-based Analysis}

With Algorithm~\ref{alg:regAnalysis}, we propose an arbiter-independent method to determine the worst-case cumulative delay.
It is basically an extension of Algorithm~\ref{algo:MaxDelay} that augments it with region-based information.
Since the inputs to this algorithm are the $\Tmin{i}{}, \Tmax{i}{}$ functions and the details of the analyzed task,
any arbiter for which these values can be determined can leverage this algorithm.

\begin{algorithm}[h!]
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{$\tau_i$} 
\Output{WCET of $\tau_i$ (considering contention)} 
$w_i = \frac{C_i}{\SamplingRegionSize{i}}$\; 
\For { region $g$ in task $\tau_i$ from $1$ to $w_i$}
{ 
  $\NbReqPerRegion{i}{g} \leftarrow$ Number of requests in region $g$ \;
  $\UBTime{i}{g} \leftarrow f_{i,g-1} + \SamplingRegionSize{i} + \NbReqPerRegion{i}{g} \cdot \Tmax{i}{1} $\;                                      
  \tcp{with $f_{i,0} = 0$}
  \tcp{Find the earliest slot for which $\Tmax{}{}$ is greater than the finishing time of the previous region}
  $\LBslot{i}{g} \leftarrow \min_{x > 0} \{ x \mid \Tmax{i}{x} \geq  f_{i,g-1} \} $\;
  \tcp{Find the earliest slot for which $\Tmin{}{}$ is greater than the coarse upper bound of the current region}
  $\UBslot{i}{g} \leftarrow \min_{x > 0} \{ x \mid \Tmin{i}{x} \geq  \UBTime{i}{g} \} $\;   
  $\delta_{i,g} = \MaxRegDelay(\NbReqPerRegion{i}{g}, \LBslot{i}{g}, \UBslot{i}{g})$\;
  $f_{i,g} = f_{i,g-1} + \SamplingRegionSize{i} + \delta_{i,g}$\; 
}  
 \Return{$f_{i, w_i}$}\;
\caption{ComputeTaskWCET($\tau_i$)}
\label{alg:regAnalysis}   
\end{algorithm}

The algorithm commences by computing the number $w_i$ of regions (Line~1) and then considers each region $g$ successively (Line~2),
which was shown to be safe by Lemma~\ref{lem:latest_finish_time}.
Next, given the number $\NbReqPerRegion{i}{g}$ of requests in the analyzed region $g$, it finds a \emph{coarse} upper bound on its increased execution time $\UBTime{i}{g}$ assuming that each request in region $g$ may incur a delay of $\Tmax{i}{1}$.  Then, it computes the range of the free bus slots that the requests of region $g$ may occupy (Lines~5--6), assuming on Line~5 a starting time of $f_{i, g-1}$.

%When run in isolation the requests of a given region have immediate access to the memory, while with contention, 
%the requests incur a delay, causing the region to \emph{stretch in time} and the requests to be spaced apart. 
To compute the worst-case delay of each region, the algorithm invokes a slightly modified version of Algorithm~\ref{algo:MaxDelay} in which: 
\begin{enumerate}
\item $j$ now spans from $k + \LBslot{i}{g}$ to $\UBslot{i}{g} - (\NbReqPerRegion{i}{g} - k)$, assuming that $\LBslot{i}{g}$ is passed to Algorithm~\ref{algo:MaxDelay} as an additional input parameter and $k$ is the request index in the algorithm on Line~4, 
\item the 2D array contains $\UBslot{i}{g} - \LBslot{i}{g}$ columns, 
\item all the references to a $\Cell{k}{j}$ are replaced with a reference to $\Cell{k}{j - \LBslot{i}{g}}$, and 
\item references to $C_i$ are substituted for references to $\SamplingRegionSize{i}$. 
\end{enumerate}

Note that a task modeled as a single region is a special case in which $\LBslot{i}{1} = 1$, the region size $\SamplingRegionSize{i}$ is $C_i$, and the maximum number of requests is $\NbReqPerTask{i}$. The delay of the currently analyzed region $\delta_{i,g}$ is computed on Line~7 and is then accounted for in the worst-case finishing time $f_{i,g}$ computed on Line~8. The process is repeated for all the regions and the finishing time of the last region gives the WCET of the task including the maximum cumulative delay for accesses
to shared resources.

\begin{figure}[htb]
\centering
\includegraphics[width=1.0\columnwidth]{region_overlap.jpg}
\caption{Example of overlapping regions.} 
\label{fig:region_overlap}
\end{figure}

It can be seen that for two consecutive regions $g$ and $(g+1)$, the ranges of candidate free bus slots $\left[\LBslot{i}{g}, \UBslot{i}{g}\right]$ and $\left[\LBslot{i}{g+1}, \UBslot{i}{g+1}\right]$ computed at Lines~5 and~6 may overlap. As a result, the finishing time $f_{i, w_i}$ that is returned by the algorithm may sometimes consider a request-set mapping of all the requests in which two requests from two different regions of the task are assigned to the same free bus slot. Even though it may lead to pessimistic (i.e. over-approximated) results, it is safe and \emph{sometimes necessary} for our region-based analysis technique to work with this assumption, because the aggregation of local maximum delays (local to each region) does not always lead to a global maximum delay for the entire task. To illustrate that claim, let us consider an intuitive example: consider a task $\tau_i$ with only 2 regions where each one can generate up to 2 requests. As depicted in Figure~\ref{fig:region_overlap}, their respective ranges of candidate free bus slots overlap. Since we do not assume any specific shape for the functions $\Tmin{i}{}$ and $\Tmax{i}{}$, let us assume that these two functions are defined such that:
\begin{enumerate}
\item there is a free bus slot within $\left[\LBslot{i}{1}, \LBslot{i}{2} \right]$ that can generate a maximum delay of $6$ and all the other slots in that interval generate a delay no greater than~$3$.
\item all the free bus slots that are available to both regions, i.e. within $\left[ \LBslot{i}{2}, \UBslot{i}{1} \right]$, generate a maximum delay of $5$.
\item all the free bus slots within $\left[ \UBslot{i}{1}, \UBslot{i}{2} \right]$, generate a maximum delay of $2$.
\end{enumerate}
With these assumptions, let us create two different request-set mappings for the $4$ requests of $\tau_i$. We call these two mappings: Scenarios~1 and~2 (see Figure~\ref{fig:region_overlap}). In Scenario~1, the \emph{local} maximum delay for the first region is $11$ and it is obtained by assigning its first request within $\left[\LBslot{i}{1}, \LBslot{i}{2} \right]$ to get the maximum delay of $6$ and then assign its second request to one of the free bus slots within $\left[ \LBslot{i}{2}, \UBslot{i}{1} \right]$ to get a delay of $5$. By doing so, the second region that starts after the completion of the first one is only able to get a total delay of $4$, which leads to an overall delay of $6 + 5 + 2 + 2 = 15$ for Scenario~1. In contrast, Scenario~2 assigns the second request of the first region to another time slot within $\left[\LBslot{i}{1}, \LBslot{i}{2} \right]$, which leads to a delay of $9$ time units for that first region. Even though a delay of $9$ for the first region is not a local maximum, it enables the second region to start its execution earlier and benefit from a first delay of $5$ time units, by assigning its first request to a slot within $\left[ \LBslot{i}{2}, \UBslot{i}{1} \right]$, and a second delay of $2$ by assigning its second request to a free bus slot within $\left[ \UBslot{i}{1}, \UBslot{i}{2} \right]$. In this second scenario, the overall delay is $16$, which is higher than in the first scenario. This is why any region-based analysis technique that is based on analyzing each task region separately must be aware that the global maximum delay cannot be obtained by simply adding up the maximum delays local to each region. In order to make the aggregation of local maxima possible (i.e. safe and correct), we allow the ranges of candidate free bus slots of each region to overlap. It can be shown that by doing so, the resulting maximum delay obtained for the entire task is pessimistic but it is safe.

\subsection{Reducing Time-complexity}

Algorithm~\ref{algo:MaxDelay} computes the maximum delay that a given task $\tau_i$ may incur with a non-polynomial time-complexity. This non-polynomiality is due to the exponential growth in the number of request-set mappings that each $\Cell{k}{j}$ holds when $k$ and $j$ increase. Specifically, the number of request-set mappings listed in $c(k,j)$ is equal to the number of mappings in $c(k, j-1)$, see Line~14, plus the number of mappings in $c(k-1, j-1)$; For each mapping of $c(k-1, j-1)$ considered at Line~15, one mapping is indeed added to $c(k, j)$ in Line~20. Although Lemmas~\ref{lem:pruning_solution} and~\ref{lem:pruning_solution2} provide two mechanisms that potentially reduce the number of mappings carried on from one iteration of the algorithm to the next, they do not provide any guarantee on the number of mappings that they will be able to discard and hence, they do not reduce the theoretical time-complexity. It is important to highlight that the algorithm proceeds in phases: Once $\Tmin{}{}$ and $\Tmax{}{}$ values are pre-computed for the given arbiter, they are accessed in constant time in the algorithm and therefore the complexity of Algorithm~\ref{algo:MaxDelay} is agnostic to the underlying arbiter.   


A simple way to reduce the complexity is to set up an upper limit on the number of mappings that each cell can hold. Let us denote this limit by $L$. At run time, at the end of each iteration in the inner loop (between Lines~23 and~24), we add a simple test that counts the number $X$ of mappings of the current cell $\Cell{k}{j}$. If this number $X$ is lower than our pre-set limit $L$ then the algorithm proceeds with the next cell. Otherwise, the algorithm takes all the mappings of $c(k,j)$ and collapses them all into a single ``dummy'' mapping that contains the \emph{maximum} cumulative delay, the \emph{minimum} service time, and the \emph{latest} request-to-slot assignment among the delays, service times, and request-to-slot assignments of all the collapsed mappings. It can easily be shown by looking at Algorithm~\ref{algo:MaxDelay} that this choice of parameters for the dummy mapping is the worst, in the sense that those parameters will lead to the maximum resulting delay incurred by the analyzed task $\tau_i$. With 
this 
technique, the time-complexity of Algorithm~\ref{algo:MaxDelay} is reduced to $O(L \times \NbReqPerTask{i} \times \UBslotNew{i})$ at the cost of adding pessimism in the computation, since the dummy mappings retain only the worst parameters that may come from different mappings. This trick hence represents a programmable trade-off between computation time and accuracy of the proposed analysis. 

 Our experimental evaluation uses an implementation of Algorithm~\ref{algo:MaxDelay} that integrates the two optimization mechanisms provided by Lemmas~\ref{lem:pruning_solution} and~\ref{lem:pruning_solution2} as well as the region-based analysis detailed in Section~\ref{sec:complexity_reduction}. Our implementation currently does not use this method, as it successfully ran and analyzed all the benchmark programs used in the experiments in a reasonable time. Therefore, we only present this method here in order to show that there exist solutions to a (theoretical) complexity issue that may potentially arise when running the analysis, but we did not investigate these solutions further as we have not experienced such problems in our simulations. Also we have not yet evaluated the increased pessimism that the dummy mappings could introduce at this point and will consider it for future work. 

\section{Related Work}
\label{sec:related_work}

Several frameworks, such as Real-Time Calculus~\cite{thiele2000real} and Network Calculus~\cite{cruz1991calculus} have been proposed for general delay analysis of shared resources, such as tasks executing on processors, network packets and memory requests. These frameworks typically compute delays as the maximum difference
between worst-case supply and demand functions. Since the frameworks are general, it is up to the user to derive appropriate supply and demand functions for a particular problem.
A key challenge addressed by our approach that is not covered by existing literature on
Real-Time Calculus or Network Calculus is that the worst-case demand
function is not exactly known, as we only have coarse-grained information about the
number of requests from the sampling regions. A main
innovation of our work can hence be described as determining what the
worst-case demand function actually looks like, given these sampling
regions and information about the arbiter. This in turn enables us to
compute the maximum cumulative delay.

The specific topic of bus contention analysis has received considerable attention in recent years
and these efforts can be classified into two classes: 1) approaches
that modify the hardware or the software of the system to enable or improve analysis, and 2)
approaches that analyze a given system without assuming any modification of the hardware and/or software. We proceed by discussing each of these
in turn.

On the hardware side, a number of memory controllers have been designed
specifically for real-time systems and proposed together with
corresponding analyses that bound the WCRT of memory
requests~\cite{Akesson11DATE,Reineke11,Paol,Shah12DATE,wu2013worst,Li14ECRTS}. These analyses
benefit from full knowledge of the internals of the memory
controller, such as page policies,
transaction scheduler and the DRAM command scheduler, and
exploit this information to produce tight bounds. On the software side,
servers with memory budgets, built into the operating system, have been proposed
to limit the memory interference~\cite{Nowotsch14ECRTS, yun12memory, behnam2012memory,yun2013memguard} from tasks executing
on other cores, enabling it to be managed based on enforcement rather than characterization.
Our work contrasts to these efforts in the sense that it 
considers both the software and hardware to be given with no interface or any other means to modify/re-configure it. 

% Add ``The Multi-Resource Server for Predictable Execution on Multi-core Platforms''
% once it has appeared.

Several approaches have been proposed for bus contention analysis
in given COTS platforms.  Similarly to our work, most analyses consider
multi-core systems with a bus providing access to a shared memory with
a single port~\cite{Ernst,ErnstJournal,Jian,Icess11,Icess12,UnifiedWCET:2014,Rodrigues13PADL}.
However, these works are quite different with respect to the considered 
task models and scheduling policies for both the tasks themselves and their
memory requests.
%
Applications are typically modeled as independent periodic/sporadic task sets
or acyclic task graphs~\cite{Rosen07_rtss07,Chatto}, and the scheduling is often based on
fixed-priorities~\cite{Icess11,ErnstJournal}, while tasks in task graphs
are \emph{statically scheduled} using techniques that respect precedence constraints,
e.g. list scheduling.
The approaches support different task preemption models, ranging from fully
preemptive~\cite{Ernst,ErnstJournal} to non-preemptive~\cite{Icess11,Icess12,Rosen07_rtss07,Chatto},
and with limited-preemption at the granularity of TDM time slots as a compromise in between~\cite{Jian}. Most of these works consider analysis of shared resources as a separate analysis, while~\cite{UnifiedWCET:2014,Rodrigues13PADL} integrate it into the WCET estimation tool to exploit information about the execution of the application, such as when memory requests are issued. 

%
%% The task model in~\cite{Jian} is based on superblocks, which are smaller pieces of
%% code with known BCET, WCET, and minimum and maximum number of memory accesses.
%% The concept of superblocks in~\cite{Jian} is related to
%% our regions in the sense that it provides finer-grained
%% information about when memory accesses occur during the execution of a
%% task, improving the accuracy of analysis. 
%% The main difference in this aspect relat	es to the preemption model, since preemption is possible on
%% boundaries of superblocks or sets of superblocks in a TDM time slot, while it is not on region boundaries.
%
A problem with most of the previously mentioned analysis approaches is that they
only support a single bus arbiter, such as an unspecified
work-conserving arbiter~\cite{Icess11,Ernst}, fixed-priority arbitration, round robin~\cite{Icess12},
TDM~\cite{Rosen07_rtss07, Chatto, Schra2010, Schra2011, UnifiedWCET:2014} or
first-come first-served (FCFS). This does not address the diversity of memory
arbiters in contemporary platforms, making them point-solutions exclusive to a
single platform rather than a reusable framework that applies more
generally.  

Of particular interest is the work in~\cite{SchranWRT}, which computes
the worst-case completion time for tasks accessing a resource shared
by a TDM arbiter. Unlike our approach, the arbiter grants access to the 
resource in a coarse-grained manner, where each bus slot has a fixed longer 
duration that typically fits many requests. 
Similarly to the naive approach of using a constant worst-case delay for every memory request, the proposed approach assumes that each
request is issued at the worst-case time just at the end of the allocated TDM slot, maximizing
the delay. However, it outperforms a naive analysis by considering the maximum number of allocated
TDM slots during which the application can execute without delay when accessing memory. In case
this number is less than the number of memory requests, the remaining number of memory requests can 
be efficiently fetched one after the other during the following allocated TDM slots, resulting in
less pessimistic bounds. The approach hence capitalizes on the fact that the execution requirements
of the task may prevent the worst-case situation from happening to all memory requests.
We compare our generic method with this approach in the next section. 

% We do not have to bring up superblocks unless we choose to discuss the model and relate it to
% our region-based approach. We do not need it for the comparison. 
  %Tasks are divided into superblocks, which correspond to blocks of code and are 
  %specified by their maximum computation time and their
  %maximum number of access requests to the bus.

%% To compute the worst-case completion time, they consider cases which can lead to the worse-case alignment of requests to slots,
%% such that each request can incur the maximum delay. This involves the two scenarios in which the requests may be released either in alignment or out of alignment with the available slots.
%% Their method performs better than the naive case in which every request may be delayed by an entire TDM frame time before it can be serviced due to the worse-case alignment (of a request release). 
%% This is ensured by taking into account the fact that the task must be progressing with its execution if it is not using the available slot for transmitting requests.
%% In cases when the number of requests is larger than the number of required execution slots, the method finishes execution before all memory requests are served and deals with any remaining requests in a dedicated phase
%% where every request is served immediately, leading to tighter estimates than the naive case. 


% This problem is partially mitigated by the analysis
% in~\cite{Jian} that supports three of these
% arbitration mechanisms in a single unified framework, although this
% work is limited to systems where periodic tasks are modeled as sets of superblocks
% and scheduled using TDM. In contrast, our work is more general as it applies to 
% any sporadic constrained-deadline tasks under any non-preemptive
% task scheduler. 

\section{Experimental Evaluation}
\label{sec:experiments}

This section experimentally evaluates the proposed framework by
simulating a multi-core system running real application traces.
First, the experimental setup is explained, followed by three experiments.
The first experiment demonstrates the generality of our approach by executing the
applications with three different arbiters and evaluating the computation time of 
the proposed analysis. The second experiment evaluates the impact
of different region sizes and shows how finer-grained task region-profiles
improve the accuracy and increase the efficiency of the analysis.
Lastly, the final experiment compares our framework to a state-of-the-art analysis approach
for TDM arbitration, i.e. the approach proposed in~\cite{SchranWRT}.

\subsection{Experimental Setup}

The experiments consider a multi-core platform, where the processors are
simulated by the SimpleScalar~3.0 processor simulator~\cite{austin2002simplescalar} with separate
data and instruction caches, each with a size of 16~KB. The L2 cache
is a private unified 128~KB cache with 64~B cache lines and
an associativity of 4. The processor core is assumed to run at a frequency
of 1.6~GHz. The memory is a 64-bit
DDR3-1600 DIMM~\cite{DDR3SPECf} running at a frequency of 800~MHz,
meaning that one memory cycle equals two processor cycles. The memory
access time is $\TR=80$ processor cycles, corresponding to an in-order dynamically scheduled 
DRAM controller with a close-page policy~\cite{Li14ECRTS}.
The experiments consider a platform instance with 4 cores, each core running an
application from the MediaBench test suite~\cite{lee1997mediabench} as a single independent task. 
For each application in the benchmark, memory-trace files were generated by running it on the experimental
platform. The traces were then post-processed according to the
sampling regions used in the experiments to compute the 
region-profiles of the task. The experiments were executed on a computer equipped with an Intel Core i5 processor (2.0~GHz, 4~cores) and 4~GB memory.

The essential characteristics of the benchmark applications used in our evaluation are shown in Table~\ref{tab:characterization}. Note that the table does not contain all applications from the suite, as some of them would not compile with the SimpleScalar toolchain and others would not provide functionally correct output upon verification. Instead of changing the code of these applications and thereby defeating the purpose of standard benchmarks, we opted to exclude these applications. The table shows the execution time of the used applications on the SimpleScalar processor, the total number of memory requests during their execution, and lastly the request density (requests per Kcycle) as a measure of their memory intensity. It is clear from the table that the chosen benchmark applications 
are not trivial as the execution times are typically several million cycles during which thousands
of requests are issued. \emph{This highlights the scalability of our approach and 
contrasts to previous work that use much smaller applications from the CHStone~\cite{chstone} and 
M\"{a}lardalen WCET benchmarks~\cite{malarbench}}.

\begin{table} [h!]
\centering
\caption{Benchmark characterization}
\rowcolors{1}{}{gray!35}
\begin{tabular}{|c|c|c|c|}
\hline 
\emph{Benchmark}      & \emph{Exec. time [Kcycles]} & \emph{Requests} & \emph{Request density}\\ \hline
unepic         & 15775                & 67664    & 4.290  \\ \hline
jpeg-encode    & 46160                & 92905    & 2.013  \\ \hline
epic           & 62540                & 96984    & 1.551  \\ \hline
jpeg-decode    & 21417                & 22121    & 1.033  \\ \hline
h263-encode    & 566845               & 418808   & 0.738  \\ \hline
h263-decode    & 8462                 & 5456     & 0.645  \\ \hline
mpeg2encode    & 823274               & 319306   & 0.388  \\ \hline
gsmdecode      & 43012                & 10104    & 0.235  \\ \hline
mpeg2decode    & 100454               & 28744    & 0.286  \\ \hline
adpcmdecode    & 4193                 & 575      & 0.137  \\ \hline
adpcmencode    & 6358                 & 581      & 0.091  \\ \hline
g721-decode    & 172563               & 9792     & 0.057  \\ \hline
g721-encode    & 152829               & 7439     & 0.049  \\ \hline
\end{tabular}
\label{tab:characterization}
\end{table}

\subsection{Application to Different Arbitration Mechanisms}

The objective of this experiment is to demonstrate the generality of
our approach by applying it to three commonly-used arbiters, being
fixed-priority, an unspecified work-conserving arbiter, and TDM,
respectively.  For each task, we determine the interference from other
tasks and compute the increase in WCET for each of the three arbiters
using a region size of 20 Kcycles.  Other region sizes are evaluated in
the following experiment. We also examine the computation
time of the proposed analysis for the different arbiters.  To get a
representative sample of applications for the WCET benchmark, we chose
4 applications considering the 2 highest and the 2 lowest densities as shown in Table~\ref{tab:characterization}.

The results of the experiment are shown in Figure~\ref{fig:arbiters},
where tasks are arranged in descending order of priorities
(\emph{unepic} has the highest priority) for the case of
fixed-priority arbitration. As expected, for the fixed-priority scheduler the task with the highest
priority experiences no interference (an increase factor of 1x)
from the other tasks.  We observe a counter-intuitive effect in that
\emph{jpegencode} (priority 2) experiences a larger increase in WCET
than the lower priority tasks. This is because \emph{jpegencode} has
higher request density than the two lower priority tasks, implying
that it is more memory intensive. Despite having lower delay
per memory access due to the higher priority, this results in
higher impact of the cumulative delay on the increase factor.


\begin{figure}[htb]
\centering
\includegraphics[scale=0.6]{newarbitercompare20141-crop.pdf}
\caption{Increase in WCET for different arbitration mechanisms.} 
\label{fig:arbiters}
\end{figure}

For the unspecified work-conserving arbiter, the requests of a given
task may be blocked by all requests from all concurrently executing
tasks. During this time, more requests can be injected by other tasks, 
thereby further blocking the requests of the analyzed task. 
Hence, $\Tmax{i}{1}$ is very high for a task blocked by a memory-intensive task. 
This increases the possible number of slots in which requests of the analyzed task may fit and many of these requests may incur a delay of $\Tmax{i}{1}$, leading to a high WCET estimate, as
seen in the figure. 

Note that this arbitration mechanism is equivalent
to fixed-priority arbitration where every task is assumed to have the
lowest priority. This can be seen in Figure~\ref{fig:arbiters}, where
the lowest priority task, \emph{g721encode}, has the same WCET with
fixed-priority arbitration and the unspecified work-conserving
arbiter. It is interesting to note that for \emph{jpegencode}, there is only a
minor difference in the increase factor in the two arbitration mechanisms.
This is because the  other two low-density tasks affect its performance very marginally
and the major interference is still from the task \emph{unepic}.   

Unlike the previous two arbiters, TDM is neither priority-based, nor
work conserving. Here, it is configured with a frame size of 24 and
each of the four cores is allocated 6 slots. Other configurations are evaluated in
the final experiment. We note from the results that TDM arbitration
performs remarkably well compared to fixed-priority arbitration, as only
the highest priority task has a smaller increase factor using
fixed-priorities. An explanation for this is that the worst-case
per-core request-profile (PCRP) computation for the interfering cores
becomes quite pessimistic as it determines the worst possible release
time of all higher-priority requests in the region. On the other hand,
the worst-case for TDM is independent of the release times of other
tasks and the only uncertainty comes from the possible misalignment between the release
of the requests and the TDM table, which is bounded by the frame size.

\begin{figure}[htb]
\centering
\includegraphics[scale=0.6]{arbitercompare12014-crop.pdf}
\caption{Increase in WCET for different arbitration mechanisms for a second set of benchmarks.} 
\label{fig:arbiters1}
\end{figure}
Similar trends are seen when the three arbitration mechanisms are applied to another set of benchmarks as shown in Figure~\ref{fig:arbiters1}.

% It would technically be better to allocate only one slot per task
% in this experiment, since the final experiment shows that more slots
% only increase the increase factor. We are hence not showing the best
% TDM allocation here. In the final experiment, it would make sense to
% try assigning more slots per task to see if Schranzhofer starts doing
% better.


Considering the computation time of the analysis, the fixed-priority
arbitration took 8 hours to complete for the entire task set. The
tasks with higher priorities complete faster than the slower ones,
since they are less impacted by interference, resulting in fewer
possible request-set mappings.  This is reflected in the analysis of
the unspecified work-conserving arbiter, where all tasks can suffer
interference from all other tasks, increasing the analysis time to
around 15 hours for the entire task set.  In contrast, the TDM arbiter
is non-work-conserving and thereby completely independent of other
tasks, enabling the computation of $\Tmin{i}{}$ and
$\Tmax{i}{}$ in constant time. Furthermore, small TDM frame sizes
provide relatively few possible request-set mappings, reducing the
total analysis time to less than 45 minutes.

\subsection{Impact of the Region Size}

We also evaluated the impact of the region sizes. To this end, we
re-ran the previous experiments using
larger region sizes. Three different sizes are used:
30, 40 and 50 Kcycles, respectively, where larger region sizes
imply fewer regions and coarser-grained request profiles for each
region.  We choose two benchmarks with a high request density,
\emph{epic} and \emph{mpeg2-decode}, and two benchmarks that have low request density,
\emph{g721encode} and \emph{g721decode}.
The results of the experiment using the fixed-priority arbiter 
are shown in Figure~\ref{fig:regions}.
Note that the highest priority task, \emph{epic}, experiences no
interference across all region sizes.  For the other tasks, the
results generally follow the intuition that smaller regions result in
tighter WCET. This is because finer-grained task-region profiles provide more
information about the actual request distribution, eliminating 
infeasible request releases in the PCRP computation (see~\cite{Icess12} for details about the PCRP computation).

% In the perfect world, we should mention how region size affects analysis time.

\begin{figure}[htb]
\centering
\includegraphics[width=0.9\columnwidth]{sampling2014-crop.pdf}
\caption{Increase in WCET for different region sizes (in cycles) using fixed-priority
arbitration. The highest priority
is given to the task \emph{epic} followed by \emph{g721decode}, etc.} 
\label{fig:regions}
\end{figure}

Results similar to those in Figure~\ref{fig:regions} were also observed for
the unspecified work-conserving arbiter, whose analysis is very similar and also
relies on the PCRP computation. However, the TDM arbiter is 
largely insensitive to changes in the region size. The explanation for this is that it
is independent of the behavior of the other clients and does not benefit from finer-grained
information about their behavior.

%\todo{It should benefit from finer-grained information about its own requests, but this
%probably has much less impact on its increase factor. The only way to know for sure
%is to do more experiments.}

\subsection{Comparison against the State-of-the-art}

The final experiment compares our approach against the state-of-the-art.
Given that no other unified framework exists for comparison, we compared our
approach against the approach in~\cite{SchranWRT}, specifically targeting TDM
arbiters. The results are presented in Table~\ref{tab:TDMcompare} and
show the percentage increase in WCET considering the cumulative delay to serve all
the memory requests in the available slots.  The comparison was done
for different TDM configurations, where different number of consecutive 
slots, $\phi_p$, are allocated to four tasks executing on the different cores. 
The frame size in this experiment is hence equal to $\frameSize = 4 \times \phi_p$. 

The results indicate that our approach provides tighter 
worst-case estimates than the previous approach for most of the  configurations.
The cases in which the SOA performs better are highlighted in bold, and it can be seen that
the difference is marginal. 
Both approaches perform better with a smaller number of allocated slots,
since this reduces the worst-case misalignment 
between a request release and the next slot allocated to the processor running the task.
%However, we see that the difference between the two approaches
%increases with the number of allocated slots (and the frame size).


%We capture that all requests cannot have worst-case misalignment through first term in 
%Eq. 8. If many slots are allocated consecutively, Tmax of slot K might be a lot earlier
%than rel+Tmax1.

%\todo{Can we explain this? It could be that our approach excludes the possibility of
%all requests getting worst-case alignment, but without instrumentation we cannot confirm this.
%We should also verify if SOA gets into the dedicated transfer phase for any of these
%applications.}

%% However, for very large values of $\phi$ (greater than 300) i.e., when the number
%% of slots avalialble is much much higher than the number of requests,
%% the approach by Schranzofer et al.~\cite{SchranWRT} performs better,
%% since it has a global view of all the available slots and its program
%% model requires bursty accesses to main memory in the acquisition and
%% replication phases to coincide with the available TDM slots. As a
%% result their approach makes a better decision of allocating requests
%% to slots. However, such a high value of $\phi$ is rarely used, as it leads
%% to wasted bus bandwidth when no requests are issued by the
%% allocated core.

%% \todo{Should we take this out? We do not have enough experimental data to really say
%% anything.}

% It would be very nice to have experimental data showing where exactly 
% SOA starts doing better for these tasks. I would not expect a single
% frame size to be a turning point for all applications at the same time.
% This should depend a lot on request density where high density applications
% run out of execution relatively fast and can deal with remaining memory requests
% efficiently. In the perfect world, we should have simulations for regular values
% of phi until we are past the turning points of all applications. Displaying
% this in a graph may lead us to learn a few things about the behavior of
% the two approaches.


\begin{table} [h!]
\caption{Comparison of the increase factor between our unified framework (UF) 
and the state-of-the-art (SOA)~\cite{SchranWRT}.}
\rowcolors{3}{gray!35}{}
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline 
& \multicolumn{2}{c|}{$\phi_p=1$} & \multicolumn{2}{c|}{$\phi_p=5$} & \multicolumn{2}{c|}{$\phi_p=10$} \\ 
\emph{Benchmark} & \emph{UF} & \emph{SOA} & \emph{UF} & \emph{SOA} & \emph{UF} & \emph{SOA} \\ \hline

unepic	     &	\textbf{2.37}	&	\textbf{2.36}	&	2.45	&	4.33	&	2.53	&	4.59		\\ \hline
jpeg-encode	 &	1.64	&	1.64	&	1.73	&	3.35	&	1.82	&	4.35		\\ \hline
epic	       &	\textbf{1.50}	&	\textbf{1.49}	&	1.57	&	2.59	&	1.65	&	3.27		\\ \hline
adpcm-decode	&	1.04	&	1.04	&	1.05	&	1.08	&	1.05	&	1.12		\\ \hline
adpcm-encode	&	1.03	&	1.03	&	1.03	&	1.06	&	1.03	&	1.08		\\ \hline
g721-decode	&	1.01	&	1.02	&	1.04	&	1.07	&	1.08	&	1.15		\\ \hline
g721encode 	&	1.01	&	1.01	&	1.05	&	1.07	&	1.06	&	1.11		\\ \hline
gsmdecode	   &	\textbf{1.08}	&	\textbf{1.07}	&	1.11	&	1.29	&	1.15	&	1.52		\\ \hline
h263decode	&	\textbf{1.21}	&\textbf{	1.20}	&	1.27	&	1.69	&	1.34	&	2.02		\\ \hline
h263encode	&	1.29	&	1.29	&	1.34	&	1.99	&	1.40	&	2.38		\\ \hline
jpegdecode	&	1.33	&	1.33	&	1.36	&	2.11	&	1.39	&	2.15		\\ \hline
mpeg2decode	& 1.12	&	1.12	&	1.15	&	1.45	&	1.19	&	1.73		\\ \hline
mpeg2encode &	1.12	&	1.12	&	1.14	&	1.44	&	1.18	&	1.58		\\ \hline
\end{tabular}
\label{tab:TDMcompare}
\end{table}



\section{Conclusions}
\label{sec:conclusion}

The necessity of deriving tight upper bounds on the contention delay
due to the shared memory bus is an indispensable prerequisite in order
to efficiently compute the worst-case execution time (WCET) of
real-time tasks.  To maximize the applicability of the analysis, this
must furthermore be done in a general way that can easily be applied
to the diversity of arbiters in modern systems.

This work proposed a general framework to address this problem.  A key
novelty of this framework is a general bus availability model that
seamlessly allows different arbiters to be analyzed using a simple
interface.  We demonstrated how to use this interface to characterize
a fixed-priority arbiter, time-division multiplexing (TDM), and explained
how these characterizations are modified to also cover round robin and
an unspecified work-conserving arbiter. The bus availability model was
then leveraged by an arbiter-independent analysis to compute the WCET
of a task when co-scheduled with other tasks contending on the bus.
A key feature of this analysis is that it allows information about
memory requests to be provided in multiple smaller regions to 
speed up the analysis while improving its accuracy.

We experimentally demonstrated that the approach addresses
the diversity problem by applying it to three different arbiters
using applications from the MediaBench suite. The scalability of
the analysis was shown as the analysis completed in 45 minutes to
15 hours depending on the arbiter when considering four 
concurrently executing applications with execution times
of several million clock cycles during which they issue thousands
of requests. We also evaluated the impact of region size for
fixed-priority arbitration and showed how finer-grained information
about requests improve the accuracy of the analysis. Lastly,
we evaluated the quality of the analysis by comparing it with
a state-of-the-art approach to TDM analysis and showed that our approach
consistently resulted in lower WCET of the analyzed applications for
the considered TDM configurations.

\acknowledgement{This work was partially supported by National Funds through \\ FCT/MEC (Portuguese Foundation for Science and Technology) and co-financed by ERDF (European Regional Development Fund) under the PT2020 Partnership, within project UID/CEC/04234/2013 (CISTER Research Centre); by FCT/MEC and the EU ARTEMIS JU within project ARTEMIS/0001/2013 - JU grant nr. 621429 (EMC2); by the North Portugal Regional Operational Programme (ON.2 – O Novo Norte) under the National Strategic Reference Framework (NSRF), through ERDF, and by National Funds through FCT/MEC, within project NORTE-07-0124-FEDER-000063 (BEST-CASE, New Frontiers); and by the European Union under the Seventh Framework Programme (FP7/2007-2013), grant agreement n° 611016 (P-SOCRATES) and by the European social fund within the framework of realizing the project ``Support of inter-sectoral mobility and quality enhancement of research teams at Czech Technical University in Prague'', CZ.1.07/2.3.00/30.0034.
} 

\bibliography{contention_framework}

\appendix
\normalsize

\section{Appendix}

This appendix contains the proofs of Lemmas~\ref{lem:wccd},~\ref{lem:pruning_solution},
and~\ref{lem:pruning_solution2}. These proofs are all quite long as they are all based on case enumeration.
Although many of the cases in the proof are similar, some even identical, the presented proofs cover all
cases exhaustively for completeness. 

\subsection{Proof of Lemma~\ref{lem:wccd}}

\begin{proof}
We prove the lemma by induction. First, we show in the basic step that the claim is true considering only the first request $\request{i}{1}$ and its slot assignment $\Assignment{i}{1}$. That is, we show that the release and service times given by Equations~\eqref{equ:reqrelk} and~\eqref{equ:reqservk} result in a maximum cumulative delay $\DelayOne{i}{1} = \reqserv{i}{1} - \reqrel{i}{1}$. Then, in the inductive step, we show that if the claim is true considering the set of the first $k$ requests, $k \geq 1$ (induction hypothesis), then the property holds for the first $(k+1)$ requests as well. In other words, assuming that Equations~\eqref{equ:reqrelk} and~\eqref{equ:reqservk} assign a release and service time to the $k$ first requests that result in a maximum cumulative delay $\DelayOne{i}{k}$, then the same equations provide a maximum cumulative delay $\DelayOne{i}{ k+1}$ when applied to the first $(k+1)$ requests. Both the basic and inductive steps are  proven by showing that any other choice of release 
and service time, for any of the 
requests in the considered set of requests, results in a lower cumulative delay.

\paragraph*{Basic step:}\paragraphSpace
By considering only the first request $\request{i}{1}$, it is easy to see that any release time $\reqrel{i}{1}$ different from that 
given by Equation~\eqref{equ:reqrelk} leads to $\reqrel{i}{1} > \Tmin{i}{\Assignment{i}{1} - 1} + 1$. This follows from the fact that having $\reqrel{i}{1} < \Tmin{i}{\Assignment{i}{1} - 1} + 1$ is not possible, as shown in Lemma~\ref{lem:releaseLB}. Besides, choosing any other release time $\reqrel{i}{1} > \Tmin{i}{\Assignment{i}{1} - 1} + 1$ would have as sole impact, a decrease in the difference $(\reqserv{i}{1} - \reqrel{i}{1})$, and subsequently a lower delay $\DelayOne{i}{ 1}$ incurred by request $\request{i}{1}$. In short, since $\Tmin{i}{\Assignment{i}{1} - 1} + 1$ is a lower bound on the release time of request $\request{i}{1}$ (from Lemma~\ref{lem:releaseLB}), choosing $\reqrel{i}{1} = \Tmin{i}{\Assignment{i}{1} - 1} + 1$ is the best choice to guarantee a maximum delay for the first request. 
Similarly, since $\min(\Tmax{i}{\Assignment{i}{k}}, \reqrel{i}{k} + \Tmax{i}{1})$ 
was shown to be an upper bound on the service time of request $\request{i}{k}$, $\forall k$ (see Lemma~\ref{lem:UBreqserv}), it is easy to see that the choice of $\reqserv{i}{1}$ by Equation~\eqref{equ:reqservk} also guarantees a maximum delay for this first request. 
In conclusion, we showed that $\DelayOne{i}{ 1} = \reqserv{i}{1} - \reqrel{i}{1}$ is maximum when $\reqrel{i}{1}$ and $\reqserv{i}{1}$ are given by the equations of Lemma~\ref{lem:wccd}.

\paragraph*{Inductive step:}\paragraphSpace
Assuming that Equations~\eqref{equ:reqrelk} and~\eqref{equ:reqservk} define a release and a service time for the first $k$ requests of $\tau_i$ such that their cumulative delay $\DelayOne{i}{ k}$ is maximized, we will show that defining $\reqrel{i}{k+1}$ and $\reqserv{i}{k+1}$ using the equations of Lemma~\ref{lem:wccd} maximizes $\DelayOne{i}{ k+1}$. 
By applying the same reasoning as in the basic step, it is evident that choosing any other value of $\reqrel{i}{k+1}$ greater than its lower bound 
(given in Lemma~\ref{lem:releaseLB} and Equation~\eqref{equ:reqrelk}) and/or any other service time $\reqserv{i}{k+1}$ lower than its upper bound 
(given in Lemma~\ref{lem:UBreqserv} and Equation~\eqref{equ:reqservk}) induces a lower delay for request $\request{i}{k+1}$, and thus a lower cumulative delay $\DelayOne{i}{ k+1}$. 

However, it may be noted from the release-time equation (Equation~\ref{equ:reqrelk}) that the choice of service time $\reqserv{i}{k}$ of the previous request $\request{i}{k}$ influences the lower bound on $\reqrel{i}{k+1}$, and subsequently an upper bound on $\reqserv{i}{k+1}$ (see Equation~\eqref{equ:reqservk}). One should therefore investigate the following question: although choosing $\reqserv{i}{k} = \min( \Tmax{i}{\Assignment{i}{k}}, \reqrel{i}{k} + \Tmax{i}{1})$ guarantees a maximum cumulative delay $\DelayOne{i}{ k}$ for the first $k$ requests (from the induction hypothesis), doing so might define a range of possible values for $\reqrel{i}{k+1}$ that discards those leading to a maximum cumulative delay $\DelayOne{i}{ k+1}$. The remainder of this proof consists of showing that any value of $\reqserv{i}{k}$ different from that given by Equation~\eqref{equ:reqservk} results in a lower cumulative delay $\DelayOne{i}{ k+1}$.

To figure out how $\reqserv{i}{k}$ affects the range of possible values for $\reqrel{i}{k+1}$ and $\reqserv{i}{k+1}$, 
let us consider different values $X$ and $Y$ for $\reqserv{i}{k}$, where $X = \min( \Tmax{i}{\Assignment{i}{k}}, \reqrel{i}{k} + \Tmax{i}{1})$ (as given by Expression~\eqref{equ:reqservk}) and $Y$ is any positive number $< X$. We show in the following that $\DelayOne{i}{ k+1}$ is always maximum for $\reqserv{i}{k} = X$. 

We first introduce two symbols for compaction and readability: 
\begin{eqnarray}
\ConstantOne & \equals & \Tmin{i}{\Assignment{i}{k+1} - 1} + 1 \mbox{ (first term in Equation~\eqref{equ:LBreqrel})} \nonumber \\
\Delta_{k+1} & \equals & (\Assignment{i}{k+1} - \Assignment{i}{k}) \times \TR \mbox{ (parts of second term in Equation~\eqref{equ:LBreqrel})} \nonumber
\end{eqnarray}
We know from Lemma~\ref{lem:releaseLB} that $\reqrel{i}{k+1} \geq \max( \ConstantOne, \reqserv{i}{k} + \Delta_{k+1} )$ and thus three cases may arise depending on the request-to-slot assignment $\Assignment{i}{k+1}$ of request $\request{i}{k+1}$ (these three cases are a simple enumeration of all possible ``dominance'' relations between the three considered terms):
\begin{enumerate}
 \item $\ConstantOne \leq Y + \Delta_{k+1} < X + \Delta_{k+1}$
 \item $Y + \Delta_{k+1} < \ConstantOne \leq X + \Delta_{k+1}$
 \item $Y + \Delta_{k+1} \leq X + \Delta_{k+1} \leq \ConstantOne$
\end{enumerate}

\noindent We proceed by proving each of these cases.

\noindent\textbf{Case 1:} $\ConstantOne \leq Y + \Delta_{k+1} < X + \Delta_{k+1}$ \\

\noindent In this case, choosing $\reqserv{i}{k} = Y$ leads to $\reqrel{i}{k+1} \geq Y + \Delta_{k+1}$ (from Lemma~\ref{lem:releaseLB}). 
By setting $\reqrel{i}{k+1}$ to $Y + \Delta_{k+1}$, we get 
\begin{eqnarray}
\DelayOne{i}{k+1} & = & \sum_{\ell=1}^{k+1} (\reqserv{i}{\ell} - \reqrel{i}{\ell})  \nonumber \\
& = & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + (\reqserv{i}{k} - \reqrel{i}{k}) + (\reqserv{i}{k+1} - \reqrel{i}{k+1}) \nonumber \\
& = & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + Y - \reqrel{i}{k} + \reqserv{i}{k+1} - (Y + \Delta_{k+1})  \nonumber \\
\label{equ:case1choice1} & = & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + \reqserv{i}{k+1} - \reqrel{i}{k} - \Delta_{k+1} 
\end{eqnarray}
On the other hand, choosing $\reqserv{i}{k} = X$ leads to $\reqrel{i}{k+1}\geq X + \Delta_{k+1}$ 
(from Lemma~\ref{lem:releaseLB}). Then, if we set $\reqrel{i}{k+1}= X + \Delta_{k+1}$ (i.e., the earliest possible release time) then applying the same reasoning as above leads to the same equality, i.e.,
\begin{equation}
\DelayOne{i}{k+1}   =  \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + \reqserv{i}{k+1} - \reqrel{i}{k} - \Delta_{k+1} 
\label{equ:case1choice2} 
\end{equation}
Since~\eqref{equ:case1choice1} =~\eqref{equ:case1choice2}, it is correct to claim that choosing $\reqserv{i}{k} = X$ leads to a worst-case cumulative delay $\DelayOne{i}{k+1} $.\\

\noindent\textbf{Case 2:} $Y + \Delta_{k+1} < \ConstantOne \leq X + \Delta_{k+1}$ \\

\noindent In this case, choosing $\reqserv{i}{k} = Y$ leads to $\reqrel{i}{k+1}\geq \ConstantOne$ (from Lemma~\ref{lem:releaseLB}). 
Let $\reqrel{i}{k+1}= \ConstantOne$ (i.e., the earliest possible release time-instant), from a reasoning similar to that above it holds that
\begin{eqnarray}
\DelayOne{i}{k+1} & = & \sum_{\ell=1}^{k+1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) \nonumber  \\
& = & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + Y - \reqrel{i}{k} + \reqserv{i}{k+1} -\ConstantOne   \nonumber \\
& < & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + Y - \reqrel{i}{k} + \reqserv{i}{k+1} - (Y + \Delta_{k+1})  \nonumber \\
\label{equ:case2choice1} & < & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) - \reqrel{i}{k} + \reqserv{i}{k+1} - \Delta_{k+1} 
\end{eqnarray}
On the other hand, choosing $\reqserv{i}{k} = X$ leads to $\reqrel{i}{k+1}\geq X + \Delta_{k+1}$ (from Lemma~\ref{lem:releaseLB}). If $\reqrel{i}{k+1}= X + \Delta_{k+1}$, 
then the cumulative delay $\DelayOne{i}{k+1} $ of requests $\request{i}{1}, \request{i}{2}, \ldots, \request{i}{k+1}$ is given by
\begin{eqnarray}
\DelayOne{i}{k+1} & = & \sum_{\ell=1}^{k+1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) \nonumber \\
& = & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + X - \reqrel{i}{k} + \reqserv{i}{k+1} -\ConstantOne  \nonumber \\
& \geq & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell}) + X - \reqrel{i}{k} + \reqserv{i}{k+1} - (X + \Delta_{k+1}) \nonumber \\
\label{equ:case2choice2} & \geq & \sum_{\ell=1}^{k-1} (\reqserv{i}{\ell} - \reqrel{i}{\ell})  - \reqrel{i}{k} + \reqserv{i}{k+1} - \Delta_{k+1}
\end{eqnarray}
Since~\eqref{equ:case2choice2} $>$~\eqref{equ:case2choice1}, we can conclude that the cumulative delay is higher for $\reqserv{i}{k} = X$.\\

\noindent\textbf{Case 3:} $Y + \Delta_{k+1} \leq X + \Delta_{k+1} \leq \ConstantOne$ \\

\noindent In this case, choosing either $\reqserv{i}{k} = Y$ or $\reqserv{i}{k} = X$ leads to $\reqrel{i}{k+1}\geq \ConstantOne$ (from Lemma~\ref{lem:releaseLB}). Therefore, the range of possible values for $\reqrel{i}{k+1}$ is not affected by the choice of $\reqserv{i}{k}$ and the maximum cumulative delay is obviously obtained for $\reqserv{i}{k} = X$. \qed
\end{proof}

\subsection{Proof of Lemma~\ref{lem:pruning_solution}}

\begin{proof}
The proof must show that given Conditions~\eqref{equ:lem_assumption0},~\eqref{equ:lem_assumption1}, and~\eqref{equ:lem_assumption2},
Equations~\eqref{equ:lem_obj0},~\eqref{equ:lem_obj1}, and~\eqref{equ:lem_obj2} hold.
From the claim itself, Equation~\eqref{equ:lem_obj0} trivially holds. We stated this equality only for completeness in order to show that the situation after assigning the ($k+1$)'th request is identical to the situation before assigning it. Let us start the proof by introducing some symbols to improve readability:
\begin{eqnarray}
\ConstantOne & \equals & \Tmin{i}{\AssignmentOne{i}{k+1} - 1} + 1 \nonumber \\
\ConstantTwo & \equals & \Tmax{i}{\AssignmentOne{i}{k+1}} \nonumber \\
\Delta_{k+1} & \equals & (\AssignmentOne{i}{k+1} - \AssignmentOne{i}{k}) \times \TR \nonumber \\
\Delta'_{k+1} & \equals & (\AssignmentTwo{i}{k+1} - \AssignmentTwo{i}{k}) \times \TR \nonumber 
\end{eqnarray}
According to these new symbols and the equations of Lemma~\ref{lem:wccd}, the four quantities $\reqservOne{i}{k+1}$, $\reqrelOne{i}{k+1}$, $\reqservTwo{i}{k+1}$, and $\reqrelTwo{i}{k+1}$ can be re-written as
\begin{eqnarray}
\label{equ:reqrelOne} \reqrelOne{i}{k+1} & = & \max(\ConstantOne, \reqservOne{i}{k} + \Delta_{k+1}) \\
\label{equ:reqservOne} \reqservOne{i}{k+1} & = & \min(\ConstantTwo, \reqrelOne{i}{k+1} + \Tmax{i}{1}) \\
\label{equ:reqrelTwo} \reqrelTwo{i}{k+1} & = & \max(\ConstantOne, \reqservTwo{i}{k} + \Delta'_{k+1})  \\
\label{equ:reqservTwo} \reqservTwo{i}{k+1} & = & \min(\ConstantTwo, \reqrelTwo{i}{k+1} + \Tmax{i}{1})  
\end{eqnarray}
From~\eqref{equ:lem_assumption2}, it holds that
\[ \reqservTwo{i}{k} - \AssignmentTwo{i}{k} \times \TR \leq \reqservOne{i}{k} - \AssignmentOne{i}{k} \times \TR \]
By adding $h \times \TR$ to both sides of this inequality, we get
\[ \reqservTwo{i}{k} + (h - \AssignmentTwo{i}{k}) \times \TR \leq \reqservOne{i}{k} + (h - \AssignmentOne{i}{k}) \times \TR \]
and the symbols $\Delta_{k+1}$ and $\Delta'_{k+1}$ can now be used to simplify this result: 
%
\begin{equation}
\label{equ:global_case}
\reqservTwo{i}{k} + \Delta'_{k+1} \leq \reqservOne{i}{k} + \Delta_{k+1}
\end{equation}
%
In order to prove that Inequalities~\eqref{equ:lem_obj1} and~\eqref{equ:lem_obj2} always hold true, we must investigate three cases. These three cases simply come from an enumeration of all possible ``dominance'' relations between the three terms $\ConstantOne$, $\reqservOne{i}{k} + \Delta_{k+1}$, and $\reqservTwo{i}{k} + \Delta'_{k+1}$:
\begin{itemize}
 \item {Case 1:} $\reqservTwo{i}{k} + \Delta'_{k+1} \leq \reqservOne{i}{k} + \Delta_{k+1} \leq \ConstantOne$ 
 \item {Case 2:} $\reqservTwo{i}{k} + \Delta'_{k+1} \leq \ConstantOne \leq \reqservOne{i}{k} + \Delta_{k+1}$ 
 \item {Case 3:} $\ConstantOne \leq \reqservTwo{i}{k} + \Delta'_{k+1} \leq \reqservOne{i}{k} + \Delta_{k+1}$ 
\end{itemize}

\noindent\textbf{Case 1:} $\reqservTwo{i}{k} + \Delta'_{k+1} \leq \reqservOne{i}{k} + \Delta_{k+1} \leq \ConstantOne$ \\

\noindent In this case, we have 
\begin{align}
\reqrelOne{i}{k+1} & = \reqrelTwo{i}{k+1} = \ConstantOne & \mbox{from~\eqref{equ:reqrelOne} and~\eqref{equ:reqrelTwo}} \nonumber \\
\mbox{and thus } \reqservOne{i}{k+1} & = \reqservTwo{i}{k+1} & \mbox{from~\eqref{equ:reqservOne} and~\eqref{equ:reqservTwo}} \nonumber 
\end{align}
These service times trivially satisfy Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. Then, using $\reqrelOne{i}{k+1} = \reqrelTwo{i}{k+1}$, $\reqservOne{i}{k+1} = \reqservTwo{i}{k+1}$, and $\DelayOne{i}{k} \leq \DelayTwo{i}{k}$ from~\eqref{equ:lem_assumption1}, we get
%
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} \leq \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
This inequality can be re-written as
%
\[ \DelayOne{i}{k+1} \leq \DelayTwo{i}{k+1} \]
%
which satisfies Condition~\eqref{equ:lem_obj1}. \\

\noindent\textbf{Case 2:} $\reqservTwo{i}{k} + \Delta'_{k+1} \leq \ConstantOne \leq \reqservOne{i}{k} + \Delta_{k+1}$ \\

\noindent In this case, from~\eqref{equ:reqrelOne} and~\eqref{equ:reqrelTwo} we have the following relation between the release time-instants of the $(k+1)$'th request in the mappings $\Mapping_i$ and $\Mapping'_i$: 
%
\begin{equation}
\label{equ:case2_rel}
\reqrelOne{i}{k+1} = \reqservOne{i}{k} + \Delta_{k+1} \geq \reqrelTwo{i}{k+1} = \ConstantOne
\end{equation}
%
Next, we need to handle the relation between the service times $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ of this last request and we must explore three more sub-cases. These three sub-cases simply come from an enumeration of all possible ``dominance'' relations between the three terms $\ConstantTwo$, $\reqrelOne{i}{k+1} + \Tmax{i}{1}$, and $\reqrelTwo{i}{k+1} + \Tmax{i}{1}$:
\begin{itemize}
 \item {Case 2.1:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ 
 \item {Case 2.2:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$
 \item {Case 2.3:} $\ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$
\end{itemize}

\noindent\textbf{Case 2.1:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq\ConstantTwo$ \\

\noindent In this particular sub-case, it holds from~\eqref{equ:reqservOne} and~\eqref{equ:reqservTwo} that
%
\begin{eqnarray}
\label{equ:case2.1_serv1} \reqservOne{i}{k+1} & = & \reqrelOne{i}{k+1} + \Tmax{i}{1} \\
\label{equ:case2.1_serv2} \reqservTwo{i}{k+1} & = & \reqrelTwo{i}{k+1} + \Tmax{i}{1}
\end{eqnarray}
%
and it immediately follows from~\eqref{equ:case2_rel},~\eqref{equ:case2.1_serv1} and~\eqref{equ:case2.1_serv2} that $\reqservOne{i}{k+1} \geq \reqservTwo{i}{k+1}$, which satisfies Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} =~h$. Also from~\eqref{equ:case2.1_serv1} and~\eqref{equ:case2.1_serv2}, it holds that $\reqservOne{i}{k+1} - \reqrelOne{i}{k+1} = \Tmax{i}{1} = \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1}$ and by using this equality together with $\DelayOne{i}{k} \leq \DelayTwo{i}{k}$ from~\eqref{equ:lem_assumption1}, we obtain
%
\[ \DelayOne{i}{k} + \Tmax{i}{1}  \leq \DelayTwo{i}{k} + \Tmax{i}{1} \]
%
and thus
%
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} \leq \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
By re-writing this inequality we get
%
\[ \DelayOne{i}{k+1} \leq \DelayTwo{i}{k+1} \]
%
which satisfies Condition~\eqref{equ:lem_obj1}. \\

\noindent\textbf{Case 2.2:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$ \\

\noindent In this case, we have 
\begin{align}
\reqservOne{i}{k+1} & = \ConstantTwo & \mbox{from~\eqref{equ:reqservOne}} \nonumber \\
\mbox{and } \reqservTwo{i}{k+1} & = \reqrelTwo{i}{k+1} + \Tmax{i}{1} & \mbox{from~\eqref{equ:reqservTwo}} \nonumber
\end{align}
It thus holds from Case~2.2 that $\reqservOne{i}{k+1} \geq \reqservTwo{i}{k+1}$ and these service times trivially satisfy Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. Then, assuming \emph{by contradiction} that Condition~\eqref{equ:lem_obj1} is \emph{not} satisfied, we must have:
%
\[ \DelayOne{i}{k+1} > \DelayTwo{i}{k+1} \]
%
which can be re-written as
%
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
By replacing $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ with their values, we get
%
\[ \DelayOne{i}{k} +  \ConstantTwo - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \Tmax{i}{1} \]
%
and then,
%
\[ \DelayOne{i}{k} > \DelayTwo{i}{k} + \Tmax{i}{1} - (\ConstantTwo - \reqrelOne{i}{k+1}) \]
%
and since from Case~2.2 $\ConstantTwo - \reqrelOne{i}{k+1} \leq \Tmax{i}{1}$, it follows from the above inequality that
%
\[ \DelayOne{i}{k} > \DelayTwo{i}{k} \]
%
which contradicts Condition~\eqref{equ:lem_assumption1}. This contradiction implies that Condition~\eqref{equ:lem_obj1} \emph{is} satisfied.  \\

\noindent\textbf{Case 2.3:} $\ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$ \\

\noindent In this case it holds from~\eqref{equ:reqservOne} and~\eqref{equ:reqservTwo} that 
\[ \reqservOne{i}{k+1} = \reqservTwo{i}{k+1} = \ConstantTwo \] 
and it immediately follows that $\reqservOne{i}{k+1} \geq \reqservTwo{i}{k+1}$, which satisfies Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. Then, assuming \emph{by contradiction} that Condition~\eqref{equ:lem_obj1} is \emph{not} satisfied, we must have:
%
\[ \DelayOne{i}{k+1} > \DelayTwo{i}{k+1} \]
%
which can be re-written as
%
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
By replacing $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ with their values, we get
%
\[ \DelayOne{i}{k} +  \ConstantTwo - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} +  \ConstantTwo - \reqrelTwo{i}{k+1} \]
%
and then,
%
\[\DelayOne{i}{k} > \DelayTwo{i}{k} + \reqrelOne{i}{k+1} - \reqrelTwo{i}{k+1}  \]
%
From Equation~\eqref{equ:case2_rel}, a case condition of Case~2, we have $\reqrelOne{i}{k+1} \geq \reqrelTwo{i}{k+1}$ and it follows from the above inequality that
%
\[\DelayOne{i}{k} > \DelayTwo{i}{k} \]
%
which contradicts Condition~\eqref{equ:lem_assumption1}. This contradiction implies that Condition~\eqref{equ:lem_obj1} \emph{is} satisfied.  \\

\noindent\textbf{Case 3:} $\ConstantOne \leq \reqservTwo{i}{k} + \Delta'_{k+1} \leq \reqservOne{i}{k} + \Delta_{k+1}$ \\

\noindent In this case, from~\eqref{equ:reqrelOne} and~\eqref{equ:reqrelTwo} we have the following relation between the release time-instants of the $(k+1)$'th request in the mappings $\Mapping_i$ and $\Mapping'_i$: 
%
\begin{eqnarray}
\label{equ:case3_rel1} \reqrelOne{i}{k+1} & = & \reqservOne{i}{k} + \Delta_{k+1} \nonumber \\
\label{equ:case3_rel2} \reqrelTwo{i}{k+1} & = & \reqservTwo{i}{k} + \Delta'_{k+1} \nonumber
\end{eqnarray}
%
Next, we need to handle the relation between the service times $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ of this last request and we hence have the same three sub-cases to explore as in Case~2, but with the slightly different case conditions of Case~3.
\begin{itemize}
 \item{Case 3.1:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ 
 \item{Case 3.2:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$ 
 \item{Case 3.3:} $\ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$
\end{itemize}

\noindent\textbf{Case 3.1:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ \\

\noindent From~\eqref{equ:reqservOne} and~\eqref{equ:reqservTwo}, we get 
\begin{eqnarray}
\label{equ:case3.1_serv1} \reqservOne{i}{k+1} & = & \reqrelOne{i}{k+1} + \Tmax{i}{1} \\
\label{equ:case3.1_serv2} \mbox{and } \reqservTwo{i}{k+1} & = & \reqrelTwo{i}{k+1} + \Tmax{i}{1} 
\end{eqnarray}
%
and it immediately follows from~\eqref{equ:case3.1_serv1} and~\eqref{equ:case3.1_serv2} that $\reqservOne{i}{k+1} \geq \reqservTwo{i}{k+1}$, which satisfies Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. Also from~\eqref{equ:case3.1_serv1} and~\eqref{equ:case3.1_serv2}, it holds that $\reqservOne{i}{k+1} - \reqrelOne{i}{k+1} = \Tmax{i}{1} = \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1}$. Similarly to Case~2.1, by using this equality together with $\DelayOne{i}{k} \leq \DelayTwo{i}{k}$ from~\eqref{equ:lem_assumption1}, we obtain
\[ \DelayOne{i}{k} + \Tmax{i}{1}  \leq \DelayTwo{i}{k} + \Tmax{i}{1} \]
and thus
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} \leq \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
By re-writing this inequality we get
\[ \DelayOne{i}{k+1} \leq \DelayTwo{i}{k+1} \]
which satisfies Condition~\eqref{equ:lem_obj1}. \\

\noindent\textbf{Case 3.2:} $\reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$ \\

\noindent In this case, we have
\begin{eqnarray}
\reqservOne{i}{k+1} = & \ConstantTwo & \mbox{ from~\eqref{equ:reqservOne}} \nonumber \\
\reqservTwo{i}{k+1} = & \reqrelTwo{i}{k+1} + \Tmax{i}{1} & \mbox{ from~\eqref{equ:reqservTwo}} \nonumber
\end{eqnarray}
It thus holds from Case~3.2 that $\reqservOne{i}{k+1} \geq \reqservTwo{i}{k+1}$ and these service times trivially satisfy Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. Then, assuming \emph{by contradiction} that Condition~\eqref{equ:lem_obj1} is \emph{not} satisfied, we must have:
\[ \DelayOne{i}{k+1} > \DelayTwo{i}{k+1} \]
which can be re-written as 
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
By replacing $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ with their values, we get
\[ \DelayOne{i}{k} +  \ConstantTwo - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \Tmax{i}{1} \]
and then,
\[ \DelayOne{i}{k} > \DelayTwo{i}{k} + \Tmax{i}{1} - (\ConstantTwo - \reqrelOne{i}{k+1}) \]
and since from Case~3.2 $\ConstantTwo - \reqrelOne{i}{k+1} \leq \Tmax{i}{1}$, it follows from the above inequality that
\[ \DelayOne{i}{k} > \DelayTwo{i}{k} \]
which contradicts Condition~\eqref{equ:lem_assumption1}. This contradiction implies that Condition~\eqref{equ:lem_obj1} \emph{is} satisfied.  \\

\noindent\textbf{Case 3.3:} $\ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \reqrelOne{i}{k+1} + \Tmax{i}{1}$ \\

\noindent In this case, it holds from~\eqref{equ:reqservOne} and~\eqref{equ:reqservTwo} that 
\[ \reqservOne{i}{k+1} = \reqservTwo{i}{k+1} = \ConstantTwo \] 
and it immediately follows that $\reqservOne{i}{k+1} \geq \reqservTwo{i}{k+1}$, which satisfies Condition~\eqref{equ:lem_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. Then, assuming \emph{by contradiction} that Condition~\eqref{equ:lem_obj1} is \emph{not} satisfied, we must have:
\[ \DelayOne{i}{k+1} > \DelayTwo{i}{k+1} \]
which can be re-written as
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
By replacing $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ with their values, we get
\[ \DelayOne{i}{k} +  \ConstantTwo - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} +  \ConstantTwo - \reqrelTwo{i}{k+1} \]
and then,
\[\DelayOne{i}{k} > \DelayTwo{i}{k} + \reqrelOne{i}{k+1} - \reqrelTwo{i}{k+1}  \]
%
From Case~3.3, we have $\reqrelOne{i}{k+1} \geq \reqrelTwo{i}{k+1}$ and it follows from the above inequality that 
\[\DelayOne{i}{k} > \DelayTwo{i}{k} \]
which contradicts Condition~\eqref{equ:lem_assumption1}. This contradiction implies that Condition~\eqref{equ:lem_obj1} \emph{is} satisfied.  \qed
\end{proof}

\subsection{Proof of Lemma~\ref{lem:pruning_solution2}}

\begin{proof}
The proof must show that given Conditions~\eqref{equ:lem2_assumption0},~\eqref{equ:lem2_assumption1}, and~\eqref{equ:lem2_assumption2}, Equations~\eqref{equ:lem2_obj0},~\eqref{equ:lem2_obj1}, and~\eqref{equ:lem2_obj2} hold.
From the claim itself, Equation~\eqref{equ:lem2_obj0} trivially holds since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. We stated this equality only for completeness in order to show that the situation after assigning the ($k+1$)'th request is the same as the situation before assigning it.  Let us start the proof by introducing some symbols to improve readability:
%
\begin{eqnarray}
& & \ConstantOne \equals \Tmin{i}{h - 1} + 1  \text{ and } \ConstantTwo \equals \Tmax{i}{h} \nonumber \\
& \text{and} & \Delta_{k+1} \equals (h - \AssignmentOne{i}{k}) \times \TR \text{ and } \Delta'_{k+1} \equals (h - \AssignmentTwo{i}{k}) \times \TR \nonumber 
\end{eqnarray}
% 
According to these new symbols and from the equations of Lemma~\ref{lem:wccd}, the four quantities $\reqservOne{i}{k+1}$, $\reqrelOne{i}{k+1}$, $\reqservTwo{i}{k+1}$, and $\reqrelTwo{i}{k+1}$ can be re-written as
% 
\begin{eqnarray}
\label{equ:lem2_reqrelOne} \reqrelOne{i}{k+1} & = & \max(\ConstantOne, \reqservOne{i}{k} + \Delta_{k+1}) \\
\label{equ:lem2_reqservOne} \reqservOne{i}{k+1} & = & \min(\ConstantTwo, \reqrelOne{i}{k+1} + \Tmax{i}{1}) \\
\label{equ:lem2_reqrelTwo} \reqrelTwo{i}{k+1} & = & \max(\ConstantOne, \reqservTwo{i}{k} + \Delta'_{k+1})  \\
\label{equ:lem2_reqservTwo} \reqservTwo{i}{k+1} & = & \min(\ConstantTwo, \reqrelTwo{i}{k+1} + \Tmax{i}{1})  
\end{eqnarray}
% 
According to~\eqref{equ:lem2_assumption2}, we have 
%
\[ \reqservOne{i}{k} - \AssignmentOne{i}{k} \times \TR \leq \reqservTwo{i}{k} - \AssignmentTwo{i}{k} \times \TR \]
%
and by adding ``$h \times \TR$'' to both sides we get
%
\[ \reqservOne{i}{k} + (h - \AssignmentOne{i}{k}) \times \TR \leq \reqservTwo{i}{k} + (h - \AssignmentTwo{i}{k}) \times \TR \]
%
which gives, by definition of $\Delta_{k+1}$ and $\Delta'_{k+1}$,
%
\begin{equation}
\label{equ:lem2_global_case}
\reqservOne{i}{k} + \Delta_{k+1} \leq \reqservTwo{i}{k} + \Delta'_{k+1}
\end{equation}
%
With the help of Inequality~\eqref{equ:lem2_global_case}, we will now prove that Inequalities~\eqref{equ:lem2_obj1} and~\eqref{equ:lem2_obj2} always hold true (remember that Inequality~\eqref{equ:lem2_obj0} is always satisfied). Note that both Inequalities~\eqref{equ:lem2_obj1} and~\eqref{equ:lem2_obj2} are indirectly based on the release and service time of the $(k+1)$'th request in both mappings $\Mapping_i$ and $\Mapping_i'$, i.e. they are based on the four quantities $\reqservOne{i}{k+1}$, $\reqrelOne{i}{k+1}$, $\reqservTwo{i}{k+1}$, and $\reqrelTwo{i}{k+1}$. Therefore, if we first focus on the relation between the release times $\reqrelOne{i}{k+1}$ and $\reqrelTwo{i}{k+1}$ in the two mappings $\Mapping_i$ and $\Mapping_i'$ then it holds from~\eqref{equ:lem2_global_case},~\eqref{equ:lem2_reqrelOne}, and~\eqref{equ:lem2_reqrelTwo} that only three cases must be investigated:
\begin{itemize}
 \item{Case 1:} $\reqservOne{i}{k} + \Delta_{k+1} \leq \reqservTwo{i}{k} + \Delta'_{k+1} \leq \ConstantOne$ 
 \item{Case 2:} $\reqservOne{i}{k} + \Delta_{k+1} \leq \ConstantOne \leq \reqservTwo{i}{k} + \Delta'_{k+1}$ 
 \item{Case 3:} $\ConstantOne \leq \reqservOne{i}{k} + \Delta_{k+1} \leq \reqservTwo{i}{k} + \Delta'_{k+1}$ 
\end{itemize}

\noindent\textbf{Case 1:} $\reqservOne{i}{k} + \Delta_{k+1} \leq \reqservTwo{i}{k} + \Delta'_{k+1} \leq \ConstantOne$ \\

\textit{Proof of~\eqref{equ:lem2_obj2}:} In this case, we have from~\eqref{equ:lem2_reqrelOne} and~\eqref{equ:lem2_reqrelTwo}, $\reqrelOne{i}{k+1} = \reqrelTwo{i}{k+1} = \ConstantOne$ and from~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo}, $\reqservOne{i}{k+1} = \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. \\

\textit{Proof of~\eqref{equ:lem2_obj1}:} By combining~\eqref{equ:lem2_assumption0} with~\eqref{equ:lem2_assumption2} we get $\reqservOne{i}{k} \leq \reqservTwo{i}{k}$ and thus it holds from Inequality~\eqref{equ:lem2_assumption1} that $\DelayOne{i}{k} \leq \DelayTwo{i}{k}$. Therefore, since $\reqrelOne{i}{k+1} = \reqrelTwo{i}{k+1}$ and $\reqservOne{i}{k+1} = \reqservTwo{i}{k+1}$ it also holds that
%
\[ \DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} \leq \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
and thus,
%
\[ \DelayOne{i}{k+1} \leq \DelayTwo{i}{k+1} \]
%
and since $\reqservOne{i}{k+1} = \reqservTwo{i}{k+1}$ in this case, we can write
\[ \DelayOne{i}{k+1} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1})\leq \DelayTwo{i}{k+1} \]
which satisfies~\eqref{equ:lem2_obj1}.\\

\noindent\textbf{Case 2:} $\reqservOne{i}{k} + \Delta_{k+1} \leq \ConstantOne \leq \reqservTwo{i}{k} + \Delta'_{k+1} $ \\

\noindent In this case, we get from~\eqref{equ:lem2_reqrelOne} and~\eqref{equ:lem2_reqrelTwo}, 
%
\begin{equation}
\label{equ:lem2_case2_rel}
\reqrelTwo{i}{k+1} = \reqservTwo{i}{k} + \Delta'_{k+1} \geq \reqrelOne{i}{k+1} = \ConstantOne
\end{equation}
%
Next, we need to handle the relation between the service times $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ in the two mappings $\Mapping_i$ and $\Mapping_i'$ and it holds from~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo} that we have three more sub-cases to explore:

\begin{itemize}
 \item{Case 2.1:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ 
 \item{Case 2.2:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$ 
 \item{Case 2.3:} $\ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$
\end{itemize}

\noindent\textbf{Case 2.1:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ \\

\textit{Proof of~\eqref{equ:lem2_obj2}:} From~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo} we get 
%
\begin{eqnarray}
\label{equ:lem2_case2.1_serv1} \reqservOne{i}{k+1} & = & \reqrelOne{i}{k+1} + \Tmax{i}{1} \\
\label{equ:lem2_case2.1_serv2} \reqservTwo{i}{k+1} & = & \reqrelTwo{i}{k+1} + \Tmax{i}{1}
\end{eqnarray}
%
From~\eqref{equ:lem2_case2_rel},~\eqref{equ:lem2_case2.1_serv1} and~\eqref{equ:lem2_case2.1_serv2}, it immediately follows that $\reqservOne{i}{k+1} \leq \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} =~h$. \\

\textit{Proof of~\eqref{equ:lem2_obj1}:} Also from~\eqref{equ:lem2_case2.1_serv1} and~\eqref{equ:lem2_case2.1_serv2}, it holds that $\reqservOne{i}{k+1} - \reqrelOne{i}{k+1} = \Tmax{i}{1} = \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1}$. Using $\DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) \leq \DelayTwo{i}{k}$ from~\eqref{equ:lem2_assumption1}, we get
%
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) + \Tmax{i}{1}  \leq \DelayTwo{i}{k} + \Tmax{i}{1} \]
%
and thus
%
\[ \DelayOne{i}{k}  + (\reqservTwo{i}{k} - \reqservOne{i}{k}) + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} \leq \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
which implies
%
\begin{equation}
\label{equ:lem2_case2.1_obj1}
\DelayOne{i}{k+1} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) \leq \DelayTwo{i}{k+1}
\end{equation}
%
Now, we have:
\begin{eqnarray}
\reqservTwo{i}{k+1} - \reqservOne{i}{k+1} & = & \reqrelTwo{i}{k+1} + \Tmax{i}{1} - ( \reqrelOne{i}{k+1} + \Tmax{i}{1}) \nonumber \\
& & \mbox{from~\eqref{equ:lem2_case2.1_serv1} and~\eqref{equ:lem2_case2.1_serv2}}  \nonumber \\
& = & \reqrelTwo{i}{k+1} - \reqrelOne{i}{k+1} \nonumber \\
& = & \reqservTwo{i}{k} + \Delta'_{k+1} - \ConstantOne \nonumber \\
& & \mbox{from~\eqref{equ:lem2_case2_rel}}  \nonumber \\ 
& \leq & \reqservTwo{i}{k} + \Delta'_{k+1} - (\reqservOne{i}{k} + \Delta_{k+1}) \nonumber \\
& & \mbox{from Case~2} \nonumber \\
& \leq & \reqservTwo{i}{k} - \reqservOne{i}{k} + (\Delta'_{k+1} - \Delta_{k+1}) \nonumber \\
& \leq & \reqservTwo{i}{k} - \reqservOne{i}{k} + (h - \AssignmentTwo{i}{k}) \times \TR - (h - \AssignmentOne{i}{k}) \times \TR \nonumber \\
& & \mbox{from the definition of $\Delta_{k+1}$ and $\Delta'_{k+1}$} \nonumber \\
& \leq & \reqservTwo{i}{k} - \reqservOne{i}{k} + (\AssignmentOne{i}{k} - \AssignmentTwo{i}{k}) \times \TR \nonumber \\
& \leq & \reqservTwo{i}{k} - \reqservOne{i}{k} \nonumber \\
& & \mbox{since $\AssignmentOne{i}{k} - \AssignmentTwo{i}{k} \leq 0$ from~\eqref{equ:lem2_assumption0}} \nonumber
\end{eqnarray}
Therefore it holds from the above inequality and from~\eqref{equ:lem2_case2.1_obj1} that:
\[ \DelayOne{i}{k+1} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) \leq \DelayTwo{i}{k+1} \]
which satisfies~\eqref{equ:lem2_obj1}.\\

\noindent\textbf{Case 2.2:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$ \\

\textit{Proof of~\eqref{equ:lem2_obj2}:} From~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo}, we get $\reqservOne{i}{k+1} = \reqrelOne{i}{k+1} + \Tmax{i}{1}$ and $\reqservTwo{i}{k+1} = \ConstantTwo$. We thus get $\reqservOne{i}{k+1} \leq \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. \\

\textit{Proof of~\eqref{equ:lem2_obj1}:} We use proof by contradiction. Suppose that Inequality~\eqref{equ:lem2_obj1} is \emph{not} satisfied, we must have:
%
\[ \DelayOne{i}{k+1} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) > \DelayTwo{i}{k+1} \]
%
and thus,
%
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
which can be re-written as 
%
\[ \DelayOne{i}{k} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} - \reqrelTwo{i}{k+1} \]
%
and since it holds from~\eqref{equ:lem2_case2_rel} that $\reqrelTwo{i}{k+1} = \reqservTwo{i}{k} + \Delta'_{k+1} \geq \reqrelOne{i}{k+1} = \ConstantOne$ and $\ConstantOne \geq \reqservOne{i}{k} + \Delta_{k+1}$ from Case~2, we get
%
\[ \DelayOne{i}{k} - (\reqservOne{i}{k} + \Delta_{k+1}) > \DelayTwo{i}{k} - (\reqservTwo{i}{k} + \Delta'_{k+1}) \]
and thus
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) + (\Delta'_{k+1} - \Delta_{k+1}) > \DelayTwo{i}{k} \]
As seen at the end of Case~2.1, we have $\Delta'_{k+1} - \Delta_{k+1} \leq 0$ from~\eqref{equ:lem2_assumption0} and from the definitions of $\Delta_{k+1}$ and $\Delta'_{k+1}$, and thus it holds that
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) > \DelayTwo{i}{k} \]
which contradicts~\eqref{equ:lem2_assumption1}.
This contradiction implies that Condition~\eqref{equ:lem2_obj1} is always satisfied. \\ 

\noindent\textbf{Case 2.3:} $\ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$ \\

\textit{Proof of~\eqref{equ:lem2_obj2}:} From~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo}, we get $\reqservOne{i}{k+1} = \reqservTwo{i}{k+1} = \ConstantTwo$ and it immediately follows that $\reqservOne{i}{k+1} \leq \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. \\

\textit{Proof of~\eqref{equ:lem2_obj1}:} The proof is identical to the proof of~\eqref{equ:lem2_obj1} in Case~2.2 and is repeated here only for completeness. We use proof by contradiction. Suppose that Inequality~\eqref{equ:lem2_obj1} is \emph{not} satisfied, we must have:
%
\[ \DelayOne{i}{k+1} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) > \DelayTwo{i}{k+1} \]
%
and thus,
%
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
which can be re-written as 
%
\[ \DelayOne{i}{k} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} - \reqrelTwo{i}{k+1} \]
%
and since it holds from~\eqref{equ:lem2_case2_rel} that $\reqrelTwo{i}{k+1} = \reqservTwo{i}{k+1} + \Delta'_{k+1} \geq \reqrelOne{i}{k+1} = \ConstantOne$ and $\ConstantOne \geq \reqservOne{i}{k} + \Delta_{k+1}$ from Case~2, we get
%
\[ \DelayOne{i}{k} - (\reqservOne{i}{k} + \Delta_{k+1}) > \DelayTwo{i}{k} - (\reqservTwo{i}{k+1} + \Delta'_{k+1}) \]
and thus
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k}) + (\Delta'_{k+1} - \Delta_{k+1}) > \DelayTwo{i}{k} \]
As seen at the end of Case~2.1, we have $\Delta'_{k+1} - \Delta_{k+1} \leq 0$ from~\eqref{equ:lem2_assumption0} and from the definitions of $\Delta_{k+1}$ and $\Delta'_{k+1}$, and thus it holds that
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k}) > \DelayTwo{i}{k} \]
which contradicts~\eqref{equ:lem2_assumption1}.
This contradiction implies that Condition~\eqref{equ:lem2_obj1} is always satisfied. \\



\noindent\textbf{Case 3:} $\ConstantOne \leq \reqservOne{i}{k} + \Delta_{k+1} \leq \reqservTwo{i}{k} + \Delta'_{k+1}$ \\

In this case, we get from~\eqref{equ:lem2_reqrelOne} and~\eqref{equ:lem2_reqrelTwo}, $\reqrelOne{i}{k+1} = \reqservOne{i}{k} + \Delta_{k+1}$ and $\reqrelTwo{i}{k+1} =\reqservTwo{i}{k} + \Delta'_{k+1}$ and thus, according to~\eqref{equ:lem2_global_case}, it holds that 
\begin{equation}
\label{equ:lem2_case3_rel}
\reqrelOne{i}{k+1} \leq \reqrelTwo{i}{k+1}
\end{equation}
Again, we need to handle the relation between the service times $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ in the two mappings $\Mapping_i$ and $\Mapping_i'$ and it holds from~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo} that we have three more sub-cases to explore:

\begin{itemize}
 \item{Case 3.1:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ 
 \item{Case 3.2:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$ 
 \item{Case 3.3:} $\ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$
\end{itemize}

\noindent\textbf{Case 3.1:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo$ \\

\textit{Proof of~\eqref{equ:lem2_obj2}:} From~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo}, we get 
\begin{eqnarray}
\label{equ:lem2_case3.1_serv1} \reqservOne{i}{k+1} & = & \reqrelOne{i}{k+1} + \Tmax{i}{1} \\
\label{equ:lem2_case3.1_serv2} \reqservTwo{i}{k+1} & = & \reqrelTwo{i}{k+1} + \Tmax{i}{1}
\end{eqnarray}

From~\eqref{equ:lem2_case3_rel},~\eqref{equ:lem2_case3.1_serv1} and~\eqref{equ:lem2_case3.1_serv2}, it immediately follows that $\reqservOne{i}{k+1} \leq \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$.\\

\textit{Proof of~\eqref{equ:lem2_obj1}:} 
From~\eqref{equ:lem2_case3.1_serv1} and~\eqref{equ:lem2_case3.1_serv2}, it holds that $\reqservOne{i}{k+1} - \reqrelOne{i}{k+1} = \Tmax{i}{1} = \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1}$. 
Using $\DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) \leq \DelayTwo{i}{k}$ from~\eqref{equ:lem2_assumption1}, we get
%
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) + \Tmax{i}{1}  \leq \DelayTwo{i}{k} + \Tmax{i}{1} \]
%
and then
%
\[ \DelayOne{i}{k}  + (\reqservTwo{i}{k} - \reqservOne{i}{k}) + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} \leq \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
%
which implies
%
\begin{equation}
\label{equ:lem2_case3.1_obj1}
\DelayOne{i}{k+1} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) \leq \DelayTwo{i}{k+1}
\end{equation}
%
Now, we have:
\begin{eqnarray}
\reqservTwo{i}{k+1} - \reqservOne{i}{k+1} & = & \reqrelTwo{i}{k+1} + \Tmax{i}{1} - ( \reqrelOne{i}{k+1} + \Tmax{i}{1}) \nonumber \\
& & \mbox{from~\eqref{equ:lem2_case3.1_serv1} and~\eqref{equ:lem2_case3.1_serv2}}  \nonumber \\
& = & \reqrelTwo{i}{k+1} - \reqrelOne{i}{k+1} \nonumber \\
& & \mbox{from~\eqref{equ:lem2_reqrelOne} and~\eqref{equ:lem2_reqrelTwo} and Case~3} \nonumber \\
& = & \reqservTwo{i}{k} + \Delta'_{k+1} - (\reqservOne{i}{k} + \Delta_{k+1}) \nonumber \\
& = & \reqservTwo{i}{k} - \reqservOne{i}{k} + (\Delta'_{k+1} - \Delta_{k+1}) \nonumber \\
& \leq & \reqservTwo{i}{k} - \reqservOne{i}{k} \nonumber \\
& & \mbox{because $\Delta'_{k+1} - \Delta_{k+1} \leq 0$ from~\eqref{equ:lem2_assumption0} and the definitions of $\Delta_{k+1}$ and $\Delta'_{k+1}$} \nonumber
\end{eqnarray}
Therefore, it holds from the above inequality and from~\eqref{equ:lem2_case3.1_obj1} that:
\[ \DelayOne{i}{k+1} + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) \leq \DelayTwo{i}{k+1} \]
which satisfies~\eqref{equ:lem2_obj1}.\\


\noindent\textbf{Case 3.2:} $\reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \ConstantTwo \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$ \\


\paragraph{Proof of~\eqref{equ:lem2_obj2}}
From~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo}, we get $\reqservOne{i}{k+1} = \reqrelOne{i}{k+1} + \Tmax{i}{1}$ and $\reqservTwo{i}{k+1} = \ConstantTwo$. We thus get $\reqservOne{i}{k+1} \leq \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. 

\paragraph{Proof of~\eqref{equ:lem2_obj1}} The proof is by contradiction. If Inequality~\eqref{equ:lem2_obj1} is \emph{not} satisfied then we must have:
\[ \DelayOne{i}{k+1}  + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) > \DelayTwo{i}{k+1} \]
and thus,
\[ (\DelayOne{i}{k} + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1}) + (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
which can be re-written as 
\[ \DelayOne{i}{k} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} - \reqrelTwo{i}{k+1} \]
By replacing the values of $\reqrelOne{i}{k+1}$ and $\reqrelTwo{i}{k+1}$ from Case~3, we have
\[ \DelayOne{i}{k} - (\reqservOne{i}{k} + \Delta_{k+1}) > \DelayTwo{i}{k} - (\reqservTwo{i}{k} + \Delta'_{k+1}) \]
and thus,
\[ \DelayOne{i}{k} + (\reqservTwo{i}{k} - \reqservOne{i}{k}) + (\Delta'_{k+1} - \Delta_{k+1}) > \DelayTwo{i}{k} \]
and since $\Delta'_{k+1} - \Delta_{k+1} \leq 0$ from~\eqref{equ:lem2_assumption0} and the definitions of $\Delta_{k+1}$ and $\Delta'_{k+1}$, it holds that
\[\DelayOne{i}{k} + \reqservTwo{i}{k} - \reqservOne{i}{k} > \DelayTwo{i}{k} \]
which contradicts~\eqref{equ:lem2_assumption1}. This contradiction implies that Condition~\eqref{equ:lem2_obj1} is satisfied. \\

\noindent\textbf{Case 3.3:} $\ConstantTwo \leq \reqrelOne{i}{k+1} + \Tmax{i}{1} \leq \reqrelTwo{i}{k+1} + \Tmax{i}{1}$

\paragraph{Proof of~\eqref{equ:lem2_obj2}}
From~\eqref{equ:lem2_reqservOne} and~\eqref{equ:lem2_reqservTwo}, we get $\reqservOne{i}{k+1} = \reqservTwo{i}{k+1} = \ConstantTwo$ and it immediately follows that $\reqservOne{i}{k+1} \leq \reqservTwo{i}{k+1}$, which satisfies~\eqref{equ:lem2_obj2} since $\AssignmentOne{i}{k+1} = \AssignmentTwo{i}{k+1} = h$. 

\paragraph{Proof of~\eqref{equ:lem2_obj1}} The proof is by contradiction. 
If Inequality~\eqref{equ:lem2_obj1} is \emph{not} satisfied, we must have:
\[ \DelayOne{i}{k+1} +  (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1})  > \DelayTwo{i}{k+1} \]
and thus,
\[ \DelayOne{i}{k} +  (\reqservTwo{i}{k+1} - \reqservOne{i}{k+1}) + \reqservOne{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} + \reqservTwo{i}{k+1} - \reqrelTwo{i}{k+1} \]
By replacing $\reqservOne{i}{k+1}$ and $\reqservTwo{i}{k+1}$ with their values, we get
\[ \DelayOne{i}{k}+  ( \ConstantTwo  -  \ConstantTwo  ) +  \ConstantTwo - \reqrelOne{i}{k+1} > \DelayTwo{i}{k} +  \ConstantTwo - \reqrelTwo{i}{k+1} \]
and thus,
\[\DelayOne{i}{k} + \reqrelTwo{i}{k+1} - \reqrelOne{i}{k+1} > \DelayTwo{i}{k}   \]
By replacing the values of $\reqrelOne{i}{k+1}$ and $\reqrelTwo{i}{k+1}$ from Case~3, we have
\[\DelayOne{i}{k} + (\reqservTwo{i}{k} + \Delta'_{k+1}) - (\reqservOne{i}{k} + \Delta_{k+1})  > \DelayTwo{i}{k}   \]
which gives
\[\DelayOne{i}{k} + (\reqservTwo{i}{k} -  \reqservOne{i}{k}) + (\Delta'_{k+1} - \Delta_{k+1})  > \DelayTwo{i}{k}   \]
and since $\Delta'_{k+1} - \Delta_{k+1} \leq 0$ from~\eqref{equ:lem2_assumption0} and the definitions of $\Delta_{k+1}$ and $\Delta'_{k+1}$, it holds that
\[\DelayOne{i}{k} + \reqservTwo{i}{k} - \reqservOne{i}{k} > \DelayTwo{i}{k} \]
which contradicts~\eqref{equ:lem2_assumption1}. This contradiction implies that Condition~\eqref{equ:lem2_obj1} is satisfied.\qed
\end{proof}

%\section{Other Experimental Results} 
%The objective of the section is to demonstrate the applicability of the proposed algorithms to other benchmarks in the test suite. 
%
%\subsection{Comparison of Arbitration mechanisms}
%\begin{figure}[htb]
%\centering
%\includegraphics[width=0.9\columnwidth]{arbitercompare2014-crop.pdf}
%\caption{Increase in WCET for different arbitration mechanisms.} 
%\label{fig:arbiters1}
%\end{figure}
%
%
%We extend the range of benchmarks covered by the application and present some more results in this section.
%The results of the experiments applied to the new set of benchmarks are shown in Figure~\ref{fig:arbiters1},
%where tasks are arranged in descending order of priorities
%(\emph{jpegdecode} has the highest priority) for the case of
%fixed-priority arbitration. As expected, for the fixed-priority scheduler the task with the highest
%priority experiences no interference (an increase factor of 1x)
%from the other tasks.  We observe a counter-intuitive effect in that
%\emph{h263decode} (priority 2) experiences a larger increase in WCET
%than the lower priority tasks. This is because \emph{h263decode} has
%higher request density than the two lower priority tasks, implying
%that it is more memory intensive. Despite having lower delay
%per memory access due to the higher priority, this results in
%higher impact of the cumulative delay on the increase factor. For the unspecified work-conserving arbiter, the requests of a given task may be blocked by all requests from all concurrently executing
%tasks.  Such a mechanism hence leads to a very pessimistic WCET, as
%seen in the figure. Note that this arbitration mechanism is equivalent
%to fixed-priority arbitration where every task is assumed to have the
%lowest priority. This can be seen in Figure~\ref{fig:arbiters1}, where
%the lowest priority task, \emph{adpcmdecode}, has the same WCET with
%fixed-priority arbitration and the unspecified work-conserving
%arbiter. Unlike the previous two arbiters, TDM is neither priority-based, nor
%work conserving. Here, it is configured with a frame size of 24 and
%each of the four cores is allocated 6 slots. We note from the results that TDM arbitration
%performs remarkably well compared to fixed-priority arbitration, as only
%the highest priority task has a smaller increase factor using
%fixed-priorities. 
%
%
%
%
%
%
%\begin{table} [h!]
%\caption{Application of Fixed Priority to Another Set of Benchmarks}
%\rowcolors{3}{gray!35}{}
%\begin{tabular}{|c|c|c|c|c}
%\hline 
%Benchmark & epic	&	  jpegdecode & adpcmdecode & adpcmencode \\ \hline 
%Increase Factor & 1.00	&	18.27 & 11.47 &  9.12 \\	\hline 
%Priority & 1 & 2 & 3 & 4  \\ \hline 
%\end{tabular}
%\label{tab:fixprio2}
%\end{table}
 %
%In order to cover the benchmarks suite further, we also applied the fixed priority arbitration
%mechanism to another set of benchmarks which are sampled at 20000 cycles. The results of the experiments are presented in Table~\ref{tab:fixprio2}. Again it can be seen that \emph{jpegdecode}
%which is memory intensive suffers more degradation than the lower priority tasks while accessing the shared bus arbitrated using a fixed priority arbiter. 

%
%\subsection{Application of Sampling Regions to Other Benchmarks }
%
%Amongst the numerous sets of benchmarks that were experimented with, the observations depicted in 
%Table~\ref{tab:SamplingReg} are interesting. They depict a case in which sometimes, the algorithm could not deliver tighter results by sampling at a higher frequency as seen in the case for \emph{jpegencode} and \emph{g721decode}. On examining the logs, it is found that given the distribution of free slots,
%there was no scope for serving requests any faster, for the different sampling scenarios.    
%
%\begin{table} [h!]
%\caption{Comparison of increase in WCET in percent between our unified framework (UF) 
%and the state-of-the-art (SOA)~\cite{SchranWRT}.}
%\rowcolors{3}{gray!35}{}
%\begin{tabular}{|c|c|c|c}
%\hline 
%\emph{Benchmark} & \emph{30K} & \emph{40K} & \emph{50K}  \\ \hline   
%unepic 	         & 1.02    & 1.02     & 1.02       \\  \hline 
%jpegencode       & 21.00     & 26.80     & 27.20     \\  \hline 
%g721decode       & 7.90      & 7.90       & 7.98      \\  \hline 
%g721encode      & 7.5     & 8.6      & 8.8    \\  \hline 
%\end{tabular}
%\label{tab:SamplingReg}
%\end{table}
%\begin{table} [h!]
%\caption{Comparison of increase in WCET in percent between our unified framework (UF) 
%and the state-of-the-art (SOA)~\cite{SchranWRT}.}
%\rowcolors{3}{gray!35}{}
%\begin{tabular}{|c|c|c|c}
%\hline 
%\emph{Benchmark} & \emph{30K} & \emph{40K} & \emph{50K}  \\ \hline   
%unepic 	         & 1.02    & 1.02     & 1.02       \\  \hline 
%jpegencode       & 21.00     & 26.80     & 27.20     \\  \hline 
%g721decode       & 7.90      & 7.90       & 7.98      \\  \hline 
%g721encode      & 7.5     & 8.6      & 8.8    \\  \hline 
%\end{tabular}
%\label{tab:SamplingReg}
%\end{table}
%

\end{document}
