% \begin{itemize}
% \item Properties and observations
% \item algorithm and description
% \item proofs of correctness
% \item complexity analysis
% \end{itemize}



This section proposes a method to find the set of request-to-slot assignments that maximizes the cumulative delay.
In order to eliminate unfeasible assignments that will \emph{provably} not contribute to the global maximum,
the rest of the section presents an important observation and lemma, which eventually form the basis of the proposed algorithm.

\begin{Observation}
\label{obs:dependency}
Let us assume a sequence of $k$ requests $\{ \request{i}{1}, \request{i}{2}, \ldots, \request{i}{k}\}$ from task $\tau_i$, and a given free-bus-slot assignment $\{ \Assignment{i}{1}, \Assignment{i}{2}, \ldots, \Assignment{i}{k} \}$ corresponding to each of these requests. Let us denote by $\DelayOne{i}{k}$ the \emph{maximum} cumulative delay for these $k$ requests (computed using Lemma~\ref{lem:wccd}). Now, suppose that we extend the sequence with the next request, with index $(k+1)$, assigned to slot $h$, i.e. $\Assignment{i}{k+1} = h$ such that $h > \Assignment{i}{k}$. The maximum cumulative delay $\DelayOne{i}{k+1}$ for the $(k+1)$ requests can be obtained by simply adding to $\DelayOne{i}{k}$ the maximum delay for that last request $\request{i}{k+1}$. This maximum delay can be obtained using Lemma~\ref{lem:wccd} knowing only the service time of the $k$'th request during the computation of $\DelayOne{i}{k}$.
\end{Observation}

Observation~\ref{obs:dependency} brings about an important property: If request ($k+1$) of task $\tau_i$ is served \emph{before} or \emph{in} slot $h$, then the maximum cumulative delay of the first ($k+1$) requests is the maximum between the maximum delays computed by assuming 
\begin{enumerate}
\item[C1.] the ($k+1$) requests are \emph{all} served \emph{before} slot $h$, and
\item[C2.] the first $k$ requests are served \emph{before} slot $h$ and request ($k+1$) is served in slot $h$.
\end{enumerate}
% Cases C1 and C2 reveal the recursive relation between the maximum delay $\DelayOne{i}{k+1}$ that can be obtained on considering the first ($k+1$) requests of $\tau_i$ and the first $h$ free bus slots, and on the other hand, the maximum delay $\DelayOne{i}{x}$ that can be obtained when considering the first $x$ requests and the first ($h-1$) free slots. 
Based on Observation~\ref{obs:dependency}, we construct a method to compute $\DelayOne{i}{k}$ from $\DelayOne{i}{k-1}$, $\forall k$, which ultimately yields $\DelayOne{i}{\NbReqPerTask{i}}$. The method is shown in Algorithm~\ref{algo:MaxDelay} and an explanation of its operation is given below. Note that this algorithm is ``safe-by-construction'' as it computes $\DelayOne{i}{\NbReqPerTask{i}}$ by investigating all possible assignments of these $\NbReqPerTask{i}$ requests to free bus slots (only those assignments that are proven unfeasible are discarded).

\newcommand{\UBslotNew}[1]{\operatorname{UBslot}_{#1}}
\newcommand{\Cell}[2]{c(#1, #2)}
\newcommand{\CellElement}[2]{e_{#1, #2}}

\begin{algorithm}[t!]
\scriptsize
\LinesNumbered
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{$\NbReqPerTask{i}$ : number of requests, $\UBslotNew{i}$: last available slot}
\Output{$\DelayOne{i}{\NbReqPerTask{i}}$: maximum cumulative delay incurred by $\tau_i$. \\}
%$\NbReqPerTask{i}$: maximum nb. of requests generated by $\tau_i$, \\
%$\UBslotNew$: upper-bound on the nb. of slots that $\tau_i$ can use.
%$\UBslotNew{i} \leftarrow ...$ \; 
Create a 2D array of $\NbReqPerTask{i}$ rows and $\UBslotNew{i}$ columns, where each cell $\Cell{k}{j}$ at row $k$ and column $j$ is a list of tuples $\CellElement{k}{j}$ as explained in the description. \label{algo:line_table_create} \;
Set every cell of this array to an empty list \;
\For{$k \leftarrow 1$ \KwTo $\NbReqPerTask{i}$}{ \tcp{For each request}
	\For{$j \leftarrow k$ \KwTo $\UBslotNew{i} - (\NbReqPerTask{i} - k)$}{\tcp{all potential slots}
		\eIf{$k = 1$}{
			\lIf{$j > 1$}{$\Cell{k}{j} \leftarrow \Cell{k}{j-1}$\;} %\lnl{algo:line_j_1_start}
			$\reqrel{i}{k} \leftarrow \Tmin{i}{j - 1} + 1$\;
			\tcp{we assume $\Tmin{i}{0} = 0$}
			\If{$\reqrel{i}{k} < C_i$}{
				$\reqserv{i}{k} \leftarrow \min(\Tmax{i}{j}, \reqrel{i}{k} + \Tmax{i}{1})$\;			 
				$\DelayOne{i}{k} \leftarrow \reqserv{i}{k} - \reqrel{i}{k}$ \; %\lnl{algo:line_j_1_end}
				$\Cell{k}{j}.\operatorname{add}(\left\langle \DelayOne{i}{k} , j, \reqserv{i}{k} \right\rangle)$\;
			}
		}
		{ %\lnl{algo:line_kj_n_start}
			$\Cell{k}{j} \leftarrow \Cell{k}{j-1}$\; 
			\tcp{$\Cell{k}{j-1} = \emptyset$ if $j = k$} %\lnl{algo:line_kj_n_mid}
			\ForEach{$\CellElement{k-1}{j-1} \in \Cell{k-1}{j-1}$}{ 
				\tcp{$\CellElement{k-1}{j-1} = \left\langle \DelayOne{i}{k-1}, \Assignment{i}{k-1}, \reqserv{i}{k-1} \right\rangle$}
				$\reqrel{i}{k} \leftarrow \max( \Tmin{i}{j - 1} + 1, \reqserv{i}{k-1} + (j - \Assignment{i}{k-1}) )$\;
				\If{$\reqrel{i}{k} < C_i + \DelayOne{i}{k-1}$}{
					$\reqserv{i}{k} \leftarrow \min( \Tmax{i}{j}, \reqrel{i}{k} + \Tmax{i}{1} )$\;
					$\DelayOne{i}{k} \leftarrow  \DelayOne{i}{k-1} + \reqserv{i}{k} - \reqrel{i}{k}$ \; %\lnl{algo:line_kj_n_end}
					$\Cell{k}{j}.\operatorname{add}(\left\langle \DelayOne{i}{k}, j, \reqserv{i}{k} \right\rangle)$\;
				}
			}
		} 
	}
} %\lnl{algo:line_return}
\Return $\max_{\CellElement{\NbReqPerTask{i}}{\UBslotNew{i}} \in \Cell{\NbReqPerTask{i}}{\UBslotNew{i}}} \DelayOne{i}{\NbReqPerTask{i}}$ \;
%\tcp{$\CellElement{\NbReqPerTask{i}}{\UBslotNew{i}} = \left\langle \DelayOne{i}{\NbReqPerTask{i}} , \reqserv{i}{\NbReqPerTask{i}}, \Assignment{i}{\UBslotNew{i}} \right\rangle$}\;
\caption{MaxRegDelay($\NbReqPerTask{i}$, $\UBslotNew{i}$)}   
\label{algo:MaxDelay}
\end{algorithm}

%Create a table of $\NbReqPerTask{i}$ rows and $UBslot$ columns, where each cell $\Cell{k}{j}$ at row $k$ and column $j$ is a list of tuples $\CellElement{k}{j} = \left\langle \DelayOne{i}{k}, \Assignment{i}{k}, \reqservOne{i}{k} \right\rangle$. Each cell gives the list of all maximum delays that can be obtained by assigning the first $k$ requests of $\tau_i$ to free bus slots within the $k$'th and $j$'th free bus slot, and for each of these delays $\DelayOne{i}{k}$, the attached value $\Assignment{i}{k}$ and $\reqservOne{i}{k}$ give the free bus slot in which the $k$'th request has been served ($\Assignment{i}{k} \in [k,j]$), and the absolute time at which it has been served, to reach that delay $\DelayOne{i}{k}$.

% 
% The algorithm starts by computing an upper-bound $\UBslotNew{i}$ on the latest free bus slot that the last request of $\tau_i$ can possibly use. Since $\Tmax{i}{1}$ is the largest delay that a request can incur before getting access to the bus, it is easy to see that the maximum execution time of $\tau_i$ is given by $C_i + \NbReqPerTask{i} \times \Tmax{i}{1}$ and thus, the latest free bus slot that $\tau_i$ may use is the $\UBslotNew{i}$'th one, where $\UBslotNew{i} = ...$.
%At line~\ref{algo:line_table_create}, the algorithm creates a table of $\NbReqPerTask{i}$ rows and $\UBslotNew{i}$ columns. 
\paragraph*{Algorithm Description} All the request-to-slot assignments are captured in a two-dimensional array with $\NbReqPerTask{i}$ rows and $\UBslotNew{i}$ columns.
The input to the algorithm is the number of requests, and the upper bound on the available slots. 
Note that the variables $k$ and $j$ are used to refer to requests and slots, respectively.
The algorithm proceeds in a row-wise manner, by assigning request 1 to all feasible slots and computing the cumulative delays and then proceeding to analyze request 2 (next row of the array) and so on.
Each cell $\Cell{k}{j}$ of this array holds a list of tuples $\CellElement{k}{j} = \left\langle \DelayOne{i}{k}, \Assignment{i}{k}, \reqservOne{i}{k} \right\rangle$, where each tuple $\CellElement{k}{j}$ in that list reflects a feasible assignment of the first $k$ requests to $k$ free bus slots within the range $[1,j]$. The members of this tuple denote: (i) the maximum delay $\DelayOne{i}{k}$ that can be obtained with the corresponding assignment, (ii) the free bus slot in which the $k$'th request has been served to reach that maximum delay $\DelayOne{i}{k}$, i.e. $\Assignment{i}{k} \in [k,j]$, and (iii) the corresponding time $\reqservOne{i}{k}$ at which that $k$'th request has actually been served in that slot. 
%Each cell $\Cell{k}{j}$ stores the list of \emph{all} possible delays $\DelayOne{i}{k}$ and their corresponding values of $\Assignment{i}{k}$ and $\reqservOne{i}{k}$.

For the first request and first slot, the algorithm computes \emph{the} worst-case delay when the first request is assigned to the first free bus slot (Lines 7, 9, 10, and 11). To do so, it uses Lemma~\ref{lem:wccd} and adds the corresponding tuple $\CellElement{1}{1}$ to the list of cell $\Cell{1}{1}$, in this case $\CellElement{1}{1} = \left\langle \Tmax{i}{j}, 1, \Tmax{i}{j} \right\rangle$. The list contains only this tuple.

For $k=1$ and  $j>1$, the algorithm computes \emph{all} the maximum delays by considering every assignment of request 1 to free bus slots $\leq j$. First,
the list of the current cell $\Cell{1}{j}$ is initialized to the list of the previous cell $\Cell{1}{j-1}$ (Line~6), thereby carrying on all the possible worst-case delays that were obtained when this first request was assigned to a previous free bus slot $< j$. Then, the algorithm addresses the case where the first request is assigned to the $j$'th bus slot: it makes use of the equations of Lemma~\ref{lem:wccd} to compute $\reqrel{i}{1}$ and $\reqserv{i}{1}$ and appends the corresponding tuple $\CellElement{1}{j}$ to the list of cell $\Cell{1}{j}$ (lines 7, 9, 10, and 11).

Any two requests belonging to a task of length $C_i$ cannot have their release times separated by more than $C_i$ time units. The addition of the ``if-statement'' at Line~8 filters out a considerable number of unfeasible solutions as $j$ gets larger. It ensures that any partial solution in which the first request is released after the task has run for $C_i$ time units is immediately discarded, thereby pruning the search space by eliminating all solutions that start with this first erroneous free-bus-slot assignment $\Assignment{i}{1} > C_i$ as soon as they are detected. 

When $k>1$ and $j\geq k$, the algorithm computes \emph{all} worst-case delays that can be obtained when the first $k$ requests of $\tau_i$ can be assigned to any free bus slots within $[k,j]$. On Line~13, the algorithm initializes the list of cell $\Cell{k}{j}$ to the list of results obtained for the cell $\Cell{k}{j-1}$. Informally, this reflects case C1 above, which states that the worst-case cumulative delay of the first $k$ requests may be found in the set of maximum delays obtained when these $k$ requests are \emph{all} served \emph{before} the $j$'th free bus slot. Then on Line~14, the algorithm inspects every maximum delay that has been obtained assuming that the first $k-1$ requests were served \emph{before} the $j$'th free bus slot. For each of these delays $\DelayOne{i}{k-1}$, assuming that the $k$'th request is now served in the $j$'th free bus slot, lines 15 and 17 compute the release and service time of that request $\request{i}{k}$ using the equations of Lemma~\ref{lem:wccd}, by 
referring to the corresponding request-to-free-bus-slot assignment $\Assignment{i}{k-1}$ of the ($k-1$)'th request, as well as its service time $\reqserv{i}{k-1}$ in this free bus slot $\Assignment{i}{k-1}$. This reflects case C2 presented above, as it gives a corresponding maximum delay $\DelayOne{i}{k}$ for the first $k$ requests assuming that request $\request{i}{k}$ is assigned to the $j$'th free slot and the previous $k-1$ requests are served in the earlier bus slots. The filter at Line~16 is similar to the one at Line~8 to filter out a host of unfeasible solutions. Here we consider the maximum delay $\DelayOne{i}{k-1}$ that $\tau_i$ may have incurred due to interference with the first $(k-1)$ requests.

Note that $k$ spans from $1$ to $\NbReqPerTask{i}$, while $j$ takes all values within $[k, \UBslotNew{i} - (\NbReqPerTask{i} - k)]$. The reason for limiting the range of $j$ is because the $k$'th request of $\tau_i$ cannot possibly be served in a free bus slot $\leq k$ (leading to the lower bound $j\geq k$) and the next $(\NbReqPerTask{i} - k)$ requests following $\request{i}{k}$ require at least $(\NbReqPerTask{i} - k)$ slots in order to be served (leading to the upper bound $j \leq \UBslotNew{i} - (\NbReqPerTask{i} - k)$).

% In terms of complexity, Algorithm~\ref{algo:MaxDelay} is non-polynomial as it carries on all the possible scenarios/free-bus-slot assignments and their associated maximum delays, from the first cell $\Cell{1}{1}$ to the last one, $\Cell{\NbReqPerTask{i}}{\UBslotNew{i}}$. Therefore, the max operator of the last line~\ref{algo:line_return} goes through all of these scenarios, which confers a non-polynomial complexity on the algorithm. In order to reduce this complexity, we shall use a property described in the following lemma that enables us to considerably reduce the size of the list of tuples computed for each cell. 

% By nailing down the scenarios that cannot possibly lead to a worst-case delay, and by discarding them at an early stage, these scenarios will not propagate from one cell to another (see lines~\ref{algo:line_j_n_start} and~\ref{algo:line_kj_n_start}) and will not give rise to a massive collection of subsequent scenarios that are anyway doomed. The purpose of pruning the solution tree at each 
% iteration, and thus narrowing the set of candidate solutions for the worst-case delay, somewhat contains the combinatorial explosion within a reasonable amount of scenarios to be investigated.

\paragraph*{Elimination of unfeasible assignments}
\label{sec:ListReduce}
Given a set of possible request-to-slot assignments, the following lemma provably determines the mappings that cannot possibly lead to the global worst-case delay. By discarding them at an early stage, they are not propagated as the analysis progresses, restricting the number of assignments that must be handled. The purpose of pruning the solution tree in each iteration is to increase the efficiency of the algorithm and improve its scalability with respect to the number of requests and potential free slots. 
\begin{lemma}
\label{lem:pruning_solution}
Let $A = \{ \AssignmentOne{i}{1}, \ldots, \AssignmentOne{i}{x} \}$ refer to a list of free-bus-slot assignments for the first $x$ requests of task $\tau_i$. Let $\DelayOne{i}{x}$ be the maximum cumulative delay for these $x$ requests considering this assignment $A$, and let $\reqservOne{i}{x}$ be the \emph{absolute} time at which the $x$'th request is served in a scenario leading to this delay $\DelayOne{i}{x}$. Similarly, let $A' = \{ \AssignmentTwo{i}{1}, \ldots, \AssignmentTwo{i}{x} \}$ denote another list of free-bus-slot assignments for the first $x$ requests of task $\tau_i$. Let $\DelayTwo{i}{x}$ be the maximum cumulative delay considering this assignment list $A'$, and let $\reqservTwo{i}{x}$ be the \emph{absolute} time at which the $x$'th request is served in a scenario leading to this delay $\DelayTwo{i}{x}$. If it holds that

\vspace{-3mm}
\begin{footnotesize}
\begin{eqnarray}
\label{equ:lem_assumption0} & \AssignmentOne{i}{x} & \leq \AssignmentTwo{i}{x} \\
\label{equ:lem_assumption1} \text{and } & \DelayOne{i}{x} & \leq \DelayTwo{i}{x} \\
\label{equ:lem_assumption2} \text{and } & \reqservOne{i}{x} + (\AssignmentTwo{i}{x} - \AssignmentOne{i}{x}) & \geq \reqservTwo{i}{x}
\end{eqnarray}
\end{footnotesize}
then for all $h > \AssignmentTwo{i}{x}$, assigning request $\request{i}{x+1}$ to the $h$'th free bus slot, i.e. $\AssignmentOne{i}{x+1} = \AssignmentTwo{i}{x+1} = h$, leads to
\begin{footnotesize}
\begin{eqnarray}
\label{equ:lem_obj0} & \AssignmentOne{i}{x+1} & = \AssignmentTwo{i}{x+1} \\
\label{equ:lem_obj1} \text{and } & \DelayOne{i}{x+1} & \leq \DelayTwo{i}{x+1} \\
\label{equ:lem_obj2} \text{and } & \reqservOne{i}{x+1} + (\AssignmentTwo{i}{x+1} - \AssignmentOne{i}{x+1}) & \geq \reqservTwo{i}{x+1}
\end{eqnarray}
\end{footnotesize}
The vital inference from the above observations is that the maximum cumulative delay for the first ($x+1$) requests of $\tau_i$ is at least as high, and the service time of the ($x+1$)'th request no larger, by using the assignment corresponding to list $A'$ for the first $x$ requests (instead of the assignments in list $A$). Note that since Conditions~\eqref{equ:lem_obj0},~\eqref{equ:lem_obj1}, and~\eqref{equ:lem_obj2} are the same as~\eqref{equ:lem_assumption0},~\eqref{equ:lem_assumption1}, and~\eqref{equ:lem_assumption2}, the lemma continues to hold for all the subsequent requests $> x+1$.
%Note that, if Inequalities~\eqref{equ:lem_obj0},~\eqref{equ:lem_obj1}, and~\eqref{equ:lem_obj2} are satisfied then Inequalities~\eqref{equ:lem_assumption0},~\eqref{equ:lem_assumption1}, and~\eqref{equ:lem_assumption2} are automatically re-satisfied as well for the first ($x+1$) requests.
\end{lemma}

Due to space limitations, please refer to the appendix for the proof.

In order to leverage the result of Lemma~\ref{lem:pruning_solution}, we can add a function $\operatorname{ListReduce}(\Cell{k}{j})$ at the end of the first inner loop, i.e., ``for $j \leftarrow k$ to $\UBslotNew{i} - (\NbReqPerTask{i} - k)$'' in Algo.~\ref{algo:MaxDelay}. This function makes sure that $\nexists$ two distinct tuples $\CellElement{k}{j}^1$ and $\CellElement{k}{j}^2$ in the list of $\Cell{k}{j}$ such that 
\begin{footnotesize}
\begin{eqnarray}
\Assignment{i}{k}^1 & \leq & \Assignment{i}{k}^2 \nonumber \\
\DelayOne{i}{k}^1 & \leq & \DelayOne{i}{k}^2 \nonumber \\
\reqserv{i}{k}^1 + (\Assignment{i}{k}^2 - \Assignment{i}{k}^1) & \geq & \reqserv{i}{k}^2 \nonumber
\end{eqnarray}
\end{footnotesize}
Each time such a pair of tuples is found, only the tuple $\CellElement{k}{j}^2$ is kept and the tuple $\CellElement{k}{j}^1$ is discarded.
This is a key addition to the algorithm that \emph{significantly} reduces the number of tuples in $\Cell{k}{j}$.
We later return to experimentally evaluate the benefits of this elimination in Section~\ref{sec:experiments}.


% It has been noticed in our experimental setup that some tasks are not analyzable with the brute force approach (without this addition) on a regular desktop machine as they
% can consume the system memory in few seconds. With this addition, there was a noticeable speed up in the analysis. 

