%\documentclass[copyright,creativecommons,noncommercial]{eptcs}
\documentclass[11pt]{article}
\bibliographystyle{alpha}
\providecommand{\event}{} % Name of the event you are submitting to
%\documentclass[runningheads,10pt]{llncs}
\usepackage{etex}
\usepackage{amsthm}
\usepackage[leqno]{amsmath}
\usepackage{easybmat}
\usepackage{amssymb}
%\usepackage{work} %Neil's definitions
\usepackage{fancyvrb}
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage{tikz}
\usepackage{pgfplots}
\usepackage{xstring}
\usepackage{ifthen}
\usepackage{calc}
\usepackage{tikz-uml}
\usepackage[titletoc]{appendix}
\usepackage[margin=3.5cm]{geometry}

% DIMENSION OF TEXT:
%\textwidth 140mm         % Width of text line.

% ORDINARY DEFINITIONS 

\newtheorem{theorem}{THEOREM}[section]
\newtheorem{obs}[theorem]{OBSERVATION}
\newtheorem{corol}[theorem]{COROLLARY}
\newtheorem{lemma}[theorem]{LEMMA}
\newtheorem{claim}[theorem]{CLAIM}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{construction}[theorem]{Construction}
\newtheorem{alg}{Algorithm}[section]

\newtheorem{exmp}{Example}[section]

% DEFINITIONS SPECIAL TO THIS TEXT

\renewcommand{\vec}{\mathaccent"017E}  %LNCS defines \vec as boldface italic, fy!
\newcommand{\deptypes}{{\mathbb{D}}}
\newcommand{\deps}{{\mathbb{F}}}
% \newcommand{\unarydeps}{{\mathbb{U}}}
% \newcommand{\binarydeps}{{\mathbb{B}}}
\newcommand{\unarydep}[3]{{#1\xrightarrow{\makebox[1ex][l]{$\scriptstyle #2$}} #3}}
\newcommand{\longunarydep}[3]{{#1\xrightarrow{#2} #3}}
%\newcommand{\unarydep}[3]{{#1\stackrel{#2}{\rightarrow} #3}}
\newcommand{\binarydep}[4]{{{#1}\atop{#2}}\rightrightarrows {{#3}\atop{#4}}}
%\newcommand{\meetdep}[3]{{{#1}\atop{#2}}\rightrightarrows {#3}}
\newcommand{\dep}[3]{{\tt#1}\stackrel{#2}{\to}{\tt#3}}  %OLD DEF
\newcommand\scc{\mbox{\rm SCC}}
\newcommand\ep[1]{{\mathbf #1}}
\newcommand\safe[1]{{{\cal S} #1}}
\newcommand\xmin{{x_{\it min}}}
\newcommand\ymin{{y_{\it min}}}
\newcommand\zmin{{z_{\it min}}}
\newcommand{\mytag}[1]{\tag{#1}}
\newcommand\twodots{..}
\newcommand\Lbjk{L_{\text{BJK}}}

% OTHER DEFS (mostly from NDJ)

\newcommand\st{\colon} %such that
%\newcommand\setminus{-}  %any better idea?
\newcommand{\pass}{\texttt{:=}\,}
\newcommand{\semi}{\texttt{;}}
\newcommand{\X}{\texttt{X}}
\newcommand{\Y}{\texttt{Y}}
\newcommand{\Z}{\texttt{Z}}
\newcommand{\C}{\texttt{C}}
\newcommand{\U}{\texttt{U}}
\newcommand{\N}{\texttt{N}}
\newcommand{\lsem}{\mbox{$\lbrack\hspace{-0.3ex}\lbrack$}}
\newcommand{\rsem}{\mbox{$\rbrack\hspace{-0.3ex}\rbrack$}}
\newcommand{\sempar}[1]{\mbox{\lsem\pgt{#1}\rsem}}
\newcommand{\ints}{\mathbb{Z}}
\newcommand{\nats}{\mathbb{N}}
\newcommand{\pgt}[1]{{\tt #1}}
\newcommand{\inds}{V}

\newcommand{\bthm}{\begin{theorem}}
\newcommand{\ethm}{\end{theorem}}
\newcommand{\blem}{\begin{lemma}}
\newcommand{\elem}{\end{lemma}}
\newcommand{\bprf}{\begin{proof}}
\newcommand{\eprf}{\end{proof}}
\newcommand{\bdfn}{\begin{definition}}
\newcommand{\edfn}{\end{definition}}
\newcommand{\be}{\begin{enumerate}}
\newcommand{\ee}{\end{enumerate}}

%commands from BJK (CiE08):
\let\mprod=\cdot   %matrix product for SOM
\let\altprod=\otimes   %the other product (for SOP)
\let\diag=\nabla   %symbol for taking the diagonal of a matrix (ad hoc)
\let\dfrle=\preceq   %order for DFRs
\let\dfrge=\succeq
\let\dfrlt=\prec
\newcommand{\uop}[1]{C_2(#1)}   %all 2-element subsets of
\newcommand{\arcsof}{{\bf A}}
\newcommand{\onesof}{{\bf A^{1}}}
\newcommand\matrices{{{\deptypes}^{n\times n}}}
\newcommand\dfrs{{\mathbb{DFR}_{n}}}

\let\conforms = \sqsubseteq
\newcommand\altmodels{\mathrel{{|}\mkern-3mu{\equiv}}}
\let\elementary=\trianglelefteq
\newcommand\monus{\dot-}

%commands specific to this document
\newcommand{\CPE}{\mathcal{E}}
\newcommand{\ID}{\mathit{Id}}
\newcommand{\CoreReset}{\emph{CoreReset}\;}
\newcommand{\Algorithms}{\emph{Algorithms}\;}

\begin{document}
%LNCS stuff
% \authorrunning{Mann}
% \titlerunning{Automatic Complexity Analysis of Simple Imperative Programs}

\title{Automatic Complexity Analysis of Simple Imperative Programs}
\author{Author: Zachi Mann \\
        Supervisor: Prof. Amir Ben Amram \\ \\ \\ \\
Project submitted in partial fulfillment of the requirements for an \\
MSc degree in Computer Science, \\
at the \\
Academic College of Tel-Aviv Yaffo \\ \\ \\ \\}
%\institute{School of Computer Science, Tel-Aviv Academic College}

\maketitle   
\newpage
\tableofcontents
\newpage
\section{Introduction}
This document describes a set of algorithms designed to solve an intriguing question:
given a computer program, what is its complexity class? That is, is it linear, polynomial, or exponential?
For Turing-complete languages this property is proven to be undecidable, hence the algorithms target a certain 
abstracted ``core'' language which is not Turing complete but is still interesting enough to be discussed.
This core language was first described in~\cite{BJK08}, and in~\cite{BA10} it was extended with a command that resets variables to zero (\verb+X := 0+).
The analysis algorithms described in this document apply the inference rules of~\cite{BA10}, but rely heavily on~\cite{BJK08} when it comes to the data structure and implementation.
\section{Problem Statement}
\label{sec:goals}

First, we define our core language, $L_r$.

\paragraph{Syntax} is described in Figure~\ref{fig:core-lang-syntax} and should be self-explanatory.
In a command $\verb+loop+\; \X \; \verb+{C}+$,
variable $\X$
is not allowed to appear on the left-hand side of an assignment in the loop
body {\tt C}. There is no special syntax for a ``program.'' %which is just a command.

\begin{figure}[t]
$$ \renewcommand{\arraystretch}{1.3}
\begin{array}{rcl}
\verb+X+,\verb+Y+\in\mbox{Variable} &\;\; ::= \;\; & \X_1 \mid\X_2 \mid \X_3 \mid
 \ldots  \mid \X_n\\
\verb+e+\in\mbox{Expression} & ::= & \verb+X+ \mid \verb/X + Y/ \mid 
\verb+X * Y+\mid\verb+0+\\
\verb+C+\in\mbox{Command} & ::= & \verb+skip+ \mid \verb+X:=e+ 
                                \mid \verb+C+_1 \semi \verb+C+_2 
                                \mid \texttt{loop} \; \X  \; \texttt{\{C\}} \\
                         & \mid & \texttt{choose}\; \{ \C_1 \} \; \texttt{or} \; \{ \C_2 \}
 \end{array} \renewcommand{\arraystretch}{1.0}$$

\caption{Syntax of the core language $L_r$. Variables hold nonnegative
integers. \label{fig:core-lang-syntax}}
\end{figure}

\paragraph{Data \& Semantics}The only type of data processed by the core language is
nonnegative integers. Even though real programs can manipulate negative or non-integer values, those are usually not relevant to loop control so they could be dropped when abstracting a real language into the core language.
The semantics of the core language is intended for over-approximating a realistic
program's semantics. Therefore, the core language is nondeterministic.
The {\tt choose} command represents a nondeterministic choice, and can be used to abstract
any concrete conditional command by simply ignoring the condition.
The {\em loop command\/} 
$\verb+loop+\,\X_\ell\,\verb+{C}+$ repeats \pgt{C} a number of times
bounded by the value of $\X_\ell$. Thus, it 
may be used to model different kinds
 of loops (for-loops, while-loops).
%as long as a bounding expression can be statically determined (possibly by
%an auxiliary analysis such as~\cite{CS:01,PR:04}).
The use of bounded loops restricts the computable functions
to be primitive recursive, but this is still rich enough to make
the analysis problem challenging.

\paragraph{Goals of the analysis.}  
The \emph{polynomial-bound analysis problem} is to find,
for any given command, which output variables
are  bounded by a polynomial in the input variables. This is the problem we will fix
our attention on, although we will also consider a variant:
The \emph{linear-bound problem} identifies linearly-bounded output values instead.

\paragraph{Examples.} In the following program,
variables may grow exponentially.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_4$ {
   X$_3$ := X$_1$+X$_2$;
   choose { X$_1$ := X$_3$ } or { X$_2$ := X$_3$ };
 }
\end{Verbatim}
However, the following version is polynomially bounded:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_4$ {
   X$_3$ := X$_1$+X$_2$;
   choose { X$_1$ := X$_3$ } or { X$_2$ := 0 }
 }
\end{Verbatim}
In the next example X$_2$ may grow exponentially inside the loop body, but only if  \verb+X+$_3$\verb+ := 0+ is not executed:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_1$ {
	choose {
		X$_2$ := X$_3$ + X$_3$
	}	
	or {
		choose {
			X$_3$ := X$_2$
		}
		or {
			X$_3$ := 0
		}
	}		
}
\end{Verbatim}
% A more complicated example:
% \begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
% loop X$_5$ {
%   choose { X$_3$ := X$_1$+X$_2$;   X$_4$ := X$_2$ }
%       or { X$_1$ := 0;   X$_4$ := X$_1$+X$_2$ };
%   X$_1$ := X$_3$ + X$_4$
% }
% \end{Verbatim}


\section{Background}

\subsection{Related Work}
\paragraph{}
In this document we describe algorithms based on  ~\cite{BA10} where the implementation relies on data structures that were used in ~\cite{BJK08}.
The work in~\cite{BJK08,BA10} was innovative and unique due to the combination of three points:
\be
\item
While abstract interpretation~\cite{Cou96} is the de-facto standard way of presenting many program analyses, growth-rate analysis has not been one of 
the typical applications of abstract interpretation. There have been few previous works of this kind:
Niggl and Wunderlich~\cite{NW06} and later Jones and Kristiansen~\cite{JK08} studied similar languages with similar methods.
Abstract interpretation has also been proposed for space consumption in a functional language \cite{MPS10}, which is more remotely related.
\item
The search for a complete solution, even if for a restricted problem.
Typically, AI-based program analysis is imprecise: when one wants to check a certain property, the analysis can be either unsound (giving false positives)
or incomplete (giving false negatives). For example, both~\cite{NW06} and~\cite{JK08} only gave ``safe'' upper bounds on
the complexity. It is quite unusual for an AI to be a sound and complete decision procedure.
\item
A bigger difference is observed from most other methods that have been proposed
for complexity analysis (e.g., \cite{AAGP:sas08,SPEED-POPL09,ADFG:2010}), since these methods rely heavily on linear invariant
generation and do not deal well with loops that rely on non-linear computed values.
\ee

\paragraph{}
Our analysis approach involves the study of complexity properties of an abstracted core language.
Research in Implicit Computational Complexity (ICC) has produced numerous examples of programming languages that are so restricted that
they capture an intended complexity class, that is, compute all, and only, functions of that class.
Early examples include~\cite{Cob64, BC92}. These restrictions may be seen (or are even explicitly presented)
as imposing a certain type system on a language which, otherwise, could also compute outside the intended complexity class; 
but this is not an automated analysis in the sense that the programmer has to supply the ``types'' (in~\cite{Cob64} these are explicit resource bounds). 
In these cases one might describe the technique more as certification than analysis. 
However, ICC research has also developed some methods that were later put to effective use in automated analysis.
Two notable examples are the method of term interpretations and the method of linear types ~\cite{Hof03}, which yielded strong analysis techniques
as described, e.g., in ~\cite{HH10, JHLH10}.

\paragraph{}
In ~\cite{BA10} the core language of ~\cite{BJK08} was extended with a capability to reset a variable to zero, 
and the analysis problem was proven to be decidable.
Several other extensions to the core language were considered by Ben Amram and Kristiansen in ~\cite{BK11},
who investigated the decidability of the \emph{feasibility problem} for those extensions.
A program is called \emph{feasible} if all values it computes are polynomially bounded in terms of the input 
(one can see how the feasibility problem is related to the analysis problem of~\cite{BJK08, BA10}).
The extensions considered were definite loops, with several kinds of assignment statements.
It was shown that the feasibility problem is undecidable with the ordinary and lossy assignments
(that is, assignments where the modified variable may receive any value bounded by the given expression, even zero)
however it is decidable for the core language with definite loops and max assignments 
(that is, assignments where the modified variable never decreases its value).
In addition, work on other extensions of ~\cite{BJK08} is being carried out in other student projects under the guidance of Prof. Ben Amram.


\subsection{The Algorithm of~\cite{BJK08}.} 
In ~\cite{BJK08}, Ben-Amram, Jones and Kristiansen showed that for the
language obtained from $L_r$ by omitting the constant 0, denoted $\Lbjk$,
the polynomial-bound analysis problem is in PTIME. The algorithm that does this uses a technique of assigning a
\emph{certificate} to each command. The certificate is a finite structure
(basically, a matrix) which summarizes the input-output dependences in the command: 
how each of the output values depends on each of the inputs.
The analysis is compositional, which means that
a certificate for a composite command only depends on those of its parts.
The main novelty in the algorithm of~\cite{BJK08} was
a new kind of certificate, called a \emph{data flow relation}, which encode sufficient information for
precise analysis of the chosen core language. As the data flow relation (or \emph{DFR} for short) has proven to be
a convenient and relatively compact way to represent the variable data dependency information, it is also used by all
the analysis algorithms described in this document.

\subsection{Extending the Core Language.} 
One of the open problems left by~\cite{BJK08} was analysis of extended
versions of the core language, and ~\cite{BA10} presented a first step forward. 
The extension considered is the capability to reset a variable to zero.
One way to explain the difficulty caused by resets is an increased context-sensitivity: for example, the command \verb/X:=Y*Z/ 
introduces a dependence of \pgt{X} on \pgt{Z},
but not if \pgt{Y} is zero.
We solve this problem by employing context-sensitive analysis according to the \emph{proof system} for the lack of polynomial bounds, 
described in~\cite{BA10} (in contrast to the compositional analysis used to certify polynomiality, as given by~\cite{BJK08}).
We introduce some basic definitions, taken from ~\cite{BA10}:
\bdfn
The set of \emph{dependence types} is
$\deptypes =  \{1,1^+,2,3\}$, with order $1 < 1^+ < 2 < 3$, and binary maximum operator
$\sqcup$.  We write $x\simeq 1$ for $x\in\{1,1^+\}$.
\edfn

Verbally, we may refer to these types as:

$1=$\emph{identity dependence},
$1^+=$\emph{additive dependence},
$2=$\emph{multiplicative dependence},
$3=$\emph{exponential dependence}. 
\bdfn
$\inds$ is the set of all variable indices in the program.
\edfn\bdfn
A \emph{context} or \emph{reset context} is a subset of $\inds$. The set of all reset contexts is denoted by $\mathbb{C}$.
\edfn

\bdfn
A \emph{dependency}, denoted by $\unarydep{i}{d}{j}$, describes that the variable $X_j$ is dependent on variable $X_i$ with dependence type $d \in \mathbb{D}$.
\edfn
\bdfn \label{def-dep-judge}
A \emph{dependence judgement} is $\C,P \vdash D,Q$ where $\C$ is a command, $P,Q$ are reset contexts and $D$ is a dependency.
\edfn
\paragraph{}
The \emph{pre-context} $P$ specifies variables that are presumed to hold zero; the \emph{post-context} $Q$ specifies zeros guaranteed to come out.\newline
Following is an example of how the inference rules of ~\cite{BA10} can be applied.  For instance, the inference rule

\[\frac{ i\notin P,\ i\ne l } {
 \X_l\texttt{:=0},\, P  \vdash \unarydep{i}{1}{i},\, P \cup \{ l \}
}\]

states that for any pre-context $P$, $l$ is added to the post context, and all variables which are not $X_l$ are not modified by the command $X_l$ \verb+:= 0+.
So given the command $X_1$ \verb+:= 0+, an empty pre-context, and $V=\{1,2,3\}$, the following judgements can be inferred by this rule:

\[{
 \X_1\texttt{:=0},\, \emptyset  \vdash \unarydep{2}{1}{2},\, \{1\}  \;\;\text{and}\;\; \X_1\texttt{:=0},\, \emptyset  \vdash \unarydep{3}{1}{3},\, \{1\}
}\]
%$\X_1\texttt{:=0},\, {}  \vdash \unarydep{2}{1}{2},\, {1} \cup \{ l \}$

Following is another example which may emphasize the context-sensitive nature of our analysis. 
Let $\C$ be the command
 $\verb+loop+\; \X_3 \; \verb+{X+_1\verb/:= X/_2\verb/+X/_3\verb+}+$.
% In the following judgements, initial values are unconstrained:
We have $ \C, \emptyset  \vdash  \unarydep{2}{1}{2},   \emptyset$ ($\X_2$ is not
modified) and $ \C, \emptyset  \vdash  \unarydep{2}{1^+}{1},   \emptyset$ ($\X_1$ may be
set to $\X_2$ plus something else).
%  \[ \renewcommand{\arraystretch}{1.5}\begin{array}{llrl}
%  \C, \{ \}  &\vdash  & \unarydep{2}{1}{2},  & \{ \} \\
%  % \C, \{ \}  &\vdash  & \binarydep{2}{3}{1}{1},  & \{ \} \\
%  \C, \{ \}  &\vdash  & \unarydep{2}{1^+}{1},  & \{ \}  \,.
%  \end{array}
%  \]
If $\X_3$ is initially zero, the loop does not execute. Therefore
$\C, \{ 3 \}  \vdash  \unarydep{2}{1^+}{1},  Q $
% \[ \begin{array}{llrl}
%  \C, \{ 3 \}  &\vdash  & \unarydep{2}{1^+}{1},  & Q 
%   \end{array}
%  \]
does \emph{not} hold for any $Q$.
However, $\C, \{ 3 \}  \vdash  \unarydep{1}{1}{1},  \{ 3 \}$
% \[ \begin{array}{llrl} \C, \{ 3 \}  &\vdash  & \unarydep{1}{1}{1},  & \{ 3 \}
%   \end{array}
%  \]
holds: $\X_1$ is not modified and $\X_3$ is guaranteed to remain zero.
\paragraph{}
One major problem is that the number of dependence judgements computed for each command is exponential in relation to the number of variables in the program. 
Thus applying the inference rules with brute force can cause an explosion of dependence judgements. 
Although it is shown in~\cite{BA10} that proving the lack of polynomial bounds using the proof system of~\cite{BA10} can be done in \emph{PSPACE}, 
our implementation will not attempt to keep the space within polynomial bounds, because doing so would make the implementation much harder and possibly prevent us
from considering optimizations to the analysis which reduce the number of reset contexts computed (though in the worst case it can still be exponential).
Our priority is to reduce the algorithm runtime.

\section{Project Goals}
\subsection{Algorithm Engineering.}
The main goal of this project was to investigate the efficiency trade-off in the implementation of analysis algorithms based on the inference rules of~\cite{BA10},
i.e., how much can we benefit from working hard on optimizing the analysis algorithm? 
The following approaches were considered:
\paragraph{The ``Naive'' Approach.} The Abstract Syntax Tree is processed bottom-up, and for every command the set of all true judgments is generated. 
This implies exponential complexity of the algorithm as for each command the number of possible reset contexts is exponential with respect to the number of variables in the program.
\paragraph{The Abstract Interpreter.} The program is processed in a top-down fashion, where for each command we generate all dependence judgments according to the
 inference rules of ~\cite{BA10} and the reset contexts that were computed so far from previous commands.
 This approach requires special care to avoid redundant re-calculations since commands within a
 loop body may be processed over and over again with the same potential results, 
but it also holds promise of great efficiency gain relative to the ``naive" approach since instead of considering all 
possible pre-contexts when processing a command, we consider only the reachable pre-contexts.

\subsection{Output Extension.}
Another goal of the project was to make the analysis results more informative by extending the output to include a description of a computational path
which results with each computed variable dependence. 
This was achieved by decorating each data flow with an expression that describes a computational path that realizes the computed variable dependency.

\section{Technical Background (\cite{BA10})}
\label{sec-sop}
As mentioned before, the algorithms described in the document make use of the inference rules described in ~\cite{BA10}.
Following is a technical introduction for the proof system of ~\cite{BA10}.
\subsection{Proof System Ingredients}
\label{subsec-ProofSystem}
The basic ingredient in the proof system of ~\cite{BA10} is called a \emph{dependence fact}.
In its simple (unary) form, it indicates that an output variable depends, in a certain way, on some
input variable. The set of variable indices is denoted by $\inds$ with generic elements $i,j,k$
etc.

\bdfn \label{def-binary-dep}
The set of \emph{dependences} $\deps$ is the union of two sets: \\
(1) The set of \emph{unary dependences}, isomorphic to 
$\inds\times\deptypes\times\inds$. The notation for an element is
\ $\unarydep{i}{\delta}{j}$. \\
(2) The set of \emph{binary dependences}, isomorphic to %\ $\binarydeps = 
$\inds\times\inds\times\inds\times\inds$, where the notation for an element is
\ $\binarydep{i}{j}{k}{\ell}$.
\edfn

Informally, a binary dependence represents a conjunction, namely the fact that two unary dependences hold simultaneously.
 This is only used when the dependences in question are of types $1$ or
$1^+$, and when ${i\ne j} \lor {k\ne \ell}$
 (a similar mechanism was used in~\cite{BJK08}).  
Here is an example to help in understanding the purpose of binary dependences. 
Consider the following command \verb+C+:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
choose { X$_2$ := X$_3$; X$_3$ := X$_1$} or { skip }
\end{Verbatim}
We can deduce the unary dependences $\unarydep{3}{1}{2}$ and $\unarydep{3}{1}{3}$,
since the initial value of ${\tt X}_3$ may flow into ${\tt X}_2$. And it
may also remain in ${\tt X}_3$ if the second branch is taken.
But the pair $\{3\to 2, 3\to 3\}$ cannot be deduced, indicating that these
two data flows are not simultaneous.
 Therefore, following \verb+C+ with
the command
$$\verb/ X/_3 \verb/ := X/_2 \verb/ + X/_3$$
will \emph{not} create the data flow $\unarydep{3}{2}{3}$.
 
\subsection{Inference rules for assignments}
 
 We list the {\tt skip} command among the assignments. It is, in fact, equivalent to
 $\verb+X+_1\verb/:= X/_1$.
 
\be
\item (Unary rule for \verb+skip+) \par
\[\frac{ i\notin P } { \texttt{skip},\, P  \vdash \unarydep{i}{1}{i},\, P } \]

\item (Unary rule for $\X_l\verb+:=0+$) \par
\[\frac{ i\notin P,\ i\ne l } {
 \X_l\texttt{:=0},\, P  \vdash \unarydep{i}{1}{i},\, P \cup \{ l \}
}\]

\item (Unary rules for  $\verb+X+_l\verb/:=X/_r$) \par
 For any context $P$, let
$P_{\ell,r} = P \setminus \{l\} \cup \{l\mid \text{ if $r\in P$}\}$. 
\[\frac{ i\notin P,\  i\ne l}
{\X_l\texttt{:=X}_r,\, P  \vdash \unarydep{i}{1}{i},   P_{\ell,r}}
\qquad
\frac{r\notin P}
{\X_l\texttt{:=X}_r,\, P  \vdash \unarydep{r}{1}{l},   P_{\ell,r}}
\]

\item (Unary rules for  $\verb+X+_l\verb/:=X/_r\verb/*X/_s$) \par
\label{itm:rules-mult}
  For any context $P$, let
$P_{l,r,s} = P \setminus \{l\} \cup \{l\mid \text{ if $r\in P$ or $s\in P$}\}$.

\[\frac{i\notin P,\ i\ne l}
{ \X_l\texttt{:=X}_r\texttt{*X}_s,\, P  \vdash \unarydep{i}{1}{i},\, P_{l,r,s} }
\qquad
\frac{r,s\notin P,\ t\in \{r,s\}}
{\X_l\texttt{:=X}_r\texttt{*X}_s,\, P  \vdash \unarydep{t}{2}{l},\, P_{l,r,s}
}\] 

\item (Unary rules for  $\verb+X+_l\verb/:=X/_r\verb/+X/_s$, where $r\ne s$) \par
  For any context $P$, let
$P_{l,r,s} = P \setminus \{l\} \cup \{l\mid \text{ if $r,s\in P$}\}$.

\begin{tabular}{*{2}{p{0.4\textwidth}}}
\[\frac{i\notin P,\ i\ne l}
{\X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{i}{1}{i},\, P_{l,r,s} }
\]
&
\[\frac{r\notin P,\ s\in P}
{ \X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{r}{1}{l},\, P_{l,r,s} }
\] \\
\[\frac{r\in P,\ s\notin P}
{\X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{s}{1}{l},\, P_{l,r,s} }
\]
&
\[\frac{r,s\notin P}
{\X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{t}{1^+}{l},\, P_{l,r,s} 
  \text{ for $t\in\{r,s\}$.}
}\]
\end{tabular} 

\item (Binary rules for assignments) \par
Let $\C$ be any of the above commands. If, for $i,i'\notin P$, and $j,j'\notin Q$,
where $i\ne i'$ or $j\ne j'$, we have
$\C,P  \,\vdash \,\unarydep{i}{r_1}{j},  Q$ and
$\C,P  \,\vdash \,\unarydep{i'}{r_2}{j'},  Q$, where $r_1,r_2\simeq 1$, then 
$\C,P  \,\vdash \,\binarydep{i}{i'}{j}{j'},  Q$.
\ee

\subsection{Inference rules for composite commands}

The composite commands are the choice, sequential composition and
the loop.

\paragraph*{Choice} is simplest, handled by the obvious rules:

%Choice:
\begin{equation}\mytag{C}
\frac{   {\tt C}_1, P \vdash D, Q }
{ {\tt choose\,C}_1 {\tt or\,C}_2, P \vdash  D, Q }
\qquad
\frac{   {\tt C}_2, P \vdash D, Q }
{ {\tt choose\,C}_1 {\tt or\,C}_2, P \vdash  D, Q }
\end{equation}

\paragraph{Sequential composition} requires an operator for abstract composition,
that is, composition of dependences.
 
 \bdfn
 The binary operation $\cdot$ is defined on $\deps$ by the following rules:
\[\renewcommand{\arraystretch}{2} 
\begin{array}{ccc}
   (\unarydep{i}{\alpha}{j}) \cdot (\unarydep{j}{\beta}{k}) &=& 
   \multicolumn{1}{l}{\longunarydep{i}{\alpha\sqcup\beta}{k}} \\  
   (\unarydep{i}{\alpha}{j}) \cdot (\binarydep{j}{j}{k}{k'}) &=& (\binarydep{i}{i}{k}{k'}),
\quad\text{provided $\alpha \simeq 1$} \\
    (\binarydep{i}{i'}{j}{j}) \cdot (\unarydep{j}{\alpha}{k}) &=& (\binarydep{i}{i'}{k}{k}),
\quad\text{provided $\alpha \simeq 1$} \\
   (\binarydep{i}{i'}{j}{j'}) \cdot (\binarydep{j}{j'}{k}{k'}) &=& 
\left\{\begin{array}{cl}
\binarydep{i}{i'}{k}{k'},  &  \text{ if $i\ne i'$ or $k\ne k'$} \\
\unarydep{i}{2}{k},  &  \text{if $i=i'$ and $k= k'$}
\end{array}\right.
 \end{array}
\]
 \edfn
We now have the rule
%Sequential composition:
\begin{equation}\mytag{S}
\frac{   {\tt C}_1, P \vdash D_1, Q \quad {\tt C}_2, Q \vdash D_2, R }
{  {\tt C}_1 {\tt ;C}_2, P \vdash   {D_1\cdot D_2}, \, R }
\end{equation}
Naturally, the rule is only applicable if $D_1\cdot D_2$ is defined.

\paragraph*{The loop} involves the possibility of growth that depends on the number of 
iterations.  To handle this, we introduce a \emph{loop correction} operator (not unlike
the one in~\cite{BJK08}).

\bdfn \label{def:LC}
The loop correction operator $LC_\ell : \deps\to\deps$ is defined by
\begin{align*}
LC_\ell (\unarydep{i}{1^+}{i}) &= 
   \unarydep{\ell}{2}{i}   \\
LC_\ell (\unarydep{i}{2}{i}) &= 
   \unarydep{\ell}{3}{i}  
%   \\
%LC_\ell (\unarydep{i}{1}{i}) &= 
%   \unarydep{i}{1}{i}   
\end{align*}
\edfn
Explanation: in the first case, $\X_i$ has something added to it. Intuitively, if this happens
inside a loop, it
results in growth that is at least linear in the number of iterations.
In the second case, $\X_i$ is multiplied by something, which results in exponential growth.
%The third case is just a convenience.

There are three loop rules. The first covers the case that the body is not executed.
%Loop: 
\begin{equation}\mytag{L0} \label{eq-loop-L0}
\frac{ \texttt{skip}, P  \vdash D, P } 
{{\tt loop\,X_\ell\{C\}}, P \vdash   D, P }
\end{equation}

The second describes the result of any number $m>0$ of iterations.
% \begin{equation}\mytag{L1}
% \frac{ (\exists m) (\exists P_0,\dots,P_m, D_1,\dots,D_m)
% ( \ell\notin P_0 \land (\forall i<m)\, \C, P_i  \vdash D_{i+1}, P_{i+1} ) } 
% {{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot D_2\cdot\ldots\cdot D_m}, P_m }
% \end{equation}
%%%% A better version - without \exists and \forall :
\begin{equation}\mytag{L1} \label{eq-loop-L1}
\frac{ {\tt C}, P_0  \vdash D_{1}, P_{1}\quad {\tt C}, P_1  \vdash D_{2}, P_{2}\quad\dots\quad
{\tt C}, P_{m-1}  \vdash D_{m}, P_{m}\qquad \ell\notin P_0 }
{{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot D_2\cdot\ldots\cdot D_m}, P_m }
\end{equation}

The third applies the LC operator.
% \begin{equation}\mytag{L2}
% \frac{ (\exists P_0,P_1=P_2,P_3, D_1,D_2,D_3)
% (\forall 0\le i<3)\, {\tt loop\,X_\ell\{C\}}, P_i  \vdash D_{i+1}, P_{i+1} }
% {{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot LC_\ell(D_2)\cdot D_3}, \, P_3 }
% \end{equation}
%%%%% A better version without \exists :
\begin{equation}\mytag{L2} \label{eq-loop-L2}
\frac{ 
{\tt loop\,X_\ell\{C\}}, P_0  \vdash D_{1}, P_{1} \quad
{\tt loop\,X_\ell\{C\}}, P_1  \vdash D_{2}, P_{1} \quad
{\tt loop\,X_\ell\{C\}}, P_1  \vdash D_{3}, P_{3} \qquad \ell\notin P_0}
{{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot LC_\ell(D_2)\cdot D_3}, \, P_3}
\end{equation}

Note that as $LC_\ell$ is applied to $D_2$, we require that $D_2$ be a dependence
that can be iterated: this requires that the pre-context and post-context be
the same.

\section{Technical Background (\cite{BJK08})}
One important data structure described in ~\cite{BJK08} was integrated into the algorithms of this document. 
This data structure is the \emph{Data Flow Relation}, or $DFR$ for short. 
A \emph{Data Flow Relation} is a pair $(M,R)$ where $M$ is a set of unary dependences and $R$ is a set of binary dependences, 
as described in \ref{def-binary-dep}. $M$ may be viewed as a bipartite, labeled digraph where the left-hand (source) side represents the input variables 
and the right-hand (target) side represents the output.
In~\cite{BJK08} $M$ was defined as a matrix such that $M_{ij}$ is the dependency type of $\X_j$ on $\X_i$. 
Our definition of $M$ differs from that of~\cite{BJK08} in that $\mathbb{D}$ does not include the dependence type 0, 
which was necessary there to fill matrix cells representing nonexistent dependences.

\subsection{Data Flow Relation}
Some formal definitions of DFR and its operations.
\paragraph{Definitions.}
\begin{enumerate}
\item For a unary dependence set $M$, $\onesof(M)$ is the set of dependences labeled by $\{1, 1^+\}$ in $M$.


\item For a set $S$, $C_2 (S)$ is the set of 2-sets (unordered pairs) over $S$.


\item The identity unary dependence set $I$ contains all dependences of the form $\unarydep{i}{1}{i}, i \in V$.


\item Let $r(M) = C_2(\onesof(M)).$

\item For two unary dependence sets $M_1 , M_2$, the $\sqcup$ operator is defined as follows. 
\label{def-sqcup-DFR}
\begin{eqnarray*}
 M_1 \sqcup M_2 = &\{ \unarydep{i}{d}{j}\; |\;( \exists d',d''\in \mathbb{D} : \unarydep{i}{d'}{j} \in M_1 \wedge \unarydep{i}{d''}{j} \in M_2 \wedge d = d' \sqcup d'') \vee \\
                  &(\unarydep{i}{d}{j} \in M_1 \wedge \not\exists d'\in \mathbb{D} : \unarydep{i}{d'}{j} \in M_2) \vee \\
                  &(\unarydep{i}{d}{j} \in M_2 \wedge \not\exists d'\in \mathbb{D} : \unarydep{i}{d'}{j} \in M_1)\} \\
\end{eqnarray*}


\item \label{def-unary-set-prod}
For two unary dependence sets $M_1 , M_2$, the $\cdot$ operator is defined as follows.
\begin{eqnarray*}
 M_1 \cdot M_2 = &\{\unarydep{i}{d}{j}\; |\;\exists k\in V, \;\; d',d'' \in \mathbb{D}\; :  \; \unarydep{i}{d'}{k} \in M_1 \wedge \unarydep{k}{d''}{j} \in M_2 \wedge d = d' \sqcup d''\} 
\end{eqnarray*}

%TODO: the next item allows arcs i-(1)->j, i-(2)->j to be in the same M, only one dependence should be allowed for each ordered pair of variables.
\item $\dfrs$ is 
$\{(M,R) \st M \subseteq \inds\times\deptypes\times\inds \mbox{ and } R\subseteq
 C_2(\onesof(M))\}\,.$
\end{enumerate}
\paragraph{Operations.}
For compact notation,
instead of writing $\{i\to j, i'\to j'\}\in R$ we may write $R(i,j,i',j')$.

\begin{enumerate}
\item Sum of dataflow relations:
$$(M_1,R_1)\sqcup(M_2,R_2) \stackrel{def}{=} (M_1\sqcup M_2, 
 (R_1\cup R_2)\cap C_2(\onesof(M_1\sqcup M_2))).$$ 

\item Product of dataflow relations:
Let $(M,R)$, $(M',R')$ be dataflow relations;
$$(M,R)\cdot (M',R') \stackrel{def}{=} (M'', R''),$$
where: 
\begin{eqnarray*}
M'' &=& (M \cdot M')\sqcup \{ i\stackrel{2}{\to} j \mid \exists s\ne t . R(i,s,i,t) \land R'(s,j,t,j) \} \\
R'' &=&
\{ \{i\to j, i'\to j'\}\in \uop{\onesof(M'')}
 \mid \exists s,t . R(i,s,i',t) \land R'(s,j,t,j') \}
\\ &\cup&
\{ \{i\to j, i\to j'\}\in \uop{\onesof(M'')} 
 \mid \exists s . (i,s)\in\onesof(M) \land R'(s,j,s,j') \}
\\ &\cup&
\{ \{i\to j, i'\to j\}\in \uop{\onesof(M'')}
 \mid \exists s . R(i,s,i',s) \land (s,j)\in\onesof(M') \}.
\end{eqnarray*}

%\emph{Remark 1:} The purpose of the product is to describe the result of
%sequential composition of commands. The rule defining $M''$ is in fact the
%\emph{raison d'\^etre} of the $R$ part. The summand dependent on $R$
%``catches'' commands whose effect
%is to double a variable's value by adding two copies of it, a situation
%described by \emph{the diamond}:

%\begin{picture}(200,80)(0,65)
%\put(49,100){$i$}
%\put(98,70){$t$}
%\put(98,130){$s$}
%\put(150,100){$j$}
%\put(55,107){\vector(2,1){40}}
%\put(55, 95){\vector(2,-1){40}}
%\put(105,127){\vector(2,-1){40}}
%\put(105, 75){\vector(2,1){40}}
%\end{picture}
%
%Doubling is important because if the net effect of a command is
%to set \pgt{X} to twice its initial value, repeating this command in a
%loop generates exponential growth. The reader may have guessed that if this
%situation occurs in analyzing a program, the two meeting arcs will necessarily
%be labeled with $1^+$.
%
%\emph{Remark 2:} the product is associative and distributes over $\sqcup$,
%i.e., $$(M,R)\cdot ((M_1,R_1)\sqcup(M_2,R_2)) =
%       ((M,R)\cdot (M_1,R_1))\sqcup((M,R)\cdot (M_2,R_2))\,.$$
%(the reader may enjoy proving these properties as a way to gain insight into
%our definitions).

\item Powers: defined by $(M,R)^0 = (I,r(I))$ and $(M,R)^{i+1}
= (M,R)^i \cdot (M,R)$.

\item Loop Correction (for loop variable $\X_\ell$): for a DFR $(M,R)$, define $LC_\ell(M,R) = (M',R')$ where
$M'$ is identical to $M$ except that:
\begin{enumerate} 
\item
 For all $j$ such that $\unarydep{j}{d}{j} \in M$, $d \ge 2$, $M'$ will contain $\unarydep{\ell}{3}{j}$. 
\item \label{def-LC-2}
 For all $j$ such that $\unarydep{j}{1^+}{j} \in M$, $M'$ will contain $\unarydep{\ell}{2}{j}$.
\end{enumerate}
and $R' = R\cap C_2(\onesof(M'))$.

Remarks: the first rule reflects the exponential growth that results from
multiplying a variable's value (even just by 2) inside a loop.
The second rule reflects the behaviour of \emph{accumulator variables}.
Here, ${\tt X}_j$ is an accumulator. On an intuitive level, we know
that some quantity $y$ is added to it
in the loop. Therefore, the effect of the loop will be to add
${\tt X}_\ell\cdot y$, hence the correction to $M_{\ell j}$.

\end{enumerate}

\subsection{DFR Example}
The dependencies deduced from the command X$_1$:=X$_2$; X$_2$:= X$_3$ are shown as a $DFR$ in figure \ref{fig:DFR_example}.
The black arcs represent unary dependences and the double blue arcs represent binary dependences.
X$_2$ is copied to X$_1$ which is represented by the unary dependence $\unarydep{2}{1}{1}$. 
Afterwards X$_3$ is copied to X$_2$, resulting in the dependence $\unarydep{3}{1}{2}$. X$_3$ itself is not modified. This is represented by the dependence $\unarydep{3}{1}{3}$.

%graph figure of DFR.
\begin{figure}[t]
$$
\begin{array}{cc}
\includegraphics[scale=0.4]{DFR_M.png} & \includegraphics[scale=0.4]{DFR_R.png} 
\end{array}
$$
\caption{A graph representation of a DFR. The black arcs represent $M$ and the double blue arcs represent $R$. \label{fig:DFR_example}}
\end{figure}

%For example, suppose that command C is \verb+X+$_1$\verb+ := X+$_2$\verb& + X&$_2$, then $M$ will contain the arcs $2\xrightarrow{2}1$, and $2\xrightarrow{1}2$, therefore $R$ will include the 2-sets: \\\\ 
%$\binarydep{2}{2}{1}{1}, \; \binarydep{2}{2}{2}{2}$, and $\binarydep{2}{2}{1}{2} $.
%$\{2 \rightarrow 1, 3 \rightarrow 1\},\{2 \rightarrow 2, 2 \rightarrow 2\},\{3 \rightarrow 3, 3 \rightarrow 3\}, \{2 \rightarrow 2, 3 \rightarrow 3\}, \{3 \rightarrow 1, 3 \rightarrow 3\}, \{3 \rightarrow 1, 2 \rightarrow %2\},\\\\ \{2 \rightarrow 1, 3 \rightarrow 3\}, \{2 \rightarrow 1, 2 \rightarrow 2\} $.

\subsection{Usage of DFRs}
A $DFR$ provides a compact way of describing the dependences between the program variables. 
M and R taken together are used to describe all the dependences deduced for a command.
Given a command \verb+C+, a pre-context $P$ and a post-context $Q$ the set of all dependence judgements can be written as follows (example):\newline \newline
\{\verb+C+,$P \vdash \unarydep{1}{2}{1},\, Q$, \; \verb+C+,$P \vdash \unarydep{2}{1}{2},\, Q$, \; \verb+C+,$P \vdash \unarydep{2}{2}{1},\, Q$, \; $...$\;\}\newline \newline
A compact notation for describing this set is \verb+C+,$P \vdash D,\, Q$, where $D$ is a \emph{Data Flow Relation}.
Looking at this notation we come up with a definition of a \emph{context graph}.
\bdfn
A \emph{context graph} is a directed graph where each vertex represents a reset context and each arc is associated with a $DFR$.
\edfn
A context graph can hold all dependence judgements given all possible pre-contexts and post-contexts.
It is an intuitive data structure for maintaining the dependence judgements during the program analysis.

\section{The ``Naive'' Approach}
In this section we describe an analysis algorithm based on the inference rules of~\cite{BA10} which takes a ``Naive'' approach to the problem:
The Abstract Syntax Tree is processed bottom-up and for every command the set of all true judgments is generated based on the inference rules.
This implies exponential complexity of the algorithm runtime and space as for each command the number of reset contexts is exponential with respect to the number of variables in the program.
 
\subsection{Preliminaries}
Following are some definitions used in the algorithm description.

\bdfn
Let $\mathbb{G}$ be the set of all context graphs.
\edfn

\bdfn
Let $G$ be a \emph{context graph}, then $G_{P,Q}$ is the $DFR$ associated with the arc from context $P$ to context $Q$ in $G$.
\edfn

\bdfn
We define the \emph{compositional operator} ``$\cdot$" for context graphs as follows: \newline 
Let $G,G'$ be some \emph{context graphs}, then
 $G \cdot G' = G''$ where: \newline
1. $E(G'') = \{ (P,Q) \; | \; \exists P' \in \mathbb{C} :  (P,P') \in E(G) \wedge (P',Q) \in E(G') \}$ \newline
2. $G''_{P,Q} = \bigsqcup G_{P,P'} \cdot G'_{P',Q}$ such that $(P,P') \in E(G)$ and $(P',Q) \in E(G')$ 
\edfn
In the expression  $G_{P,P'} \cdot G'_{P',Q}$ the ``$\cdot$" operator is the one defined for $DFR$s.

\bdfn
We define the \emph{join} or \emph{least upper bound}  operator ``$\sqcup$" for  context graphs as follows.
Let $G,G'$ be some \emph{context graphs}, then \newline $G \sqcup G' = G''$ where: \newline
1. $E(G'') = \{ (P,Q) \;| \; (P,Q) \in E(G) \vee (P,Q) \in E(G') \}$  \newline
2. $(P,Q) \in E(G) \wedge (P,Q) \not\in E(G') \rightarrow G''_{P,Q}=G_{P,Q}$  \newline
3. $(P,Q) \not\in E(G) \wedge (P,Q) \in E(G') \rightarrow G''_{P,Q}=G'_{P,Q}$ \newline
4. $(P,Q) \in E(G) \wedge (P,Q) \in E(G') \rightarrow G''_{P,Q}=G_{P,Q} \sqcup G'_{P,Q}$  
\edfn
In the expression  $G_{P,Q} \sqcup G'_{P,Q}$ the ``$\sqcup$" operator is the one defined for $DFR$s.

\bdfn
For a context $P$, we denote: \newline
1. $P^{Var}_{x,y}=P \setminus \{x\} \cup \{ x \;, \; \text{if} \; y \in P \}$ \newline
2. $P^{Sum}_{x,y,z}=P \setminus \{x\} \cup \{ x \; , \; \text{if} \; y \in P \wedge  z \in P \}$ \newline
3. $P^{Prod}_{x,y,z}=P \setminus \{x\} \cup \{ x \; , \; \text{if} \; y \in P \vee  z \in P \}$ 
\edfn

\bdfn
Let $G$ be a \emph{context graph}, then $LCG_\ell(G)$ is the \emph{loop corrected} G, it is a context graph  defined as follows:
\begin{eqnarray*}
\forall (P_0,P_2) \in E(G) : &  & \\ 
   LCG_\ell(G)_{P_0,P_2} & = \\
   G_{P_0,P_2} & \sqcup & \Big( \bigsqcup_{(P_0,P_1),(P_1,P_1),(P_1,P_2) \in E(G)} (G_{P_0,P_1}\cdot LC_\ell (G_{P_1,P_1}) \cdot G_{P_1,P_2}) \Big) \\
\end{eqnarray*}
\edfn
In the expression  $LC_\ell (G_{P_1,P_1})$, the ``$LC_\ell$" operator is the one defined for $DFR$s.

\newpage
\subsection{The ``Naive'' Analysis Algorithm} \label{naive-algo}
The algorithm's input is a command of the core language and its output is a context graph.
The algorithm works bottom-up; the premises of a composite command  are analysed first, resulting with context graphs which are then combined in some way to yield
the analysis result of the composite command itself. The algorithm is defined as a semantics function $\lsem \; \rsem : {\tt Command} \rightarrow \mathbb{G}$.
We describe the algorithm output by showing the effect of applying the semantics function to each of the core language commands. 

\begin{enumerate}
\item{Command Composition.}
The context graph of command C$_1$;C$_2$ is the composition of the context graphs computed for C$_1$ and C$_2$:
\begin{eqnarray*}
% C1;C2 command analysis semantics:
\lsem{\tt C}_1 {\tt ;C}_2\rsem &=&
  \sempar{C$_1$}\cdot\sempar{C$_2$} 
\end{eqnarray*}
%choose {} or {} command analysis semantics:
\item{The \verb+choose+ Command.}
The context graph is the join of context graphs computed for the command's premises:
\begin{eqnarray*}
\lsem\verb+choose+\; \{ \verb+C+_1 \}\; \verb+or+ \; \{ \verb+C+_2 \}\rsem &=&
  \sempar{C$_1$}\sqcup\sempar{C$_2$} 
\end{eqnarray*}
%zero assignment command analysis semantics:
\item{Zero Assignment.}
The context graph computed for assignment of zero to variable X$_i$:
\begin{eqnarray*}
\lsem {\tt X}_i {\tt\; :=0} \rsem &=& Context \; graph \; G \; where: \\ 
  &1. &  E(G) = \{ (P,P \cup \{i\}) \;| \; P \in \mathbb{C} \} \\
  &2. &  \forall P \in \mathbb{C} : G_{P,P \cup \{i\}} = (M,R) \; s.t. \\
  &&  M = \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq i \}, \;\; R = C_2(\onesof(M))  
\end{eqnarray*}
%var assignment command analysis semantics:
\item{Variable Assignment.}
The context graph computed for assignment of variable X$_j$ to variable X$_i$:
\begin{eqnarray*}
\lsem {\tt X}_i {\tt \;:=X}_j \rsem &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Var}_{i,j}) \;| \; P \in \mathbb{C} \} \\
  &2.&  \forall P \in \mathbb{C} : G_{P,P^{Var}_{i,j}} = (M,R) \; s.t. \\
  && M = \{ \unarydep{j}{1}{i} | j \not \in P  \} \cup 
         \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq i \}   \\
  && R = C_2(\onesof(M)) 
\end{eqnarray*}
%sum assignment command analysis semantics:
\item{Variable Sum Assignment.}
The context graph computed for assignment of variable sum:
\begin{eqnarray*}
\lsem {\tt X}_i {\tt \;:=X}_j \;{\tt + \;X}_k \rsem &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Sum}_{i,j,k}) \;| \; P \in \mathbb{C} \} \\
  &2.&  \forall P \in \mathbb{C} : G_{P,P^{Sum}_{i,j,k}} =  (M,R) \; s.t. \\ &&
     M = \{ \unarydep{t}{1}{t} | t \neq i \wedge t \not \in P  \} \cup 
    \{ \unarydep{j}{1}{i} | j \not \in P \wedge k \in P \} \cup \\ &&
    \{ \unarydep{k}{1}{i} | k \not \in P \wedge j \in P \} \cup
   \{ \unarydep{t}{1^+}{i} | t \in \{j,k\} \wedge j,k \not\in P \}  \\
  && R = C_2(\onesof(M))     
\end{eqnarray*}
%product assignment command analysis semantics:
\item{Variable Product Assignment.}
The context graph computed for assignment of variable product:
\begin{eqnarray*}
\lsem {\tt X}_i {\tt :=X}_j \;{\tt * \; X}_k \rsem &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Prod}_{i,j,k}) \;| \; P \in \mathbb{C} \} \\
  &2.&  \forall P \in \mathbb{C} : G_{P,P^{Prod}_{i,j,k}} = (M,R) \; s.t. \\ &&
    M = \{ \unarydep{t}{1}{t} | t \neq i \wedge t \not \in P  \} \cup 
    \{ \unarydep{t}{2}{i} | t \in \{j,k\} \wedge j,k \not\in P \}  \\
  && R = C_2(\onesof(M)) 
\end{eqnarray*}
%skip command analysis semantics:
\item{The \verb+skip+ Command.}
The context graph computed is the same as the one computed for variable assignment command X$_1$ := X$_1$:
\begin{eqnarray*}
\lsem \verb+skip+ \rsem &=&
   \lsem{\tt X}_1 {\tt \;:=X}_1 \rsem 
\end{eqnarray*}

\item{The \verb+loop+ Command.}
For the command analysis semantics definition we must first define the effect of the \emph{fix} operator.
\bdfn
Let $G$ be a \emph{context graph}, then \verb+fix+$_k (G)$ is the $k$-th \emph{composition closure} of $G$, it is a context graph defined as follows:
\begin{eqnarray*}
 {\tt fix}_k (G) =  \bigsqcup_{1 \le i \le k} (\lsem {\tt skip} \rsem \sqcup G)^i 
\end{eqnarray*}
Here $(\lsem {\tt skip} \rsem \sqcup G)^i$ stands for the composition of $i$ instances of $\lsem {\tt skip} \rsem \sqcup G$. 
For example, ${(\lsem {\tt skip} \rsem \sqcup G)^3 = (\lsem {\tt skip} \rsem \sqcup G) \cdot (\lsem {\tt skip} \rsem \sqcup G) \cdot (\lsem {\tt skip} \rsem \sqcup G)}$.
%the monotonicity of all DFR operations taking place and because each context graph join operation
%may only introduce new arcs to the graph without removal of existing arcs. The number of arcs in a context graph is bounded by $2^{2 \cdot |V|}$ 
%(the number of different possible ways to pick a context as source and another context as target).
%define lemma for finiteness of the closure
\edfn 
We now proceed to the loop command analysis semantics, which are simply defined as: 
\begin{eqnarray*}
%loop command analysis semantics:
\lsem {\tt loop \; X}_\ell \; {\tt C} \rsem &=& LCG_{\ell} \Big( {\tt fix}_{\infty} ( \lsem {\tt C} \rsem) \Big) 
\end{eqnarray*}
The \verb+fix+ operator applies loop rules~\ref{eq-loop-L0} and~\ref{eq-loop-L1} while the $LCG_{\ell}$ operator applies loop rule~\ref{eq-loop-L2}, all described in Section~\ref{subsec-ProofSystem}.
Although the fix operator applies an infinite join operation, computing the composition closure of a context graph requires only a finite number of iterations.
This property is true due to the following facts:
\begin{enumerate}
\item
The number of different possible context graphs over a variable set $V$ is finite. 
In fact it is bounded by $2^{2 \cdot |V|} \cdot ( |V|^6 \cdot |\mathbb{D}|) $.

\item
A partial order is maintained between the results of each two consecutive $k$'s of the \verb+fix+ operation. 
The partial order is stated in the following lemma, which is proven in section \ref{proof-CG-mono}:
\begin{lemma}\label{lemma-CG-mono}
Let $G$ be some context graph then \\ $\forall k \ge 1 :  {\tt fix}_k (G) \sqsubseteq {\tt fix}_{k+1} (G)$.
\end{lemma}
The $\sqsubseteq$ relation is a partial order on context graphs which will be more precisely defined in \ref{proof-CG-mono}.
\end{enumerate}

From the facts above we deduce that there exists some finite $m > 0$ s.t. $\forall n > m : {\tt fix}_n (G) = {\tt fix}_{n-1} (G)$.
\end{enumerate}
%The sequence (\verb+;+) and \verb+choose+ commands are analysed using the sequencial composition (``$\cdot$") and join (``$\sqcup$") operators.
%The \verb+skip+ command is analysed the same way a self assignment is analysed.

\subsection{Monotonicity of the {\tt fix} Operator}
\label{proof-CG-mono}
This section presents the proof of lemma \ref{lemma-CG-mono}.
First we define the $\sqsubseteq$ relation for DFRs and context graphs.
\bdfn
\label{def-mono-rel}
Let $D_1=(M_1,R_1), D_2=(M_2,R_2)$ be two $DFR$s. \\ Then $D_1 \sqsubseteq D_2$, if  and only if:
\begin{eqnarray*}
a. & \forall \unarydep{i}{d}{j} \in M_1 \; \exists d' \in \mathbb{D} \;\; : & \;\; d' \ge d \wedge \unarydep{i}{d'}{j} \in M_2 \\
b. & \forall \binarydep{i}{j}{i'}{j'} \in R_1 \;\; : & \;\; \binarydep{i}{j}{i'}{j'} \in R_2 \lor \\   
                                           &  &  (\exists d \in \mathbb{D} : d \ge 2 \wedge (\unarydep{i}{d}{i'} \in M_2 \lor \unarydep{j}{d}{j'} \in M_2))  
\end{eqnarray*}
\edfn
We note that this relation defines a partial order on $DFR$s since if some unary dependence $\unarydep{i}{d}{j}$ exists in $M_1$ then a dependence 
$\unarydep{i}{d'}{j}, d' \ge d$ must exist in $M_2$, and if a binary dependence $\binarydep{i}{j}{i'}{j'}$ exists in $R_1$ then either it also exists in
$R_2$ or there exists a unary dependence of type greater or equal to 2 which makes the binary dependence not useful since a non-linear dependence type
is already deduced on at least one of its 2-sets.

\bdfn
Let $G, G'$ be two context graphs, \\ then $G \sqsubseteq G'$ if and only if:
\begin{eqnarray*}
 \forall (P,Q)\in E(G) : G_{P,Q} \sqsubseteq G'_{P,Q} \\
\end{eqnarray*}
\edfn

\paragraph{}
We now present a lemma which states that the $\sqsubseteq$ relation is closed under the $\sqcup$ operator. This lemma is used later on to show 
the correctness of lemma \ref{lemma-CG-mono}.

\begin{lemma} \label{lemma-mono-join-closure}
Let $G,G'$ be some context graphs, then $G \sqsubseteq G \sqcup G'$.
\end{lemma}
Proof: Let $(P,Q) \in E(G)$. We show that $G_{P,Q} \sqsubseteq (G \sqcup G')_{P,Q}$: \\
From the definition of the $\sqcup$ operator for context graphs we get that \\ ${(G \sqcup G')_{P,Q} = G_{P,Q} \sqcup G'_{P,Q}}$. \\
Denote $G_{P,Q} = (M,R)$, $G'_{P,Q} = (M',R')$ and $(G \sqcup G')_{P,Q} = (M'',R'')$.

\paragraph{}
Let $\unarydep{i}{d}{j} \in M$. If $\not\exists d'\in \mathbb{D} : \unarydep{i}{d'}{j} \in M'$ then according to the 
definition of the $\sqcup$ operator for DFRs in \ref{def-sqcup-DFR},
$\unarydep{i}{d}{j} \in M''$. Otherwise, $\exists d'\in \mathbb{D} : \unarydep{i}{d'}{j} \in M'$, then according to the
definition of the $\sqcup$ operator for DFRs, $\unarydep{i}{d \sqcup d'}{j} \in M''$. This satisfies part a of the $\sqsubseteq$ relation definition in \ref{def-mono-rel}.

\paragraph{}
Let $\binarydep{i}{j}{i'}{j'} \in R$. If $\binarydep{i}{j}{i'}{j'} \in R''$ then part b of definition \ref{def-mono-rel} is satisfied. 
Otherwise, $\binarydep{i}{j}{i'}{j'} \not\in R''$. Since $\binarydep{i}{j}{i'}{j'} \in R$, it could only be dropped by the application of $G_{P,Q} \sqcup G'_{P,Q}$,
which performs the following: \\ 
$$(M'',R'') = (M,R)\sqcup(M',R') = (M \sqcup M', 
 (R \cup R')\cap C_2(\onesof(M\sqcup M')))\,.$$
This means that $R'' = (R \cup R')\cap C_2(\onesof(M\sqcup M'))$. Since ${\binarydep{i}{j}{i'}{j'} \in (R \cup R')}$, it must have been dropped from $R''$ by the
intersection with the set \\ ${S = C_2(\onesof(M\sqcup M'))}$. $S$ will not contain ${\binarydep{i}{j}{i'}{j'}}$ only if a dependence of type $\unarydep{i}{d}{i'}$ or
$\unarydep{j}{d}{j'}$, with $d \ge 2$ exists in $M'' = M \sqcup M'$, which means that part b of  definition \ref{def-mono-rel} is satisfied.

\paragraph{}
We showed that parts a and b of  definition \ref{def-mono-rel} are satisfied for $G_{P,Q} = (M,R)$ and $G_{P,Q} \sqcup G'_{P,Q} = (M'',R'')$. 
Since P and Q are general it follows that $\forall (P,Q)\in E(G) : G_{P,Q} \sqsubseteq G_{P,Q} \sqcup G'_{P,Q}$, as required. $\qed$

\paragraph{Proof of Lemma \ref{lemma-CG-mono}.} 

Let $G' = {\tt fix}_k (G)$ and $G'' = {\tt fix}_{k+1} (G)$. 
\begin{eqnarray*}
G'' = & {\tt fix}_{k+1} (G) = \bigsqcup_{1 \le i \le k+1} (\lsem {\tt skip} \rsem \sqcup G)^i = \\
      & (\bigsqcup_{1 \le i \le k} (\lsem {\tt skip} \rsem \sqcup G)^i) \sqcup (\lsem {\tt skip} \rsem \sqcup G)^{k+1} = \\ 
      & {\tt fix}_{k} (G) \sqcup (\lsem {\tt skip} \rsem \sqcup G)^{k+1} = \\
      & G' \sqcup (\lsem {\tt skip} \rsem \sqcup G)^{k+1} \sqsupseteq G' \;\; \qed
\end{eqnarray*}
\paragraph{}
The last $\sqsupseteq$ is due to lemma \ref{lemma-mono-join-closure}.

\subsection{The ``Naive'' Algorithm Execution Example}
We demonstrate the execution of the ``naive'' algorithm on the program $C_{program} =$  
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_1$ {
   X$_2$ := X$_3$+X$_4$;
   choose { X$_3$ := X$_2$ } or { X$_4$ := 0 }
 }
\end{Verbatim}

For compactness we denote the program's nested commands as follows:
\begin{eqnarray*}
C_{choose} & = & {\tt choose} \; \{ \; {\tt X}_3 \; {\tt := \; X}_2 \;\}\; {\tt or }\; \{ \; {\tt X}_4 {\tt \;  :=\; 0 } \; \} \\
C_{body}   & = & {\tt X}_2 {\tt := X}_3 {\tt +X}_4 \; {\tt ;} \; C_{choose} 
\end{eqnarray*}
%$C_{choose} = $ \verb+choose { X+$_1$ \verb+:= X+$_3$ \verb+ } or { X+$_2$ \verb+ := 0  } + \\
%$C_{body} = $ \verb+X+$_3$ \verb+:=+ X$_1$\verb^+^X$_2$\verb+;+ $C_{choose}$ \\

We also note that for the above program, the set of all variables $V$ is $\{1,2,3,4\}$ and that the set of all possible contexts is: 
\begin{eqnarray*}
\mathbb{C} & = & \Big\{\; \phi , \{ 1 \},\{ 2 \},\{ 3 \},\{ 4 \},\{ 1, 2 \},\{ 1, 3 \},\{ 1, 4 \},\{ 2, 3 \},\{ 2, 4 \},\{ 3, 4 \}, \\
           & &\;\;\{ 1, 2, 3 \},\{ 1, 2, 4 \},\{ 1, 3, 4 \},\{ 2, 3, 4 \}, \{ 1, 2, 3, 4 \} \;\Big\}
\end{eqnarray*}

The algorithm analysis of $C_{program}$ results with:
\begin{eqnarray*}
\lsem C_{program} \; \rsem  &=& \lsem {\tt loop \; X}_1 \; {\tt \{ C_{body} \}} \rsem = LCG_{1}({\tt fix}_{\infty} ( \lsem {\tt C_{body}} \rsem)) = \\
                            &=& LCG_{1}({\tt fix}_{\infty} ( \lsem {\tt X}_2 {\tt := X}_3{\tt +X}_4 \rsem \cdot \lsem  C_{choose} \rsem)) = \\						
                            &=& LCG_{1}({\tt fix}_{\infty} ( \lsem {\tt X}_2 {\tt := X}_3{\tt +X}_4 \rsem \cdot
                                           (\lsem  {\tt X}_3 {\tt := X}_2 \rsem \sqcup \lsem  {\tt X}_4 {\tt := 0} \rsem))) 						
\end{eqnarray*}
We will recall the analysis semantics of each command and show the context graph computed according to it with samples of DFRs associated with its arcs.
The analysis works bottom-up so it starts by handling the leaves - the commands surrounded with $\lsem \; \rsem$ in the expression above.
Afterwards the analysis gradually combines the results, working its way up to the program's root command, $C_{program}$.
The analysis of each leaf results with a context graph which is built according to the analysis semantics detailed in section \ref{naive-algo}:

\paragraph{Analysis of X$_3$ := X$_2$.}
We apply the semantics of variable assignment:
\begin{eqnarray*}
\lsem {\tt X}_3 {\tt \;:=X}_2 \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,P^{Var}_{3,2}} = (M,R) \; s.t. \\
  && M = \{ \unarydep{2}{1}{3} | 2 \not \in P  \} \cup 
         \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq 3 \}   \\
  && R = C_2(\onesof(M))
\end{eqnarray*}
\begin{figure}[t] 
$$
\begin{array}{cc}
\includegraphics[scale=0.4]{naive_G_var_asgn.png} & \includegraphics[scale=0.5]{naive_DFR_M_var_asgn.png}  \includegraphics[scale=0.3]{naive_DFR_R_var_asgn.png} \\
Context\; Graph & M\; and\; R\;of\;DFR  \\
\end{array}
$$
\caption{On the left: the context graph computed for command X$_3$ := X$_2$. On the right: $(M,R)$ of DFR associated with arc $\{3,4\} \rightarrow \{4\}$. \label{fig:naive-var-asgn-graph} }
\end{figure}
The computed context graph as a bipartite digraph is shown to the left in figure \ref{fig:naive-var-asgn-graph}. You may notice that arcs leaving context $\{1,2,3,4\}$ are missing from the graph.
The reason is that no dependences can be deduced from any command if the source context contains all variables. This is true also for any arc which is associated with an empty DFR.
$M$ and $R$ of the DFR associated with arc $\{3,4\} \rightarrow \{4\}$ are also shown in figure \ref{fig:naive-var-asgn-graph}.
Notice that since the arc's source vertex (pre-context) contains variables X$_3$ and X$_4$, but does not contain X$_2$, the arc's target vertex (post-context) does not contain
X$_3$ due to the assignment of X$_2$ to  X$_3$ which changes the value of X$_3$ from zero to non-zero.

\paragraph{Analysis of X$_4$ := 0.}
We apply the semantics of zero assignment:
\begin{eqnarray*}
\lsem {\tt X}_4 {\tt\; :=0} \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,P \cup \{4\}} = (M,R) \; s.t. \\
  &&  M = \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq 4 \}, \;\; R = C_2(\onesof(M))  
\end{eqnarray*}

Results are shown in figure \ref{fig:naive-zero-asgn-graph}. Notice that given an empty pre-context, the post context contains X$_4$ and the variable dependences computed 
show that all variables except X$_4$ are not modified by the command.
\begin{figure}[t] 
$$
\begin{array}{cc}
\includegraphics[scale=0.5]{naive_G_zero_asgn.png} & \includegraphics[scale=0.4]{naive_DFR_M_zero_asgn.png}  \includegraphics[scale=0.5]{naive_DFR_R_zero_asgn.png} \\
Context\; Graph & M\; and\; R\;of\;DFR  \\
\end{array}
$$
\caption{On the left: the context graph computed for command X$_4$ := 0. On the right: $(M,R)$ of DFR associated with arc $\{ \} \rightarrow \{ 4\}$. \label{fig:naive-zero-asgn-graph} }
\end{figure}

\paragraph{Analysis of X$_2$ := X$_3$+X$_4$.}
We apply the semantics of variable sum assignment:
\begin{eqnarray*}
\lsem {\tt X}_2 {\tt \;:=X}_3 \;{\tt + \;X}_4 \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,P^{Sum}_{2,3,4}} =  (M,R) \; s.t. \\ &&
     M = \{ \unarydep{t}{1}{t} | t \neq 2 \wedge t \not \in P  \} \cup 
    \{ \unarydep{3}{1}{2} | 3 \not \in P \wedge 4 \in P \} \cup \\ &&
    \{ \unarydep{4}{1}{2} | 4 \not \in P \wedge 3 \in P \} \cup
   \{ \unarydep{t}{1^+}{2} | t \in \{3,4\} \wedge 3,4 \not\in P \}  \\
  && R = C_2(\onesof(M))     
\end{eqnarray*}

Results are shown in figure \ref{fig:naive-sum-asgn-graph}. Notice that given pre-context $\{ 2 \}$, the post context is empty and the variable dependences in $M$ labeled with dependence type $1^+$ 
show that X$_3$ and X$_4$ are summed into X$_2$.
\begin{figure}[htpb] 
$$
\begin{array}{cc}
\includegraphics[scale=0.5]{naive_G_sum_asgn.png} & \includegraphics[scale=0.35]{naive_DFR_M_sum_asgn.png}  \includegraphics[scale=0.6]{naive_DFR_R_sum_asgn.png} \\
Context\; Graph & M\; and\; R\;of \;DFR \\
\end{array}
$$
\caption{Context graph computed for command X$_2$ := X$_3$ + X$_4$ and $(M,R)$ of DFR associated with arc $\{ 2\} \rightarrow \{ \}$. \label{fig:naive-sum-asgn-graph} }
\end{figure}

\paragraph{Analysis of $C_{choose}$.}
We perform join of context graphs. 
The joined graphs are the ones shown in figures \ref{fig:naive-var-asgn-graph} and \ref{fig:naive-zero-asgn-graph}.
The resulting context graph includes all arcs which appear in either of the graphs, and the DFRs associated with arcs common to both are joined as well.

\paragraph{Analysis of $C_{body}$.}
We perform composition of context graphs. 
The composed graphs are $G'$ - the graph shown in figure \ref{fig:naive-sum-asgn-graph}, and $G''$ - the graph computed by the analysis of $C_{choose}$.
The resulting context graph $G$ includes all arcs $(P,Q)$ such that $\exists P' \in \mathbb{C} : (P,P') \in E(G') \wedge (P',Q) \in E(G'')$.
The DFR associated with each such arc is: $G_{P,Q} = \bigsqcup_{P'} G'_{P,P'} \cdot G''_{P',Q}$.

\paragraph{Analysis of $C_{program}$.}
Finally, we perform $LCG_1({\tt fix}_{\infty} (G))$, where $G$ is the context graph computed by the analysis of $C_{body}$.
Results are shown in figure \ref{fig:naive-final-graph}. The DFR shown in the figure is the one associated with arc $\{\} \rightarrow \{4\}$.
Note that the dependences show a polynomial dependence of X$_2$ and X$_3$ on X$_1$. 
This is true although X$_4$ is zeroed (as can be understood from the post-context $\{4\}$). It reflects the possibility of allowing X$_2$ and X$_3$ to
grow polynomially with respect to X$_1$, while still resetting X$_4$ to zero in the final loop iterations.

\begin{figure}[htpb] 
$$
\begin{array}{cc}
\includegraphics[scale=0.5]{naive_G_final.png} & \includegraphics[scale=0.5]{naive_DFR_M_final.png}  \includegraphics[scale=0.3]{naive_DFR_R_final.png} \\
Context\; Graph & M\; and\; R\;of\;DFR  \\
\end{array}
$$
\caption{The context graph computed for the program and $(M,R)$ of DFR associated with arc $\{ \} \rightarrow \{ 4\}$. \label{fig:naive-final-graph} }
\end{figure} 

\pagebreak
\section{The Abstract Interpreter}
Our analysis is targeted at finding worst case dependence judgements, and as such, if dependence $\unarydep{i}{d}{j}$ can be deduced for a command $C$ given
pre-context $\{k\}$, then it certainly can be deduced given pre-context $\{ \}$. In general we can say that assuming a variable is non zero
does not reduce the worst case dependence judgements which are found by the analysis.
The analysis algorithm described will assume, at first, that all variables are non zero and compute the possible reset contexts as it traverses the core language program AST in a top-down fashion.
The analysis approach is that of an abstract interpreter, but it will only analyse a command over pre-contexts  that have been determined to be reachable. 
This abstract interpreter ``executes" the program, but instead of applying the original core language semantics it generates context graphs for each of the
commands. Contrary to the ``naive" algorithm which analyses each command once, 
this algorithm may analyse the same command more than once given different possible pre-contexts. 

\subsection{Preliminaries}
\bdfn
For a context graph $G$, we define:
\begin{enumerate}
\item
 $Pre(G) = \{ P \in \mathbb{C} \;|\; \exists Q \in \mathbb{C} : (P,Q) \in E(G) \}$
\item
 $Post(G) = \{ Q \in \mathbb{C} \;|\; \exists P \in \mathbb{C} : (P,Q) \in E(G) \}$
\end{enumerate}
\edfn
$Pre(G)$ is the set of all pre-contexts in $G$ and $Post(G)$ is the set of all post-contexts in $G$.

\subsection{The Abstract Interpreter Analysis Algorithm} \label{absint-algo}
The algorithm's input is a command of the core language and a set $PC \subseteq \mathbb{C}$. Its output is a context graph.
The algorithm works top-down. Similarly to the ``naive" algorithm definition we define the abstract interpreter algorithm as a semantics function
$\lsem \; \rsem :   {\tt Command} \rightarrow \wp(\mathbb{C}) \rightarrow \mathbb{G}$. 
Now the input is not only a command, but also a set of pre-contexts.

\begin{enumerate}
\item{Assignment and Skip Commands.}
The different assignment commands and skip command are analysed much the same way as the ``naive" algorithm with one main difference - the pre-contexts considered
are from a given set $PC \subseteq \mathbb{C}$.
\begin{enumerate}
%zero assignment command analysis semantics:
\item{Zero Assignment.}
\begin{eqnarray*}
\lsem {\tt X}_i {\tt\; :=0} \rsem PC &=& Context \; graph \; G \; where: \\ 
  &1. &  E(G) = \{ (P,P \cup \{i\}) \;| \; P \in PC \} \\
  &2. &  \forall P \in PC : G_{P,P \cup \{i\}} = (M,R) \; s.t. \\
  &&  M = \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq i \}, \;\; R = C_2(\onesof(M))  
\end{eqnarray*}
%var assignment command analysis semantics:
\item{Variable Assignment.}
\begin{eqnarray*}
\lsem {\tt X}_i {\tt \;:=X}_j \rsem PC  &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Var}_{i,j}) \;| \; P \in PC \} \\
  &2.&  \forall P \in PC : G_{P,P^{Var}_{i,j}} = (M,R) \; s.t. \\
  && M = \{ \unarydep{j}{1}{i} | j \not \in P  \} \cup 
         \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq i \}   \\
  && R = C_2(\onesof(M)) 
\end{eqnarray*}
%sum assignment command analysis semantics:
\item{Variable Sum Assignment.}
\begin{eqnarray*}
\lsem {\tt X}_i {\tt \;:=X}_j \;{\tt + \;X}_k \rsem PC &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Sum}_{i,j,k}) \;| \; P \in PC \} \\
  &2.&  \forall P \in PC : G_{P,P^{Sum}_{i,j,k}} =  (M,R) \; s.t. \\ &&
     M = \{ \unarydep{t}{1}{t} | t \neq i \wedge t \not \in P  \} \cup 
    \{ \unarydep{j}{1}{i} | j \not \in P \wedge k \in P \} \cup \\ &&
    \{ \unarydep{k}{1}{i} | k \not \in P \wedge j \in P \} \cup
   \{ \unarydep{t}{1^+}{i} | t \in \{j,k\} \wedge j,k \not\in P \}  \\
  && R = C_2(\onesof(M))     
\end{eqnarray*}
%product assignment command analysis semantics:
\item{Variable Product Assignment.}
\begin{eqnarray*}
\lsem {\tt X}_i {\tt :=X}_j \;{\tt * \; X}_k \rsem PC &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Prod}_{i,j,k}) \;| \; P \in PC \} \\
  &2.&  \forall P \in PC : G_{P,P^{Prod}_{i,j,k}} = (M,R) \; s.t. \\ &&
    M = \{ \unarydep{t}{1}{t} | t \neq i \wedge t \not \in P  \} \cup 
    \{ \unarydep{t}{2}{i} | t \in \{j,k\} \wedge j,k \not\in P \}  \\
  && R = C_2(\onesof(M)) 
\end{eqnarray*}
%skip command analysis semantics:
\item{The \verb+skip+ Command.}
\begin{eqnarray*}
\lsem \verb+skip+ \rsem PC &=&
   \lsem {\tt X}_1 {\tt \;:=X}_1 \rsem PC
\end{eqnarray*}
\end{enumerate}

\item{Composition and Join.}
The context graph of command C$_1$;C$_2$ is the composition of context graph $G$ computed for C$_1$ with pre-contexts $PC$ and context graph
$G'$ computed for C$_2$ with pre-contexts $Post(G)$.
The context graph of a \verb+choose+ command is the join of context graphs computed for the command's premises using the same pre-contexts for both premises.
\begin{enumerate}
\item{Command Composition.}
\begin{eqnarray*}
% C1;C2 command analysis semantics:
\lsem {\tt C}_1 {\tt ;C}_2\rsem PC &=&
\Big( \lsem {\tt C}_1 \rsem PC \Big) \cdot \Big(\lsem {\tt C}_2 \rsem Post( \lsem {\tt C}_1 \rsem PC) \Big)
\end{eqnarray*}
%choose {} or {} command analysis semantics:
\item{The \verb+choose+ Command.}
\begin{eqnarray*}
\lsem \verb+choose+\; \{ \verb+C+_1 \}\; \verb+or+ \; \{ \verb+C+_2 \}\rsem PC &=&
  (\lsem {\tt C}_1  \rsem PC) \sqcup (\lsem {\tt C}_2 \rsem PC)
\end{eqnarray*}
\end{enumerate}

\item{The \verb+loop+ Command.}
For the analysis of the loop command we define the following series.
\bdfn \label{def-F-series}
Given a command $C$ and a reset context set $PC$ we define $F(C,PC)_i$ to be a series of context graphs such that:
\begin{eqnarray*}
 \; F(C,PC)_0                              & = & \lsem {\tt skip } \rsem PC  \\
 \; \forall k>0 : F(C,PC)_k                & = & F(C,PC)_{k-1} \sqcup \Bigg( \lsem C \rsem Post \Big(F(C,PC)_{k-1}\Big) \Bigg) \sqcup  \\
 	           			                   &   & F(C,PC)_{k-1} \cdot \Bigg( \lsem C \rsem Post \Big(F(C,PC)_{k-1}\Big) \Bigg) \\
\end{eqnarray*}
\edfn

\bdfn \label{def-absint-fix}
Given a command $C$ and a reset context set $PC$ we define the \verb+fix+ operator:
\begin{eqnarray*}
{\tt fix}(C,PC) &=& F(C,PC)_m  \;\; s.t. \;\; \forall n > m : F(C,PC)_n = F(C,PC)_m
\end{eqnarray*}
\edfn
\verb+fix+ can also be viewed as the limit of the $F(C,PC)$ series.
In the above definition it is guaranteed that a finite non-negative integer $m$ exists due to the following reasons 
(much like the ``naive'' algorithm \verb+fix+ operator):
\begin{enumerate}
\item
The number of different possible context graphs is finite.
\item
The $F(C,PC)$ series is monotone with respect to the $\sqsubseteq$ relation (due to Lemma~\ref{lemma-mono-join-closure}).
\end{enumerate}

Using definitions \ref{def-F-series} and \ref{def-absint-fix} the loop command semantics are defined as:
\begin{eqnarray*}
%loop command analysis semantics:
\lsem{\tt loop \; X}_\ell \; {\tt C} \rsem PC &=& LCG_{\ell} ({\tt fix}(C,PC))    
%LCG_{\ell} \Big{(}{\tt fix}_{\infty} ( \lsem {\tt C} \rsem) \Big{)} 
\end{eqnarray*}

\subsection{The Abstract Interpreter Execution}
Using the abstract interpreter semantics defined in section \ref{absint-algo}, the analysis of a core language program given by $C_{prog}$ is done by
applying the following semantics rule:
\begin{eqnarray*}
  \lsem C_{prog} \rsem \{\}
\end{eqnarray*}
I.e., the program abstract interpretation starts by assuming that none of the variables are zero. This assumption does not tamper with the worst-case 
analysis results since assuming that variable X$_i$ has non-zero value at first may only yield dependences with larger or equal dependence type than
if we assumed that X$_i$ may be zero.
 
\subsection{Example of the Abstract Interpreter Analysis Results}
We go back to the naive analysis algorithm execution example, and note figure \ref{fig:naive-final-graph}.
Given the same program, the abstract interpreter analysis will result with the context graph shown in figure \ref{fig:absint-final-graph}.
The reader might notice that while the DFR associated with arc $\{\} \rightarrow \{4\}$ is the same as the one computed by the naive analysis algorithm,
the context graph contains only two arcs, as there are only two reachable contexts in a program abstract interpretation: $\{\}$ and $\{4\}$.

\begin{figure}[htpb] 
$$
\begin{array}{cc}
\includegraphics[scale=0.5]{absint_G_final.png} & \includegraphics[scale=0.3]{absint_DFR_M_final.png}  \includegraphics[scale=0.3]{absint_DFR_R_final.png} \\
Context\; Graph & M\; and\; R\;of\;DFR  \\
\end{array}
$$
\caption{The context graph computed for the program and $(M,R)$ of DFR associated with arc $\{ \} \rightarrow \{ 4\}$. \label{fig:absint-final-graph} }
\end{figure} 


\end{enumerate}

\section{Pattern Decoration} \label{sec-pat-decor}
\paragraph{}
Looking at a dependence judgement deduced from a core language program, a natural question is in which concrete computation the dependence may occur.
A programmer debugging a program may wish to know where to look in order to improve the computational complexity.
For example, assume we deduced that X$_i$ is exponential with respect to X$_j$, but which computational flow can really make this happen:
 Which branches should be taken? Which commands should be executed? Which loops should be iterated? How many iterations?
This section shows how this important layer of information can also be deduced by the analysis algorithms through slight modifications to the
data structures and semantics functions.

\subsection{Preliminaries}
\bdfn 
A \emph{computational path expression} (CPE for short) is an expression given by the following syntax: 
\begin{eqnarray*}
E \; ::= & i \;|\; E \cdot E \;|\; loop(i,E) \;|\; E^k \;|\; skip \;\;\;\;\; & (i,k \in \mathbb{N})
\end{eqnarray*} 
The set of all computational path expressions is denoted by $\CPE$.     
\edfn

The different ingredients of a computational path expression are used to describe a computational path in a given core language program: 
\begin{enumerate}
\item
$i \in \mathbb{N}$ is a numeric identifier of a single command in the program. 
\item
$E \cdot E$ describes concatenation of two computational paths. 
\item
The $loop(i,E)$ expression describes that the computational path represented by $E$ within the body of loop command with identifier $i$ is executed 
as many times as allowed by the loop bound. $E$ itself may involve more than one iteration. 
\item
$E^k$ means that the computational path represented by $E$ is executed exactly $k$ times (usually inside a loop body).
\item
The $skip$ expression represents the core language \verb+skip+ command. It is useful to have a specific representation for the skip command, especially when
doing concatenation of computational paths, as can be seen from the following example:
\begin{eqnarray*}
E \cdot skip & = & E 
\end{eqnarray*}      
\end{enumerate}

The strategy we take is to associate such an expression to each dependence judgement and as the dependences are composed and joined together,
so will the computational path expressions. The part of the algorithm which needs to be modified to support this association is mainly the Data Flow Relation.

\bdfn
The size of a CPE $E$ is denoted by $|E|$ and computed as follows:
\begin{eqnarray*}
|skip|  							&=& |i|\; =\; 1  \;\;\; (i \in \mathbb{N})\\
| loop(i,E)|              &=& 1 + |E| \\
|E^k|           			&=& 1 + |E| \\
|E_1 \cdot E_2| &=&  |E_1| + |E_2| 
\end{eqnarray*}
\edfn

\bdfn
We define a $best$ function for CPEs. 
Let $E_1,E_2 \in \CPE$, then:
\begin{eqnarray*}
   best(E_1,E_2) &=& \left\{\begin{array}{cc}
    										      E_1  &  \text{ if $|E_1| < |E_2|$} \\
      										      E_2  &  \text{otherwise}
   										       \end{array}\right.
\end{eqnarray*}
\edfn

\bdfn
The set of \emph{decorated dependence types} is
$\deptypes_d =  \deptypes \times \CPE$. 
For $x \in \deptypes_d$ we write $x\simeq 1$ if $x\in \big(\{1,1^+\}\times\CPE\big)$.
\edfn

Next we define a  $\sqcup$ operator for $\deptypes_d$.
\bdfn
Let $(d_1,E_1), (d_2,E_2) \in \deptypes_d$, then:
\begin{eqnarray*}
	(d_1,E_1) \sqcup (d_2,E_2) & = & \left\{\begin{array}{cc}
    										      \big(d_1,E_1 \big)                   &  \text{ if $d_1 > d_2$} \\
      										      \big(d_2,E_2 \big)                   &  \text{if $d_1 < d_2$} \\
										      \big(d_1,best(E_1,E_2)\big) & \text{if $d_1 = d_2$}
   										       \end{array}\right.
\end{eqnarray*}
\edfn

Next we define a CPE minimization effect of the $\cdot$ operator.
\bdfn \label{def-CPE-prod}
Let $E_1, E_2 \in \CPE$, then:
\begin{eqnarray*}
	E_1 \cdot E_2 & = & \left\{\begin{array}{cc}
    						     E_1      &  \text{ if $E_2 = skip$} \\
    							 E_2      &  \text{ if $E_1 = skip$} \\
      							 E^{x+y}  &  \text{if $E_1 = E^x \wedge E_2 = E^y$} \\
							     E^{x+1}_1 & \text{if $E_2 = E^x_1$} \\
							     E^{x+1}_2 & \text{if $E_1 = E^x_2$} \\
                                 E_1 \cdot E_2 & \text{otherwise} 
                               \end{array}\right.
\end{eqnarray*}
\edfn
When two CPEs are concatenated using the $\cdot$ operator, the resulting expression is minimized using definition \ref{def-CPE-prod}.
The last row (the ``otherwise'' case) means that the expression could not be minimized so it remains unchanged.

\bdfn \label{def-decorated-binary-dep}
The set of \emph{decorated dependences} $\deps^d$ is the union of two sets: \\
(1) The set of \emph{decorated unary dependences}, isomorphic to 
$\inds\times\deptypes_d \times\inds$. The notation for an element is
\ $\unarydep{i}{\delta}{j}$, where $\delta \in \deptypes_d$. \\
(2) The set of \emph{decorated binary dependences}, isomorphic to %\ $\binarydeps = 
$\inds\times\inds\times\inds\times\inds\times\CPE$, where the notation for an element is
\ $(\binarydep{i}{j}{k}{\ell}, E)$.
\edfn

\bdfn
 We define the $\sqcup$ operator for two decorated unary dependence sets $M_1 , M_2$. 
\label{def-sqcup-DDFR_M}
\begin{eqnarray*}
 M_1 \sqcup M_2 =\Bigg\{ \unarydep{i}{d}{j}\; \mid & \;\Big( \exists d',d''\in \deptypes_d : \unarydep{i}{d'}{j} \in M_1 \wedge 
                          \unarydep{i}{d''}{j} \in M_2 \wedge d = d' \sqcup d'' \Big) \vee \\
                  &\Big( \unarydep{i}{d}{j} \in M_1 \wedge \not\exists d'\in \deptypes_d : \unarydep{i}{d'}{j} \in M_2 \Big) \vee \\
                  &\Big( \unarydep{i}{d}{j} \in M_2 \wedge \not\exists d'\in \deptypes_d : \unarydep{i}{d'}{j} \in M_1 \Big)\Bigg\} 
\end{eqnarray*}
\edfn 

\bdfn
 We define the $\sqcup$ operator for decorated binary dependence sets $R_1 , R_2$. 
\label{def-sqcup-DDFR_R}
\begin{eqnarray*}
 R_1 \sqcup R_2 &=& \\
 R_1 \cup R_2 &\setminus& 
              \{ (\binarydep{i}{i'}{j}{j'},E_1) | \; 
                               \exists (\binarydep{i}{i'}{j}{j'},E_2) \in  R_1 \cup R_2 : 
                                          E_2 \neq E_1 \wedge E_2 = best(E_1,E_2) \} 
\end{eqnarray*}
\edfn 

\bdfn
The decorated unary dependence set product is identical to the unary dependence set product 
which was defined in section \ref{def-unary-set-prod}, with one difference - 
the $\sqcup$ operator now operates on elements of $\deptypes_d$ and not $\deptypes$:
\begin{eqnarray*}
 M_1 \cdot M_2 = &\{\unarydep{i}{d}{j}\; |\;\exists k\in V, \;\; d',d'' \in \deptypes_d\; :  \; \unarydep{i}{d'}{k} \in M_1 \wedge \unarydep{k}{d''}{j} \in M_2 \wedge d = d' \sqcup d''\} 
\end{eqnarray*}
\edfn

\bdfn
For a decorated unary dependence set $M$, $\onesof(M)$ is the set of dependences in $M$ labeled by $d$ where $d \in (\{1, 1^+\} \times \CPE)$.
\edfn


\bdfn
For a decorated unary dependence set $M$, $C_2 (M)$ is a decorated binary dependence set defined as:
\begin{eqnarray*}
C_2(M) & = \Big\{ &\big(\binarydep{i}{i'}{j}{j'},E\big) \; \mid \\  
       &          &E \in \CPE \wedge \exists d_1,d_2 \in \deptypes_d \;:\; \unarydep{i}{d_1}{i'} \in M \wedge \unarydep{j}{d_2}{j'} \in M \Big\} 
\end{eqnarray*}

and $C_2(M,E)$ is:
\begin{eqnarray*}
C_2(M,E) & = \Big\{ &\big(\binarydep{i}{i'}{j}{j'},E\big) \; \mid 
  \exists d_1,d_2 \in \deptypes_d \;:\; \unarydep{i}{d_1}{i'} \in M \wedge \unarydep{j}{d_2}{j'} \in M \Big\} 
\end{eqnarray*}

\edfn

\paragraph{The Id function.} \label{def-ID-operator}
Given a core language command $C$, we use a \emph{unique command identifier function} $\ID : {\tt Command} \rightarrow \CPE$.
\begin{eqnarray*}
	\ID(C) & = & \left\{\begin{array}{cc}
    										      i \in \mathbb{N},  &  C \neq {\tt Skip} \\
      										      skip,              &  \text{otherwise} 
   					       \end{array}\right.
\end{eqnarray*}
In the above definition $i \in \mathbb{N}$ is a unique identifier associated with command C
(e.g., the index of the command in a pre-order traversal over the program AST).

\subsection{Decorated Data Flow Relation}
A \emph{decorated data flow relation}, or $DDFR$ for short, is $(M,R)$ where ${M \subseteq V \times \deptypes_d \times V}$ and 
$R\subseteq V \times V \times V \times V \times \CPE$. 

Some formal definitions of $DDFR$ and its operations.

\paragraph{DDFR Operations.} \label{sec-DDFR-operations}
For compact notation, instead of writing $(\binarydep{i}{j}{i'}{j'}, E) \in R$ we may write $R(i,j,i',j',E)$.
\begin{enumerate}
\item \label{def-DDFR-Sum}
Sum of DDFRs.
$$(M_1,R_1)\sqcup(M_2,R_2) \stackrel{def}{=} (M_1\sqcup M_2, 
 (R_1\sqcup R_2)\cap C_2(\onesof(M_1\sqcup M_2))).$$ 

\item \label{def-DDFR-Prod}
Product of DDFRs.
Let $(M,R)$, $(M',R')$ be decorated dataflow relations;
$$(M,R)\cdot (M',R') \stackrel{def}{=} (M'', R''),$$
where: 
\begin{eqnarray*}
M'' &=& (M \cdot M')\sqcup \{ \longunarydep{i}{(2,E \cdot E')}{j} \mid \exists s\ne t . R(i,s,i,t,E) \land R'(s,j,t,j,E') \} \\
R'' &=&
\{ (\binarydep{i}{i'}{j}{j'},E \cdot E') \in \uop{\onesof(M'')}
 \mid \exists s,t . R(i,s,i',t,E) \land R'(s,j,t,j',E') \}
\\ &\cup&
 \{ (\binarydep{i}{i}{j}{j'},E \cdot E') \in \uop{\onesof(M'')} 
 \mid \exists s . (\longunarydep{i}{(d,E)}{s})\in\onesof(M) \land R'(s,j,s,j',E') \}
\\ &\cup&
 \{ (\binarydep{i}{i'}{j}{j},E \cdot E') \in \uop{\onesof(M'')}
 \mid \exists s . R(i,s,i',s,E) \land (\longunarydep{s}{(d,E')}{j})\in\onesof(M') \}.
\end{eqnarray*}

\item Loop Correction (for loop variable $\X_\ell$): for a DDFR $(M,R)$, define $LC_\ell(M,R,i) = (M',R')$ where $i \in \mathbb{N}$ and
$M'$ is identical to $M$ except that:
\begin{enumerate} 
\item
 For all $j$ such that $(\unarydep{j}{d}{j},E) \in M$, $d \ge 2$, $M'$ will contain $\big(\unarydep{\ell}{3}{j},loop(i,E)\big)$. 
\item \label{def-LC-2}
 For all $j$ such that $(\unarydep{j}{1^+}{j},E) \in M$, $M'$ will contain $\big(\unarydep{\ell}{2}{j},loop(i,E)\big)$.
\end{enumerate}
and $R' = R\cap C_2(\onesof(M'))$
\end{enumerate}

\subsection{Adding Pattern Decoration to the Analysis Algorithms}
Now that we defined a DDFR, enhancing the analysis algorithms to support pattern decoration is easy.
The context graph arcs computed for each command will now be associated with decorated dataflow relations instead of DFRs.
The only thing left to do is to insert the analysed command $\ID$ into the patterns which decorate the dependences.
We do so by modifying (slightly) our analysis semantics.

\begin{enumerate}
\item{Analysis of the Assignment Commands.} 
The assignment commands analysis should result with a DDFR where each dependence is decorated with a pattern
matching exactly the command being analysed. For this purpose we have defined the $\ID$ function in \ref{def-ID-operator}.
For instance, consider the variable assignment command ${\tt C} = {\tt X}_i {\tt \;:=X}_j$, the semantics given to it
by a pattern decorating abstract interpreter analysis are: 
\begin{eqnarray*}
\lsem {\tt X}_i {\tt \;:=X}_j \rsem PC  &=& Context \; graph \; G \; where: \\ 
  &1.&  E(G) = \{ (P,P^{Var}_{i,j}) \;| \; P \in PC \} \\
  &2.&  \forall P \in PC : G_{P,P^{Var}_{i,j}} = (M,R) \; s.t. \\
  && M = \{ \longunarydep{j}{\big(1,\ID({\tt C})\big)}{i} | j \not \in P  \} \cup 
         \{ \longunarydep{k}{\big(1,\ID({\tt C})\big)}{k} | k \not \in P \wedge k \neq i \}   \\
  && R = C_2(\onesof(M),\ID({\tt C}))
\end{eqnarray*}
Note that:
\begin{enumerate}
\item
The only differences are that the dependences in $M$ are decorated with $\big(1,\ID({\tt C})\big)$ and dependences in $R$ are decorated with $\ID({\tt C})$.
\item
All assignment command types semantics can be modified by replacing each dependence type $d$ with
an ordered pair $\big(d,\ID(C)\big)$ and calculating $R$ using the same method.
\item
The naive analysis algorithm semantics can be modified in the exact same way.
\end{enumerate}

\item{The \verb+Skip+ Command Semantics.}
The \verb+Skip+ command semantics are defined by means of an assignment statement therefore it does not need to be modified.
However we note that the $\ID$ function maps any \verb+Skip+ to a $skip$ expression.

\item{Analysis of Command Sequence and the \verb+choose+ Command.}
The semantics of command sequence (C$_1$;C$_2$) and the \verb+choose+ command are not modified as they are already
defined using the $\cdot$ and $\sqcup$ operators respectively and these operators are well defined for context graphs and
DDFRs (see \ref{sec-DDFR-operations}).

\item{Analysis of the \verb+loop+ Command.}
The loop command semantics are defined as before, but the $LCG_{\ell}$ operator is redefined in order to associate
the loop command $\ID$ with the decorated patterns. 
\bdfn
Let $i \in \mathbb{N}$, $G$ a \emph{context graph}, then $LCG_\ell(i,G)$ is defined as follows: \newline
\begin{eqnarray*}
\forall (P_0,P_2) \in E(G) : &  & \\ 
   LCG_\ell(i,G)_{P_0,P_2} & = \\
   G_{P_0,P_2} & \sqcup & \Big( \bigsqcup_{(P_0,P_1),(P_1,P_1),(P_1,P_2) \in E(G)} (G_{P_0,P_1}\cdot LC_\ell \big(G_{P_1,P_1},i\big) \cdot G_{P_1,P_2}) \Big) \\
\end{eqnarray*}
\edfn
In the expression  $LC_\ell \big(G_{P_1,P_1},i\big)$, the ``$LC_\ell$" operator is the one defined for $DDFR$s.

Using the modified $LCG_{\ell}$ operator, the semantics of a loop command $L = {\tt loop \; X}_\ell \; {\tt C}$ are defined as:
\begin{eqnarray*}
%modified loop command analysis semantics:
\lsem L \rsem PC = \lsem{\tt loop \; X}_\ell \; {\tt C} \rsem PC &=& LCG_{\ell} \big(\ID(L),{\tt fix}(C,PC)\big)    
\end{eqnarray*}

\end{enumerate}
\newpage
\subsection{Interpreting the Pattern Decoration Result}
We shall explain how the decorated pattern should be interpreted by revisiting figure \ref{fig:naive-final-graph}.
For example, assume that the unique identifiers of each command are as written to the left of the commands:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
5   loop X$_1$ {
0      X$_2$ := X$_3$+X$_4$
4      ;
3      choose {
1          X$_3$ := X$_2$ 
       } or { 
2          X$_4$ := 0 
       }
    }
\end{Verbatim}
The DDFR on arc $\{\}\rightarrow\{4\}$ which is computed by the naive analysis algorithm (enhanced with pattern decoration), has the
exact same arcs and dependence types as the DFR in figure \ref{fig:naive-final-graph}, 
however the DDFR will also have computational path expressions decorating its arcs.
For instance, the CPE $E$, on arc $\longunarydep{1}{(2,E)}{2}$ is:
\begin{eqnarray*}
 E = loop(5,0 \cdot 1)\cdot 0 \cdot 2
\end{eqnarray*}
This expression matches the following computational path in the program:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
5  loop X$_1$ {
0     X2 := X3 + X4;
1     X3 := X2       (taking the first branch)
   };
0  X2 := X3 + X4;
2  X4 := 0           (taking the second branch)
\end{Verbatim}
Note that while this computational path may cause X$_2$ to exhibit polynomial growth in respect to X$_1$, it is not necessarily the minimal length 
computational path which may result with this dependence - the same computational path without the last two commands may also imply the
same dependence. The pattern decoration output extension yields an \emph{example} of a computational path which may result with the
computed dependence, however it does not guarantee that this example path is minimal in length.

\subsection{The Pattern Decoration Effect on Analysis Runtime Complexity}
Most of the modifications done in order to support the pattern decoration are done in the DDFR and its operations, however it is easy to see that
any of the newly defined DDFR operations execute at most one more step for each dependence being processed - the step of updating the CPE associated with the dependence.
This kind of update can be done in $O(1)$ (generating new atomic expression, concatenating two expressions or adding power to an expression), so the overhead of processing
the CPEs is just $O(1)$ for every dependence computation in the original algorithm.  
\newpage
\section{Algorithm Implementation}
The implementation of the described algorithms was done using Microsoft .NET technology, more specifically the \verb+F#+ and \verb+C#+ programming languages were used.
The front-end application is named ``\CoreReset", as it analyses the \emph{core} language enhanced with a \emph{reset} command.
The \CoreReset application user manual can be found in appendix \ref{app-core-reset-man}.
Following is an architecture overview and some specifics of the implementation.
\subsection{Architecture Overview}
\paragraph{Assemblies.}
The implementation is split into two assemblies - the algorithms implementation and the front-end application.
The algorithms and data structures are implemented using the \verb+F#+ language in the \Algorithms assembly (DLL).
The front-end user interface is implemented using the \verb+C#+ language in the \CoreReset assembly (Executable).
As can be seen in figure \ref{fig:CoreReset-packages}, the \CoreReset assembly references the \Algorithms assembly.
\begin{figure}[t]
\begin{tikzpicture} 
\begin{umlpackage}[x=0,y=0]{Algorithms} 
\end{umlpackage} 
\begin{umlpackage}[x=5,y=0]{CoreReset} 
\end{umlpackage} 
\umluniassoc[geometry=--,pos1=1.9,pos2=0.2]{CoreReset}{Algorithms} 
\end{tikzpicture}
\caption{CoreReset Assembly Dependency \label{fig:CoreReset-packages}}
\end{figure}

\paragraph{The Analysis Sequence.}
The \CoreReset application enables load from file (``.core'') or manual entry of core language programs.
When the analysis is executed, the core language program is parsed using the \verb+F#+ version of lex and yacc (\emph{fslex} and \emph{fsyacc}).
The result is an AST of the core language program. This AST is the input to the analysis algorithm.
Figure \ref{fig:CoreReset-AnalysisSequence} shows a diagram visualizing this sequence.
The sequence diagram columns are:  
\begin{enumerate}
\item{Application.}
Stands for the application and user interface components (located in the \CoreReset assembly).

\item{Parser.}
The fslex/fsyacc generated parser for the core language (located in the \Algorithms assembly).

\item{AnalysisThread.}
The analysis is executed on a different thread than the one running the application user interface.
This makes the user interface responsive during the analysis execution and allow the user to abort the analysis.
The analysis thread is implemented in the \CoreReset assembly.

\item{Algorithm.}
Refers to an algorithm implementation (naive, abstract interpreter or the BJK algorithm (the algorithm of ~\cite{BJK08}).
The algorithm to be executed is determined by the application, 
however the algorithms implementation is located in the \Algorithms assembly.
\end{enumerate}

\begin{figure}[t]
\begin{tikzpicture} 
\begin{umlseqdiag} 
\umlobject{Application} 
\umlobject{AnalysisThread} 
\umlobject{Algorithm} 
\umlobject{Parser} 

\begin{umlcall}[op={Start Analysis}, padding =3]{Application}{Application} 
  \begin{umlcall}[op={Parse core language program}, return={AST}, padding =3]{Application}{Parser} 
  \end{umlcall} 
  %\begin{umlcall}[op={Prepare AST for analysis}, return={Extended AST}]{Application}{Application} 
  %\end{umlcall}
  \begin{umlcall}[op={Start analysis thread}, return={Context Graph}]{Application}{AnalysisThread} 
    \begin{umlcall}[op={Analyse AST}, return={Context Graph}, padding =5]{AnalysisThread}{Algorithm} 
    \end{umlcall} 
  \end{umlcall}
  \begin{umlcall}[op={Display results(Text/Graph)}]{Application}{Application} 
  \end{umlcall}
\end{umlcall}
\end{umlseqdiag} 
\end{tikzpicture}
\caption{The Analysis Sequence \label{fig:CoreReset-AnalysisSequence}}
\end{figure}

\subsection{From Analysis Semantics to Implementation.}
\paragraph{Context Graph Memoization.}
When taking the abstract interpreter algorithm analysis semantics given in section \ref{absint-algo}, 
one might think that when a command is re-analysed (e.g., when analysing a command inside a loop body), 
its context graph is re-computed on each analysis ``iteration''. 
Re-computing the context graph is redundant as given a specific set of pre-contexts, and a command $C$, $\lsem C \rsem PC$ is definite, 
so unless $PC$ changed from one analysis of the command to the next, the result context graph will be the same.
Moreover, given that $G$ is the context graph last computed for command $C$, if $PC \subseteq Pre(G)$ then $G$ will contain all dependences which may be computed
from $C$ and $PC$, possibly with higher dependence type. In this case, it is possible to perform memoization of $G$ and return it without re-analysing the command.
In our implementation the last context graph computed for each core language command is ``attached'' to the command in the AST, 
making the AST an extended one as it holds also a context graph for each command (and an identifier, due to the output extension described in section \ref{sec-pat-decor}).
\paragraph{Loop Command Analysis Optimization.}
As one might notice, the abstract interpreter semantics for the loop command requires computing the series defined in \ref{def-F-series}, 
and as the first step the \verb+Skip+ command semantics are computed (given a set of pre-contexts).
In order to avoid re-computation of the \verb+Skip+ command semantics every time a loop is re-analysed, every loop command 
\verb+loop X { C }+ is replaced with \verb+loop X { choose { Skip } or { C } }+. 
The semantics of the \verb+choose+ command are:
\begin{eqnarray*}
\lsem {\tt choose} \{ {\tt Skip} \} {\tt or} \{ {\tt C} \} \rsem PC & = & (\lsem {\tt Skip} \rsem PC ) \sqcup ( \lsem {\tt C} \rsem PC )
\end{eqnarray*}  
I.e., the context graph computed for the \verb+Skip+ command is memoized on the command and joined with the context graph computed for \verb+C+.
This eliminates the need to re-compute $F(C,PC)_0$, if $PC \subseteq Pre(G)$ and $G$ is the context graph last computed for the \verb+Skip+ command.

\paragraph{}
The analysis process is therefore preceded by the following actions:
\be
\item
Parse the core language program into an AST.
\item
Modify the AST by replacing every loop command \verb+loop X { C }+ with \\
\verb+loop X { choose { C } or { Skip } }+. 
(Note: this action is required only by the abstract interpreter analysis).
\item
Extend the AST by attaching an empty context graph and an identifier to each command.

\ee

\subsection{Performance Comparison}
A performance test was executed by analysing several core language programs (all found in appendix \ref{app_core_prog})
using the naive and abstract interpreter analysis algorithms. 
All performance tests were done on a quad-core Intel Core i3 2.27GHz CPU. 
The performance results are found in Figure~\ref{fig:perf-comp}.
From the comparison results it is clear that the top-down abstract interpreter approach achieved better run-time on all of the 
analysed programs. In one case (program~\ref{appendix_prog_A7}) the abstract interpreter was approximately 17 times faster than the naive algorithm.
\newpage
\begin{figure}[t] 
\begin{tikzpicture}
\begin{axis}[
    xbar,
 	{/pgfplots/enlarge y limits=auto},
 	{/pgfplots/enlarge x limits=upper},
    legend style={at={(0.5,-0.15)},
      anchor=north,legend columns=-1},
    xlabel={Time taken[sec]}, width=400pt,
    ylabel={Analysed Program},
    symbolic y coords={A.1,A.2,A.3,A.4,A.5,A.6, A.7},
    ytick=data,
    y tick label style={/pgf/number format/.cd,%
                        scaled y ticks = false,
                        set thousands separator={},
                        fixed},
    x tick label style={/pgf/number format/.cd,%
                        scaled x ticks = false,
                        set decimal separator={,},
                        fixed},%
    nodes near coords,
	every node near coord/.append style = {
        anchor=west,
        /pgf/number format/.cd,
        fixed,
        fixed zerofill,
        precision=3},
    nodes near coords align={horizontal}
    ]
\addplot coordinates {(0.011,A.1) (0.016,A.2) (0.171,A.3) (1.094,A.4) (1.248,A.5) (54.382,A.6) (531.651,A.7)};
\addplot coordinates {(0.006,A.1) (0.008,A.2) (0.015,A.3) (0.023,A.4) (0.811,A.5) (4.383,A.6) (31.240,A.7)};
\legend{Naive Algorithm, Abstract Interpreter Algorithm}
\end{axis}
\end{tikzpicture}
\caption{Performance comparison between the Naive and Abstract Interpreter algorithms. \label{fig:perf-comp}}
\end{figure}


%\begin{enumerate}
%\item Sum of DDFRs:
%$$(M_1,R_1)\sqcup(M_2,R_2) \stackrel{def}{=} (M_1\sqcup M_2, 
% (R_1\sqcup R_2)\cap C_2(\onesof(M_1\sqcup M_2))).$$ 
%Note the main difference is that instead 
%\item Product of dataflow relations:
%Let $(M,R)$, $(M',R')$ be dataflow relations;
%$$(M,R)\cdot (M',R') \stackrel{def}{=} (M'', R''),$$
%where: 
%\begin{eqnarray*}
%M'' &=& (M \cdot M')\sqcup \{ (i\stackrel{2}{\to} j, E_1 \cdot E_2) \mid \exists s\ne t . R(i,s,i,t,E_1) \land R'(s,j,t,j,E_2) \} \\
%R'' &=&
%\Big\{ (\binarydep{i}{j}{i'}{j'},E_1 \cdot E_2) \mid \{i\to j, i'\to j'\} \in \uop{\onesof(M'')}
%  \exists s,t . R(i,s,i',t,E_1) \land R'(s,j,t,j',E_2) \Big\}
%\\ &\cup&
%\Big\{ (\binarydep{i}{j}{i'}{j'},E_1 \cdot E_2) \mid  \{i\to j, i\to j'\}\in \uop{\onesof(M'')} 
% \wedge \exists s . (i,s)\in\onesof(M) \land R'(s,j,s,j') \Big\}
%\\ &\cup&
%\Big\{ \{i\to j, i'\to j\}\in \uop{\onesof(M'')}
% \mid \exists s . R(i,s,i',s) \land (s,j)\in\onesof(M') \Big\}.
%\end{eqnarray*}

%\emph{Remark 1:} The purpose of the product is to describe the result of
%sequential composition of commands. The rule defining $M''$ is in fact the
%\emph{raison d'\^etre} of the $R$ part. The summand dependent on $R$
%``catches'' commands whose effect
%is to double a variable's value by adding two copies of it, a situation
%described by \emph{the diamond}:

%\begin{picture}(200,80)(0,65)
%\put(49,100){$i$}
%\put(98,70){$t$}
%\put(98,130){$s$}
%\put(150,100){$j$}
%\put(55,107){\vector(2,1){40}}
%\put(55, 95){\vector(2,-1){40}}
%\put(105,127){\vector(2,-1){40}}
%\put(105, 75){\vector(2,1){40}}
%\end{picture}
%
%Doubling is important because if the net effect of a command is
%to set \pgt{X} to twice its initial value, repeating this command in a
%loop generates exponential growth. The reader may have guessed that if this
%situation occurs in analyzing a program, the two meeting arcs will necessarily
%be labeled with $1^+$.
%
%\emph{Remark 2:} the product is associative and distributes over $\sqcup$,
%i.e., $$(M,R)\cdot ((M_1,R_1)\sqcup(M_2,R_2)) =
%       ((M,R)\cdot (M_1,R_1))\sqcup((M,R)\cdot (M_2,R_2))\,.$$
%(the reader may enjoy proving these properties as a way to gain insight into
%our definitions).


\section{Conclusion}
This document presented algorithms for precise growth-rate analysis based on the proof system of~\cite{BA10}. 
In particular, our algorithms handle a reset-to-zero command added to the language of~\cite{BJK08}.
The implementation was done by taking two approaches---the naive bottom-up and the top-down abstract interpreter.
While both approaches yield precise results, performance tests showed the top-down approach to be more efficient.
In addition, the algorithms' output was extended with patterns which reveal the exact computational flow 
that results in each computed data dependency. 

\subsection{Future Work}

\subsubsection{Simple Context Graphs.}
The abstract interpreter algorithm presented can be made even more efficient by noting that many computed context graphs can be kept ``simple'',
without elaborating all possible pre-contexts and post-contexts. 
We can define an abstract semantics to be \emph{simple} if the DFR is independent of the context. 
In this case, it is also possible to rebuild the post-context easily from the pre-context and the DFR, so only the DFR has to be kept in memory. 
A further possible improvement is to be able to represent the abstract semantics of a command in two parts, a ``simple'' one and a context graph. 
The promise of this approach is precisely to make the context graphs smaller, possibly exponentially so. 

\subsubsection{Definite Loops.}
In~\cite{BK11}, it was shown that the feasibility problem is decidable in PTIME for the core language with definite loops and max assignments.
It should be possible to implement such an analysis based on the algorithm of~\cite{BJK08}.


\subsection*{Acknowledgement}
\be
\item
The code used for the implementation of the~\cite{BJK08} algorithm and the DFR data structure is an adaptation to \verb+F#+ of 
the original ML source code by Prof.~Ben Amram.
\item
The CoreReset application user guide was generated using \verb+chmProcessor+ (\url{http://chmprocessor.sourceforge.net}).
\ee

\bibliography{icc,sct}
\newpage
%\begin{appendices}
\appendix
\appendixpage
\noappendicestocpagenum
\addappheadtotoc

\section{Core Language Example Programs} \label{app_core_prog}
\subsection{}\label{appendix_prog_A1}
X$_3$ grows exponentially inside the loop body, starting from the second loop iteration.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
X$_1$ := 0;
loop X$_2$ {
	X$_3$ := X$_3$ + X$_1$;
	X$_1$ := X$_3$		
}
\end{Verbatim}

\subsection{}\label{appendix_prog_A2}
X$_3$ may grow exponentially inside the loop body if the second branch is taken at least once, and assigns a non-zero value to X$_1$.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
X$_1$ := 0;
loop X$_2$ {
	choose {
		X$_3$ := X$_3$ + X$_1$
	}	
	or {
		X$_1$ := X$_3$
	}		
}
\end{Verbatim}
\newpage
\subsection{}\label{appendix_prog_A3}
X$_3$ may grow exponentially if only the first branch of the choose-or statement is taken on every loop iteration.
X$_3$ may also be reset to zero if the second branch is taken for at least two consecutive loop iterations.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_1$ {
	choose {
		X$_2$ := X$_3$
	}	
	or {
		X$_2$ := 0
	};	
	X$_3$ := X$_2$ + X$_4$;
	X$_4$ := X$_2$	
}
\end{Verbatim}

\subsection{}\label{appendix_prog_A4}
X$_3$, X$_1$ and X$_4$ may grow exponentially if a different branch is taken on each loop iteration.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
X$_1$ := 0;
loop X$_2$ {
	choose {
		X$_3$ := X$_1$ + X$_4$
	}	
	or {
		choose {
			X$_4$ := X$_3$
		}
		or {
			X$_1$ := X$_3$
		}
	}		
}
\end{Verbatim}
\newpage
\subsection{}\label{appendix_prog_A5}
A more complicated program with 3 nested loops and 6 variables. Used as a performance benchmark.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_1$ {
	loop X$_3$ {
		choose {
			loop X$_7$ {
				X$_8$ := X$_8$ + X$_1$;
				choose {
					X$_9$ := X$_1$ * X$_3$
				}
				or {
					X$_8$ := 0
				}
			}
		}
		or {
			X$_9$ := 0
		}
	}
};
loop X$_9$ {
	loop X$_7$ {
		choose {
			loop X$_3$ {
				X$_2$ := X$_2$ + X$_9$;
				choose {
					X$_1$ := X$_9$ * X$_7$
				}
				or {
					X$_2$ := 0
				}
			}
		}
		or {
			X$_1$ := 0
		}
	}
}
\end{Verbatim}

\newpage
\subsection{}\label{appendix_prog_A6}
An even more complicated program with 7 nested loops and 9 variables. Used as a performance benchmark.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
choose {
	loop X$_1$ {
	  loop X$_2$ {
		loop X$_3$ {
		  loop X$_4$ {
			loop X$_5$ {
			  loop X$_6$ {
				choose {
				  loop X$_7$ {
				    X$_8$ := X$_8$ + X$_1$;
					choose {
					  X$_9$ := X$_1$ * X$_3$
					}
					or {
					  X$_8$ := 0
					}
				  }
				}
				or {
				  X$_9$ := 0
				}
			  }
			}
		  }
	    }
	  }
	}
}
\end{Verbatim}
\newpage
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
or {
  loop X$_9$ {
	loop X$_8$ {
  	  loop X$_7$ {
		loop X$_6$ {
		  loop X$_5$ {
			loop X$_4$ {
		  	  choose {
				loop X$_3$ {
				  X$_2$ := X$_2$ + X$_9$;
				  choose {
					X$_1$ := X$_9$ * X$_7$
				  }
				  or {
					X$_2$ := 0
				  }
				}
			  }
			  or {
				X$_1$ := 0
			  }
			}
		  }
		}
	  }
	}
  }
}
\end{Verbatim}

\subsection{}\label{appendix_prog_A7}
Let \verb+C+ represent the program of \ref{appendix_prog_A6}, then the program is:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
C;C;C;C;C;C;C;C
\end{Verbatim}

\newpage

\section{CoreReset Application User Manual} \label{app-core-reset-man}
\end{document}

