%\documentclass[copyright,creativecommons,noncommercial]{eptcs}
\documentclass[11pt]{article}
\providecommand{\event}{} % Name of the event you are submitting to
%\documentclass[runningheads,10pt]{llncs}
\usepackage{amsthm}
\usepackage[leqno]{amsmath}
\usepackage{easybmat}
\usepackage{amssymb}
%\usepackage{work} %Neil's definitions
\usepackage{fancyvrb}

% DIMENSION OF TEXT:
%\textwidth 140mm         % Width of text line.

% ORDINARY DEFINITIONS 

\newtheorem{theorem}{THEOREM}[section]
\newtheorem{obs}[theorem]{OBSERVATION}
\newtheorem{corol}[theorem]{COROLLARY}
\newtheorem{lemma}[theorem]{LEMMA}
\newtheorem{claim}[theorem]{CLAIM}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{construction}[theorem]{Construction}
\newtheorem{alg}{Algorithm}[section]

\newtheorem{exmp}{Example}[section]

% DEFINITIONS SPECIAL TO THIS TEXT

\renewcommand{\vec}{\mathaccent"017E}  %LNCS defines \vec as boldface italic, fy!
\newcommand{\deptypes}{{\mathbb{D}}}
\newcommand{\deps}{{\mathbb{F}}}
% \newcommand{\unarydeps}{{\mathbb{U}}}
% \newcommand{\binarydeps}{{\mathbb{B}}}
\newcommand{\unarydep}[3]{{#1\xrightarrow{\makebox[1ex][l]{$\scriptstyle #2$}} #3}}
\newcommand{\longunarydep}[3]{{#1\xrightarrow{#2} #3}}
%\newcommand{\unarydep}[3]{{#1\stackrel{#2}{\rightarrow} #3}}
\newcommand{\binarydep}[4]{{{#1}\atop{#2}}\rightrightarrows {{#3}\atop{#4}}}
%\newcommand{\meetdep}[3]{{{#1}\atop{#2}}\rightrightarrows {#3}}
\newcommand{\dep}[3]{{\tt#1}\stackrel{#2}{\to}{\tt#3}}  %OLD DEF
\newcommand\scc{\mbox{\rm SCC}}
\newcommand\ep[1]{{\mathbf #1}}
\newcommand\safe[1]{{{\cal S} #1}}
\newcommand\xmin{{x_{\it min}}}
\newcommand\ymin{{y_{\it min}}}
\newcommand\zmin{{z_{\it min}}}
\newcommand{\mytag}[1]{\tag{#1}}
\newcommand\twodots{..}
\newcommand\Lbjk{L_{\text{BJK}}}

% OTHER DEFS (mostly from NDJ)

\newcommand\st{\colon} %such that
%\newcommand\setminus{-}  %any better idea?
\newcommand{\pass}{\texttt{:=}\,}
\newcommand{\semi}{\texttt{;}}
\newcommand{\X}{\texttt{X}}
\newcommand{\Y}{\texttt{Y}}
\newcommand{\Z}{\texttt{Z}}
\newcommand{\C}{\texttt{C}}
\newcommand{\U}{\texttt{U}}
\newcommand{\N}{\texttt{N}}
\newcommand{\lsem}{\mbox{$\lbrack\hspace{-0.3ex}\lbrack$}}
\newcommand{\rsem}{\mbox{$\rbrack\hspace{-0.3ex}\rbrack$}}
\newcommand{\sempar}[1]{\mbox{\lsem\pgt{#1}\rsem}}
\newcommand{\ints}{\mathbb{Z}}
\newcommand{\nats}{\mathbb{N}}
\newcommand{\pgt}[1]{{\tt #1}}
\newcommand{\inds}{V}

\newcommand{\bthm}{\begin{theorem}}
\newcommand{\ethm}{\end{theorem}}
\newcommand{\blem}{\begin{lemma}}
\newcommand{\elem}{\end{lemma}}
\newcommand{\bprf}{\begin{proof}}
\newcommand{\eprf}{\end{proof}}
\newcommand{\bdfn}{\begin{definition}}
\newcommand{\edfn}{\end{definition}}
\newcommand{\be}{\begin{enumerate}}
\newcommand{\ee}{\end{enumerate}}


\begin{document}
%LNCS stuff
% \authorrunning{Mann}
% \titlerunning{Automatic Complexity Analysis of Simple Imperative Programs}

\title{Automatic Complexity Analysis of Simple Imperative Programs}
\author{Zachi Mann}
%\institute{School of Computer Science, Tel-Aviv Academic College}

\maketitle   


\section{Introduction}
This document describes a set of algorithms designed to solve an intriguing question:
given a computer program, what is its complexity class? That is, is it linear, polynomial or exponential?
For Turing-complete languages this property is proven to be undecidable, hence the algorithms target a certain 
abstracted ``core'' language which is not Turing-complete but is still interesting enough to be discussed.
This core language was first described in~\cite{BJK08} and in~\cite{BA10} it was extended with a command that resets variables to zero (\verb+X := 0+).
The analysis algorithms described in this document apply the inference rules described in~\cite{BA10}, but rely heavily on~\cite{BJK08} when it comes to the data structure and implementation.
\section{Problem Statement}
\label{sec:goals}

First, we define our core language, $L_r$.

\paragraph{Syntax} is described in Figure~\ref{fig-syntax} and should be self-explanatory.
In a command $\verb+loop+\; \X \; \verb+{C}+$,
variable $\X$
is not allowed to appear on the left-hand side of an assignment in the loop
body {\tt C}. There is no special syntax for a ``program.'' %which is just a command.

\begin{figure}[t]
$$ \renewcommand{\arraystretch}{1.3}
\begin{array}{rcl}
\verb+X+,\verb+Y+\in\mbox{Variable} &\;\; ::= \;\; & \X_1 \mid\X_2 \mid \X_3 \mid
 \ldots  \mid \X_n\\
\verb+e+\in\mbox{Expression} & ::= & \verb+X+ \mid \verb/X + Y/ \mid 
\verb+X * Y+\mid\verb+0+\\
\verb+C+\in\mbox{Command} & ::= & \verb+skip+ \mid \verb+X:=e+ 
                                \mid \verb+C+_1 \semi \verb+C+_2 
                                \mid \texttt{loop} \; \X  \; \texttt{\{C\}} \\
                         & \mid & \texttt{choose}\; \{ \C_1 \} \; \texttt{or} \; \{ \C_2 \}
 \end{array} \renewcommand{\arraystretch}{1.0}$$

\caption{Syntax of the core language $L_r$. Variables hold nonnegative
integers. \label{fig-syntax}}
\end{figure}

\paragraph{Data \& Semantics}The only type of data processed by the core language is
nonnegative integers. Even though real programs can manipulate negative or non-integer values, those are usually not relevant to loop control so they could be dropped when abstracting a real language into the core language.
The semantics of the core language is intended for over-approximating a realistic
program's semantics. Therefore, the core language is nondeterministic.
The {\tt choose} command represents a nondeterministic choice, and can be used to abstract
any concrete conditional command by simply ignoring the condition.
The {\em loop command\/} 
$\verb+loop+\,\X_\ell\,\verb+{C}+$ repeats \pgt{C} a number of times
bounded by the value of $\X_\ell$. Thus, it 
may be used to model different kinds
 of loops (for-loops, while-loops).
%as long as a bounding expression can be statically determined (possibly by
%an auxiliary analysis such as~\cite{CS:01,PR:04}).
The use of bounded loops restricts the computable functions
to be primitive recursive, but this is still rich enough to make
the analysis problem challenging.

\paragraph{Goals of the analysis.}  
The \emph{polynomial-bound analysis problem} is to find,
for any given command, which output variables
are  bounded by a polynomial in the input variables. This is the problem we will fix
our attention on, although we will also consider a variant:
The \emph{linear-bound problem} identifies linearly-bounded output values instead.

\paragraph{Examples.} In the following program,
variables may grow exponentially.
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_4$ {
   X$_3$ := X$_1$+X$_2$;
   choose { X$_1$ := X$_3$ } or { X$_2$ := X$_3$ };
 }
\end{Verbatim}
However, the following version is polynomially bounded:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_4$ {
   X$_3$ := X$_1$+X$_2$;
   choose { X$_1$ := X$_3$ } or { X$_2$ := 0 }
 }
\end{Verbatim}
In the next example X$_2$ may grow exponentially inside the loop body, but only if  \verb+X+$_3$\verb+ := 0+ is not executed:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
loop X$_1$ {
	choose {
		X$_2$ := X$_3$ + X$_3$
	}	
	or {
		choose {
			X$_3$ := X$_2$
		}
		or {
			X$_3$ := 0
		}
	}		
}
\end{Verbatim}
% A more complicated example:
% \begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
% loop X$_5$ {
%   choose { X$_3$ := X$_1$+X$_2$;   X$_4$ := X$_2$ }
%       or { X$_1$ := 0;   X$_4$ := X$_1$+X$_2$ };
%   X$_1$ := X$_3$ + X$_4$
% }
% \end{Verbatim}


\section{Background}

\paragraph{The Algorithm of~\cite{BJK08}.} In~\cite{BJK08}, Ben-Amram, Jones and Kristiansen showed that for the
language obtained from $L_r$ by omitting the constant 0, denoted $\Lbjk$,
the polynomial-bound analysis problem is in PTIME. The algorithm that does this uses a technique of assigning a
\emph{certificate} to each command. The certificate is a finite structure
(basically, a matrix) which summarizes the input-output dependences in the command: 
how each of the output values depends on each of the inputs.
The analysis is compositional, which means that
a certificate for a composite command only depends on those of its parts.
The main novelty in the algorithm of~\cite{BJK08} was
a new kind of certificate, called a \emph{Data Flow Relation}, which encodes sufficient information for
precise analysis of the chosen core language. As the \emph{Data Flow Relation} had proven to be
a convenient and relatively compact way to store the variable data-flow information, it is also used by all
the analysis algorithms described in this document.

\paragraph{Extending the Core Language.} One of the open problems left by~\cite{BJK08} was analysis of extended
versions of the core language, and~\cite{BA10} presented a first step
forward. The extension considered is the capability to reset a variable to zero.
One way to explain the difficulty caused by resets is an increased
context-sensitivity: for example, the command \verb/X:=Y*Z/ 
introduces a dependence of \pgt{X} on \pgt{Z},
but not if \pgt{Y} is fixed at zero.
We solve this problem by employing context-sensitive analysis according to the inference rules described in~\cite{BA10}.
In~\cite{BA10} there is a description of a \emph{proof system} for the lack of polynomial bounds (contrary to a compositional analysis to certify polynomiality, as given by~\cite{BJK08}).
We introduce some basic definitions, taken from~\cite{BA10}:
\bdfn
The set of \emph{dependence types} is
$\deptypes =  \{1,1^+,2,3\}$, with order $1 < 1^+ < 2 < 3$, and binary maximum operator
$\sqcup$.  We write $x\simeq 1$ for $x\in\{1,1^+\}$.
\edfn

Verbally, we may refer to these types as:

$1=$\emph{identity dependence},
$1^+=$\emph{additive dependence},
$2=$\emph{multiplicative dependence},
$3=$\emph{exponential dependence}. 
\bdfn
$\inds$ is the set of all variable indices in the program.
\edfn\bdfn
A \emph{context} or \emph{reset context} is a subset of $\inds$. 
\edfn
\bdfn
A \emph{data flow}, denoted by $i \xrightarrow{d} j$, describes that the variable $X_j$ is dependent on variable $X_i$ with dependence type $d \in \mathbb{D}$.
\edfn
\bdfn
A \emph{dependence judgement} is $\C,P \vdash D,Q$ where $\C$ is a command, $P,Q$ are reset contexts and $D$ is a \emph{data flow}.
\edfn
The pre-context $P$ specifies variables that are presumed to hold zero; the post-context $Q$ specifies zeros guaranteed to come out.\newline
Following is an example of how the inference rules of~\cite{BA10} can be applied.  For instance, the inference rule

\[\frac{ i\notin P,\ i\ne l } {
 \X_l\texttt{:=0},\, P  \vdash \unarydep{i}{1}{i},\, P \cup \{ l \}
}\]

states that for any pre-context $P$, $l$ is added to the post context, and all variables which are not $X_l$ are not modified by the command $X_l$ \verb+:= 0+.
So given the command $X_1$ \verb+:= 0+, an empty pre-context, and $V=\{1,2,3\}$, the following judgements can be inferred by this rule:

\[{
 \X_1\texttt{:=0},\, \emptyset  \vdash \unarydep{2}{1}{2},\, \{1\}  \;\;\text{and}\;\; \X_1\texttt{:=0},\, \emptyset  \vdash \unarydep{3}{1}{3},\, \{1\}
}\]
%$\X_1\texttt{:=0},\, {}  \vdash \unarydep{2}{1}{2},\, {1} \cup \{ l \}$


One major problem is that the amount of dependence judgements generated by each command may be exponential in relation to the number of variables in the program. Thus applying the inference rules in a bottom-up manner can cause an explosion of dependence judgements. Although proving the lack of polynomial bounds using the proof system of~\cite{BA10} is proven to be \emph{PSPACE-complete}, our implementation will not attempt to keep the space within polynomial bounds. Instead we consider optimizations to the analysis which will reduce the number of reset contexts computed (though in the worst case it can still be exponential).

\section{Project Goals}
\subsection{Algorithm Approaches.}
The main goal of this project was to investigate the efficiency trade-off in the implementation of analysis algorithms based on the inference rules of~\cite{BA10}, i.e., ``how much can we benefit 
from working hard on optimizing the analysis algorithm?''. Therefore, the following approaches were considered:
\paragraph{The ``Naive'' Approach.} The Abstract Syntax Tree is processed bottom-up and for every command the set of all true judgments is generated. 
This implies exponentiality of the algorithm as for each command the amount of reset contexts may be exponential with respect to the number of variables in the program.
\paragraph{The Abstract Interpreter.} The program is processed in a top-down fashion, where for each command we generate all dependence judgments according to the inference rules of~\cite{BA10} and the reset contexts that were computed so far from previous commands. This approach requires special care to avoid redundant re-calculations since commands within a loop body may be processed over and over again with the same potential results, but it also holds promise of great efficiency gain relative to the ``naive'' approach since instead of considering all possible pre-contexts when processing a command, we consider only the pre-contexts which are the outcome of processing the command predecessors in the program computation sequence.

\subsection{Output Extension.}
Another goal of the project is to be able to present the analysis results in an even more informative way by extending the output to include a description of a computational path which results
in the computed upper-bound of each variable. This was achieved by ``decorating'' each data flow with an expression that describes a computational path through which the data flow was computed.

\section{Technical Background (\cite{BA10})}
\label{sec-sop}
As mentioned before, the algorithms described in this document make use of the inference rules described in~\cite{BA10}.
Following is a technical introduction to the \emph{proof system} of~\cite{BA10}.
\subsection{Proof System Ingredients}

The basic ingredient in the proof system of~\cite{BA10} is called a \emph{dependence fact}.
In its simple (unary) form, it indicates that an output variable depends, in a certain way, on some
input variable. The set of variable indices is denoted by $\inds$ with generic elements $i,j,k$
etc.

\bdfn
The set of \emph{dependences} $\deps$ is the union of two sets: \\
(1) The set of \emph{unary dependences}, isomorphic to 
$\inds\times\deptypes\times\inds$. The notation for an element is
\ $\unarydep{i}{\delta}{j}$. \\
(2) The set of \emph{binary dependences}, isomorphic to %\ $\binarydeps = 
$\inds\times\inds\times\inds\times\inds$, where the notation for an element is
\ $\binarydep{i}{j}{k}{\ell}$.
\edfn

Informally, a binary dependence represents a conjunction, namely the fact that two unary dependences hold simultaneously.
 This is only used when the dependences in question are of types $1$ or
$1^+$, and when $i\ne j \lor k\ne \ell$
 (a similar mechanism was used in~\cite{BJK08}).  

The pre-context $P$ specifies variables that are presumed to hold zero; the post-context
$Q$ specifies zeros guaranteed to come out.
 For an example, let $\C$ be the command
 $\verb+loop+\; \X_3 \; \verb+{X+_1\verb/:= X/_2\verb/+X/_3\verb+}+$.
% In the following judgements, initial values are unconstrained:
We have $ \C, \emptyset  \vdash  \unarydep{2}{1}{2},   \emptyset$ ($\X_2$ is not
modified) and $ \C, \emptyset  \vdash  \unarydep{2}{1^+}{1},   \emptyset$ ($\X_1$ may be
set to $\X_2$ plus something else).
%  \[ \renewcommand{\arraystretch}{1.5}\begin{array}{llrl}
%  \C, \{ \}  &\vdash  & \unarydep{2}{1}{2},  & \{ \} \\
%  % \C, \{ \}  &\vdash  & \binarydep{2}{3}{1}{1},  & \{ \} \\
%  \C, \{ \}  &\vdash  & \unarydep{2}{1^+}{1},  & \{ \}  \,.
%  \end{array}
%  \]
If $\X_3$ is initially zero, the loop does not execute. Therefore
$\C, \{ 3 \}  \vdash  \unarydep{2}{1^+}{1},  Q $
% \[ \begin{array}{llrl}
%  \C, \{ 3 \}  &\vdash  & \unarydep{2}{1^+}{1},  & Q 
%   \end{array}
%  \]
does \emph{not} hold for any $Q$.
However, $\C, \{ 3 \}  \vdash  \unarydep{1}{1}{1},  \{ 3 \}$
% \[ \begin{array}{llrl} \C, \{ 3 \}  &\vdash  & \unarydep{1}{1}{1},  & \{ 3 \}
%   \end{array}
%  \]
holds: $\X_1$ is not modified and $\X_3$ is guaranteed to remain zero.
 
\subsection{Inference rules for assignments}
 
 We list the {\tt skip} command among the assignments. It is, in fact, equivalent to
 $\verb+X+_1\verb/:= X/_1$.
 
\be
\item (Unary rule for \verb+skip+) \par
\[\frac{ i\notin P } { \texttt{skip},\, P  \vdash \unarydep{i}{1}{i},\, P } \]

\item (Unary rule for $\X_l\verb+:=0+$) \par
\[\frac{ i\notin P,\ i\ne l } {
 \X_l\texttt{:=0},\, P  \vdash \unarydep{i}{1}{i},\, P \cup \{ l \}
}\]

\item (Unary rules for  $\verb+X+_l\verb/:=X/_r$) \par
 For any context $P$, let
$P_{l,r} = P \setminus \{l\} \cup \{l\mid \text{ if $r\in P$}\}$. 
\[\frac{ i\notin P,\  i\ne l}
{\X_l\texttt{:=X}_r,\, P  \vdash \unarydep{i}{1}{i},   P_{l,r}}
\qquad
\frac{r\notin P}
{\X_l\texttt{:=X}_r,\, P  \vdash \unarydep{r}{1}{l},   P_{l,r}}
\]

\item (Unary rules for  $\verb+X+_l\verb/:=X/_r\verb/*X/_s$) \par
\label{itm:rules-mult}
  For any context $P$, let
$P_{l,r,s} = P \setminus \{l\} \cup \{l\mid \text{ if $r\in P$ or $s\in P$}\}$.

\[\frac{i\notin P,\ i\ne l}
{ \X_l\texttt{:=X}_r\texttt{*X}_s,\, P  \vdash \unarydep{i}{1}{i},\, P_{l,r,s} }
\qquad
\frac{r,s\notin P,\ t\in \{r,s\}}
{\X_l\texttt{:=X}_r\texttt{*X}_s,\, P  \vdash \unarydep{t}{2}{l},\, P_{l,r,s}
}\] 

\item (Unary rules for  $\verb+X+_l\verb/:=X/_r\verb/+X/_s$, where $r\ne s$) \par
  For any context $P$, let
$P_{l,r,s} = P \setminus \{l\} \cup \{l\mid \text{ if $r,s\in P$}\}$.

\begin{tabular}{*{2}{p{0.4\textwidth}}}
\[\frac{i\notin P,\ i\ne l}
{\X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{i}{1}{i},\, P_{l,r,s} }
\]
&
\[\frac{r\notin P,\ s\in P}
{ \X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{r}{1}{l},\, P_{l,r,s} }
\] \\
\[\frac{r\in P,\ s\notin P}
{\X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{s}{1}{l},\, P_{l,r,s} }
\]
&
\[\frac{r,s\notin P}
{\X_l\texttt{:=X}_r\texttt{+X}_s,\, P  \vdash \unarydep{t}{1^+}{l},\, P_{l,r,s} 
  \text{ for $t\in\{r,s\}$.}
}\]
\end{tabular} 


\item (Binary rules for assignments) \par
Let $\C$ be any of the above commands. If, for $i,i'\notin P$, and $j,j'\notin Q$,
where $i\ne i'$ or $j\ne j'$, we have
$\C,P  \,\vdash \,\unarydep{i}{r_1}{j},  Q$ and
$\C,P  \,\vdash \,\unarydep{i'}{r_2}{j'},  Q$, where $r_1,r_2\simeq 1$, then 
$\C,P  \,\vdash \,\binarydep{i}{i'}{j}{j'},  Q$.
\ee

\subsection{Inference rules for composite commands}

The composite commands are the choice, sequential composition and
the loop.

\paragraph*{Choice} is simplest, handled by the obvious rules:

%Choice:
\begin{equation}\mytag{C}
\frac{   {\tt C}_1, P \vdash D, Q }
{ {\tt choose\,C}_1 {\tt or\,C}_2, P \vdash  D, Q }
\qquad
\frac{   {\tt C}_2, P \vdash D, Q }
{ {\tt choose\,C}_1 {\tt or\,C}_2, P \vdash  D, Q }
\end{equation}

\paragraph{Sequential composition} requires an operator for abstract composition,
that is, composition of dependences.
 
 \bdfn
 The binary operation $\cdot$ is defined on $\deps$ by the following rules:
\[\renewcommand{\arraystretch}{2} 
\begin{array}{ccc}
   (\unarydep{i}{\alpha}{j}) \cdot (\unarydep{j}{\beta}{k}) &=& 
   \multicolumn{1}{l}{\longunarydep{i}{\alpha\sqcup\beta}{k}} \\  
   (\unarydep{i}{\alpha}{j}) \cdot (\binarydep{j}{j}{k}{k'}) &=& (\binarydep{i}{i}{k}{k'}),
\quad\text{provided $\alpha \simeq 1$} \\
    (\binarydep{i}{i'}{j}{j}) \cdot (\unarydep{j}{\alpha}{k}) &=& (\binarydep{i}{i'}{k}{k}),
\quad\text{provided $\alpha \simeq 1$} \\
   (\binarydep{i}{i'}{j}{j'}) \cdot (\binarydep{j}{j'}{k}{k'}) &=& 
\left\{\begin{array}{cl}
\binarydep{i}{i'}{k}{k'},  &  \text{ if $i\ne i'$ or $k\ne k'$} \\
\unarydep{i}{2}{k},  &  \text{if $i=i'$ and $k= k'$}
\end{array}\right.
 \end{array}
\]
 \edfn
We now have the rule
%Sequential composition:
\begin{equation}\mytag{S}
\frac{   {\tt C}_1, P \vdash D_1, Q \quad {\tt C}_2, Q \vdash D_2, R }
{  {\tt C}_1 {\tt ;C}_2, P \vdash   {D_1\cdot D_2}, \, R }
\end{equation}
Naturally, the rule is only applicable if $D_1\cdot D_2$ is defined.

\paragraph*{The loop} involves the possibility of growth that depends on the number of 
iterations.  To handle this, we introduce a \emph{loop correction} operator (not unlike
the one in~\cite{BJK08}).

\bdfn \label{def:LC}
The loop correction operator $LC_\ell : \deps\to\deps$ is defined by
\begin{align*}
LC_\ell (\unarydep{i}{1^+}{i}) &= 
   \unarydep{\ell}{2}{i}   \\
LC_\ell (\unarydep{i}{2}{i}) &= 
   \unarydep{\ell}{3}{i}  
%   \\
%LC_\ell (\unarydep{i}{1}{i}) &= 
%   \unarydep{i}{1}{i}   
\end{align*}
\edfn
Explanation: in the first case, $\X_i$ has something added to it. Intuitively, if this happens
inside a loop, it
results in growth that is at least linear in the number of iterations.
In the second case, $\X_i$ is multiplied by something, which results in exponential growth.
%The third case is just a convenience.

There are three loop rules. The first covers the case that the body is not executed.
%Loop: 
\begin{equation}\mytag{L0}
\frac{ \texttt{skip}, P  \vdash D, P } 
{{\tt loop\,X_\ell\{C\}}, P \vdash   D, P }
\end{equation}

The second describes the result of any number $m>0$ of iterations.
% \begin{equation}\mytag{L1}
% \frac{ (\exists m) (\exists P_0,\dots,P_m, D_1,\dots,D_m)
% ( \ell\notin P_0 \land (\forall i<m)\, \C, P_i  \vdash D_{i+1}, P_{i+1} ) } 
% {{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot D_2\cdot\ldots\cdot D_m}, P_m }
% \end{equation}
%%%% A better version - without \exists and \forall :
\begin{equation}\mytag{L1}
\frac{ {\tt C}, P_0  \vdash D_{1}, P_{1}\quad {\tt C}, P_1  \vdash D_{2}, P_{2}\quad\dots\quad
{\tt C}, P_{m-1}  \vdash D_{m}, P_{m}\qquad \ell\notin P_0 }
{{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot D_2\cdot\ldots\cdot D_m}, P_m }
\end{equation}

The third applies the LC operator.
% \begin{equation}\mytag{L2}
% \frac{ (\exists P_0,P_1=P_2,P_3, D_1,D_2,D_3)
% (\forall 0\le i<3)\, {\tt loop\,X_\ell\{C\}}, P_i  \vdash D_{i+1}, P_{i+1} }
% {{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot LC_\ell(D_2)\cdot D_3}, \, P_3 }
% \end{equation}
%%%%% A better version without \exists :
\begin{equation}\mytag{L2}
\frac{ 
{\tt loop\,X_\ell\{C\}}, P_0  \vdash D_{1}, P_{1} \quad
{\tt loop\,X_\ell\{C\}}, P_1  \vdash D_{2}, P_{1} \quad
{\tt loop\,X_\ell\{C\}}, P_1  \vdash D_{3}, P_{3} \qquad \ell\notin P_0}
{{\tt loop\,X_\ell\{C\}}, P_0 \vdash   {D_1\cdot LC_\ell(D_2)\cdot D_3}, \, P_3}
\end{equation}

Note that as $LC_\ell$ is applied to $D_2$, we require that $D_2$ be a dependence
that can be iterated: this requires that the pre-context and post-context be
the same.

\section{Technical Background (\cite{BJK08})}
One important data structure described in~\cite{BJK08} was integrated into the algorithms of this document. 
This data structure is the \emph{Data Flow Relation}, or $DFR$ for short. 
A \emph{Data Flow Relation} is a pair $(M,R)$ with the following definitions.
\subsection{Definition of M.} 
 $M$ was defined in~\cite{BJK08} to be a matrix such that $M_{ij}$ is the dependency type of $\X_j$ on $\X_i$. 
 The definition of $M$ in~\cite{BJK08} differs from the one in this document by the fact that $\mathbb{D}$ in~\cite{BJK08} also included the possible value 0, hence each cell in the matrix $M$ could have one of the
 possible values $\{0,1,1^+,2,3\}$. In this document we refer to $M$ simply as a set of \emph{Data Flows}. 
In fact, $M$ can be viewed as a bipartite, labeled digraph where the left-hand (source) side represents the input variables and the right-hand (target) side represents the output.
Unlike~\cite{BJK08}, $M$ does not contain data flows with dependence type $0$ since $M$ holds only the existing data flows and nothing more. This is contrary to the matrix representation of $M$ where all pairs of variable data flows must be represented and a need arises for a ``bottom'' dependency type, which is $0$. 

\subsection{Definition of R.} 
$R$ consists of 2-sets of arcs. Each arc must belong to the subset of $M$'s arcs which are labeled with $1$ or $1^+$. 
A pair $\{i\to j, i'\to j'\}\in R$ with $j\ne j'$ describes {\em simultaneous flow\/}:
in some execution path, the value of $\X_i$ flows into $\X_j$, while
that of $\X_{i'}$ flows into $\X_{j'}$. A pair $\{i\to j,i' \to j\}$
describes {\em additive flow\/}: the values originating from $\X_i$,
$\X_{i'}$ get added together at some point and end up in $\X_j$.
In this case, the corresponding matrix entries must be $1^+$.

Here is an example: consider the following command \verb+C+:
\begin{Verbatim}[codes={\catcode`$=3\catcode`_=8}]
choose { X$_2$ := X$_3$; X$_3$ := X$_1$} or { skip }
\end{Verbatim}
we have $M$ containing $\unarydep{3}{1}{2}$ and $\unarydep{3}{1}{3}$,
since the initial value of ${\tt X}_3$ may flow into ${\tt X}_2$. And it
may also remain in ${\tt X}_3$.
But the pair $\{3\to 2, 3\to 3\}$ is \emph{not} in $R$, indicating that these
two dataflows are not simultaneous.
 Therefore, following \verb+C+ with
the command
$$\verb/ X/_3 \verb/ := X/_2 \verb/ + X/_3$$
will \emph{not} create the data flow $\unarydep{3}{2}{3}$.

For compactness, a 2-set $\{i \to j, i' \to j'\}$ is denoted by $\binarydep{i}{i'}{j}{j'}$. This is in fact the same definition and notation as for the set $\mathbb{F}$ in Definition 5.1.

%For example, suppose that command C is \verb+X+$_1$\verb+ := X+$_2$\verb& + X&$_2$, then $M$ will contain the arcs $2\xrightarrow{2}1$, and $2\xrightarrow{1}2$, therefore $R$ will include the 2-sets: \\\\ 
%$\binarydep{2}{2}{1}{1}, \; \binarydep{2}{2}{2}{2}$, and $\binarydep{2}{2}{1}{2} $.
%$\{2 \rightarrow 1, 3 \rightarrow 1\},\{2 \rightarrow 2, 2 \rightarrow 2\},\{3 \rightarrow 3, 3 \rightarrow 3\}, \{2 \rightarrow 2, 3 \rightarrow 3\}, \{3 \rightarrow 1, 3 \rightarrow 3\}, \{3 \rightarrow 1, 2 \rightarrow %2\},\\\\ \{2 \rightarrow 1, 3 \rightarrow 3\}, \{2 \rightarrow 1, 2 \rightarrow 2\} $.

\subsection{Usage of DFRs}
A $DFR$ provides a compact way of describing the data flows between the program variables. 
The algorithms described in this document use the inference rules of~\cite{BA10} to generate \emph{dependence judgements} from the program.
Given a command \verb+C+, a pre-context $P$ and a post-context $Q$, the set of all dependence judgements can be written as follows (example):\newline \newline
\{\verb+C+,$P \vdash \unarydep{1}{2}{1},\, Q$, \; \verb+C+,$P \vdash \unarydep{2}{1}{2},\, Q$, \; \verb+C+,$P \vdash \unarydep{2}{2}{1},\, Q$, \; $...$\;\}\newline \newline
A compact notation for describing this set is \verb+C+,$P \vdash D,\, Q$, where $D$ is a \emph{Data Flow Relation}.
Looking at this notation we come up with a definition of a \emph{context graph}.
\bdfn
A \emph{context graph} is a directed graph where each vertex represents a \emph{reset context} and each arc is labeled with a $DFR$.
\edfn
A context graph can hold all dependence judgements given all possible pre-contexts and post-contexts.
It is an intuitive data structure for maintaining the dependence judgements during the program analysis.

\section{The ``Naive" Approach}
In this section we describe an analysis algorithm based on the inference rules of~\cite{BA10} which takes a ``Naive'' approach to the problem:
The Abstract Syntax Tree is processed bottom-up and for every command the set of all true judgments is generated based on the inference rules. 
This implies exponentiality of the algorithm runtime and space as for each command the amount of reset contexts may be exponential with respect to the number of variables in the program.
 
\subsection{Preliminaries}
Following are some definitions used in the algorithm description.
%TODO: move to tech. background section.
\bdfn
Denote by $\mathbb{C}$ the set of all reset contexts over the program's variables.
\edfn
\bdfn
Let $G$ be a \emph{context graph}, then $G_{P,Q}$ is the $DFR$ associated with the arc from context $P$ to context $Q$ in $G$.
\edfn

\bdfn
We define the \emph{compositional operator} ``$\cdot$'' for context graphs as follows: \newline 
Let $G,G'$ be some \emph{context graphs}, then
 $G \cdot G' = G''$ where: \newline
1. $E(G'') = \{ (P,Q) \; | \; \exists P' \in \mathbb{C} :  (P,P') \in E(G) \wedge (P',Q) \in E(G') \}$ \newline
2. $G''_{P,Q} = \bigsqcup G_{P,P'} \cdot G'_{P',Q}$ such that $(P,P') \in E(G)$ and $(P',Q) \in E(G')$ 
\edfn
In the expression  $G_{P,P'} \cdot G'_{P',Q}$, the ``$\cdot$'' operator is the one defined for $DFR$s.

\bdfn
We define the \emph{join} or \emph{least upper bound}  operator ``$\sqcup$'' for  context graphs as follows.
Let $G,G'$ be some \emph{context graphs}, then \newline $G \sqcup G' = G''$ where: \newline
1. $E(G'') = \{ (P,Q) \;| \; (P,Q) \in E(G) \vee (P,Q) \in E(G') \}$  \newline
2. $(P,Q) \in E(G) \wedge (P,Q) \not\in E(G') \rightarrow G''_{P,Q}=G_{P,Q}$  \newline
3. $(P,Q) \not\in E(G) \wedge (P,Q) \in E(G') \rightarrow G''_{P,Q}=G'_{P,Q}$ \newline
4. $(P,Q) \in E(G) \wedge (P,Q) \in E(G') \rightarrow G''_{P,Q}=G_{P,Q} \sqcup G'_{P,Q}$  
\edfn
In the expression  $G_{P,Q} \sqcup G'_{P,Q}$, the ``$\sqcup$'' operator is the one defined for $DFR$s.

\bdfn
For a context $P$, we denote: \newline
1. $\hat{P}_{x,y}=P \setminus \{x\} \cup \{ x \;|\; \text{if } y \in P \}$ \newline
2. $\hat{P}_{x,y,z}=P \setminus \{x\} \cup \{ x \; | \; \text{if } y \in P \wedge  z \in P \}$ \newline
3. $\check{P}_{x,y,z}=P \setminus \{x\} \cup \{ x \; | \; \text{if } y \in P \vee  z \in P \}$ \newline
\edfn

\bdfn
Let $G$ be a \emph{context graph}, then $LCG_\ell(G)$ is the \emph{loop corrected} $G$; it is a context graph  defined as follows: \newline
\begin{eqnarray*}
 \forall P_0,P_1,P_2\in \mathbb{C} : & ((P_0,P_1) \in E(G) \wedge (P_1,P_2) \in E(G)) \rightarrow \\ &
 LCG_\ell(G)_{P_0,P_2} = (G_{P_0,P_1}\cdot LC_\ell (G_{P_1,P_1}) \cdot G_{P_1,P_2})  \\
\end{eqnarray*}
\edfn
In the expression  $LC_\ell (G_{P_1,P_1})$, the ``$LC_\ell$'' operator is the one defined for $DFR$s.



\subsection{The ``Naive" Analysis Algorithm}
The algorithm's input is a command of the core language and its output is a context graph.
The algorithm works bottom-up; the premises of a composite command  are analysed first, resulting in context graph(s) which are then combined in some way to yield
the analysis result of the composite command itself. We describe the algorithm by recursively describing its output on each command of the core language.
\begin{eqnarray*}
\lsem{\tt C}_1 {\tt ;C}_2\rsem &=&
  \sempar{C$_2$}\cdot\sempar{C$_1$} \\
%choose {} or {} command analysis semantics:
\lsem\verb+choose+\; \{ \verb+C+_1 \}\; \verb+or+ \; \{ \verb+C+_2 \}\rsem &=&
  \sempar{C$_1$}\sqcup\sempar{C$_2$} \\
%skip command analysis semantics:
\lsem \verb+skip+ \rsem &=&
   \lsem{\tt X}_1 {\tt \;:=X}_1 \rsem \\
%zero assignment command analysis semantics:
\lsem {\tt X}_i {\tt\; :=0} \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,P \cup \{i\}} = 
      \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq i \}   \\
%var assignment command analysis semantics:
\lsem {\tt X}_i {\tt \;:=X}_j \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,\hat{P}_{i,j}} =
     \{ \unarydep{j}{1}{i} | j \not \in P  \} \cup 
     \{ \unarydep{k}{1}{k} | k \not \in P \wedge k \neq i \}   \\
%sum assignment command analysis semantics:
\lsem {\tt X}_i {\tt \;:=X}_j \;{\tt + \;X}_k \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,\check{P}_{i,j,k}} = \\ &&
     \{ \unarydep{t}{1}{t} | t \neq i \wedge t \not \in P  \} \cup 
    \{ \unarydep{j}{1}{i} | j \not \in P \wedge k \in P \} \cup \\ &&
    \{ \unarydep{k}{1}{i} | k \not \in P \wedge j \in P \} \cup
   \{ \unarydep{t}{1^+}{i} | t \in \{j,k\} \wedge j,k \not\in P \}  \\
%product assignment command analysis semantics:
\lsem {\tt X}_i {\tt :=X}_j \;{\tt * \; X}_k \rsem &=& Context \; graph \; G \; where: \\ 
  &&  \forall P \in \mathbb{C} : G_{P,\hat{P}_{i,j,k}} = \\ &&
     \{ \unarydep{t}{1}{t} | t \neq i \wedge t \not \in P  \} \cup 
    \{ \unarydep{t}{2}{i} | t \in \{j,k\} \wedge j,k \not\in P \}  \\
\end{eqnarray*}

For the loop command analysis semantics definition we must first define the effect of the \emph{fix} operator.
\bdfn
Let $G$ be a \emph{context graph}, then \verb+fix+$(G)$ is the ``\emph{loop closured}'' $G$, it is a context graph  defined as follows: \newline

\begin{eqnarray*}
 {\tt fix}(G) =  \lsem {\tt skip} \rsem \sqcup (\bigsqcup_{1 \le i \le \infty} G^i) \\
\end{eqnarray*}
Here $G^i$ stands for $i$ compositions of $G$. For example, $G^3 = G \cdot G \cdot G$.
Although the formal definition of the fix operator contains the $\infty$ symbol, computing the loop closure of a context graph requires some finite $m>0$ iterations.
This property is true due to the following observation:
\begin{eqnarray*}
{\tt If\;}  \lsem {\tt skip} \rsem \sqcup (\bigsqcup_{1 \le i \le k} G^i) = \lsem {\tt skip} \rsem \sqcup (\bigsqcup_{1 \le i \le k-1} G^i) \;{\tt then} \\
{\tt for\; each\;\;} \ell>k: \;\; \lsem {\tt skip} \rsem \sqcup (\bigsqcup_{1 \le i \le k} G^i) = \lsem {\tt skip} \rsem \sqcup (\bigsqcup_{1 \le i \le \ell} G^i) \\
\end{eqnarray*}
\edfn 

We now proceed to the loop command analysis semantics, which are simply defined as: 
\begin{eqnarray*}
%loop command analysis semantics:
\lsem {\tt loop \; X}_\ell \; {\tt C} \rsem &=& {\tt LCG_{\ell}(fix(} \lsem {\tt C} \rsem)) \\ 
\end{eqnarray*}

%The sequence (\verb+;+) and \verb+choose+ commands are analysed using the sequencial composition (``$\cdot$") and join (``$\sqcup$") operators.
%The \verb+skip+ command is analysed the same way a self assignment is analysed.

\section{The Abstract Interpreter}

\section{Conclusion}

% This paper presents progress in a project of investigating program abstractions
% with decidable growth-rate analysis problems, and establishing the computational
% complexity of such problems.
% In particular, we have added a reset-to-zero command to the language of~\cite{BJK08}.
% We have suggested a solution based on inference rules that are targeted
% at proving non-polynomial growth, instead of the previous approach of computing
% certificates to polynomiality. This idea may be interesting in its own right; note that
% we can freely exchange a problem with its complement precisely because we are designing 
% a precise decision procedure!
% This also allows us to classify the complexity of the analysis problem.
% In the case of resets, we have
% shown that the analysis problem rises from PTIME to PSPACE-complete.
% There are other possible extensions whose complexity (even decidability) are not known.
% An intriguing theoretical question is: are there any (reasonably natural) ``core languages"
% for which the analysis is decidable, yet has a complexity above PSPACE-complete?

\subsection*{Acknowledgement}

\bibliographystyle{eptcs}
\bibliography{icc}

\end{document}
