In this section we show that \hof is powerful enough to model Minsky machines (see Section \ref{ss:expr-approaches}). %\citep{Minsky67}, a Turing complete model. 
We present an encoding that is not \emph{faithful}: unlike the encoding of
Minsky machines in \hocore, it may introduce computations which do not correspond to the expected
behavior of the modeled machine. Such computations are forced to be infinite and
thus regarded as non-halting computations which are therefore ignored. 
%Only finite computations correspond to those of the encoded Minsky machine.
%a non-deterministic encoding that can introduce computations which do not follow the expected behavior of the modeled machine. Nevertheless, these computations are infinite.
 More precisely, given a \mma $N$, its encoding $\encp{N}{}$ has a terminating computation if and only if $N$ terminates. This allows us to prove that convergence is undecidable.

%We begin by briefly recalling the definition of Minsky machines; we then present the encoding into \hof and discuss its correctness. 

% %\vspace{-2mm}
% %\shortv{
% %\noindent {\bf Minsky machines.}}
% %\longv{
% \paragraph{\bf Minsky machines.}
% %}
% %\subsection{Minsky machines}
% A Minsky machine 
% is a Turing complete model composed of   
% a set of sequential, labeled   
% instructions, and two registers.   
% Registers $r_j ~(j \in \{0,1\})$ can hold arbitrarily large natural numbers.   
% Instructions $(1:I_1), \ldots, (n:I_n)$ can be of two kinds:  
% $\mathtt{INC}(r_j)$ adds 1 to register $r_j$ and proceeds to the next instruction;  
% $\mathtt{DECJ}(r_j,s)$ jumps to instruction $s$ if $r_j$ is zero, otherwise it decreases register $r_j$ by 1 and proceeds to the next instruction.  
% %
% A \mma includes a program counter $p$ indicating the label of the instruction  being executed.   
% In its initial state, the machine has both registers set to $0$ and the program counter $p$ set to the first instruction.  
% %that the machine starts with zero in both registers and that $(1:I_1)$ is the first instruction to be  
% %executed.   
% The \mma stops whenever the program counter is set to a non-existent instruction, i.e. $p > n$.   
%   %
%   %
% %\subsubsection*{Reduction in MMs}  
% %
% A \emph{configuration} of a \mma is a tuple $(i,m_0,m_1)$; it consists of the current program counter and the values of the registers. Formally, the reduction relation  
% over configurations of a \mma, denoted $\minskred$, is defined in Table
% \ref{fig:mmdef}. 
% 
% %\begin{figure}  
% \begin{table}[t]  
% %{\scriptsize
% \begin{mathpar}  
% \inferrule*[left=M-Inc]{i:\mathtt{INC}(r_j) \quad m_j' = m_j + 1 \quad m_{1-j}' = m_{1-j}}{(i,m_0,m_1)\minskred(i+1,m_0',m_1')}  
% \and  
% \inferrule*[left=M-Jmp]{i:\mathtt{DECJ}(r_j,s) \quad m_j = 0}{(i,m_0,m_1)\minskred(s,m_0,m_1)}  
% \and  
% \inferrule*[left=M-Dec]{i:\mathtt{DECJ}(r_j,s) \\ m_j \neq 0 \\  m_j' = m_j - 1 \\ m_{1-j}' = m_{1-j}}{(i,m_0,m_1)\minskred(i+1,m_0',m_1')}
% \end{mathpar}  
% %}
% %\vspace{-3mm}
% \caption{Reduction of Minsky machines}  
% \label{fig:mmdef}  
% %\end{figure} 
% %\vspace{-5mm} 
% \end{table}  


The following notion of structural congruence will be useful later on.

\begin{mydefi}%[Structural Congruence]
\label{d:struct} 
The \emph{structural congruence} relation  
is the smallest congruence 
generated by the following laws: 
%{\small
$$P \parallel \mathbf{0} \equiv P, \ P_1 \parallel P_2 \equiv P_2 \parallel P_1,\ P_1 \parallel (P_2 \parallel P_3) \equiv (P_1 \parallel P_2) \parallel P_3.$$
%}
 \end{mydefi} 
%\emph{Reductions} $P \pired P'$ are defined as $P \arr{\tau} P'$.

\begin{lemma}\label{l:equiv}
If $P \!\arr \alpha\! P'$ and $P \equiv Q$ then there exists $Q'$ such that $Q\! \arr \alpha\! Q'$ and 
$P' \equiv Q'$.
\end{lemma}

\begin{proof}
%By induction on the transition $P \arr \alpha P'$.
%\as{Shouldn't we do the proof by 
By induction on the derivation of $P \equiv Q$, then by case analysis on $P \arr \alpha P'$.
%?}
\end{proof}
  

%In the encoding of a \mma into \hof we will find it convenient to have a simple form of guarded replication. This construct can be encoded in \hof as follows.


% % The encoding of a \mma into \hof is denoted as $\encp{\cdot}{\mms}$. 
% % We first present an encoding of a simple form of %guarded choice and 
% % guarded replication that will be used in the encoding of the \mma. 
%  
% %\shortv{\noindent {\bf Input-guarded replication.}}
% %\longv{
% \paragraph{\bf Input-guarded replication.}  
% %}
% % \finish{DS: below, as far as i can see, it is the standard encoding of 
% %   replication in HOpi, with restriction omitted. Is this all, or is 
% %   there something  else that i am missing? (otherwise,  it is not very 
% %   surprising, and should not be emphasized in introduction and 
% %   elsewhere, probably) I guess the fact that the 
% %   replications encoded is ``guarded'' is important? }   
% We follow the standard encoding of replication in higher-order process
% calculi, adapting it to input-guarded replication so as to make sure
% that diverging behaviors are not introduced.  As there is no
% restriction in \hof, the encoding is not compositional and
% replications cannot be nested. 
% \shortv{In \cite{LanesePSS08} the following encoding is shown to preserve termination.}
% 
% %is only correct if the replications may only be triggered once, that is if they are not replicated themselves. A conservative approximation is to require that they are in evaluation context (i.e. in top level parallel compositions).
% %\finish{IL: What do you mean here Alan? What's the problem if they are under a prefix? Maybe there are problems if they are nested.} \finish{AS: Is it better?}
% 
% 
% \begin{mydefi}
% \label{defin:grepl}  
% Assume a fresh name $c$. The encoding of \emph{input-guarded replication} is  as follows:
% % %{\small
% % $$  
% % \encp{!a(z).P}{\inrep} = a(z).(Q_c \parallel P) \parallel \msend{c}{a(z).(Q_c \parallel P)}  
% % $$
% % %}
% % where $Q_c = c(x) .(x \parallel \overline{c}\langle x \rangle)$, 
% \[
% \encp{!a(z).P}{\inrep} = a(z).(c(x).(x \parallel \outC{c}\langle x \rangle \parallel P)) 
% \parallel \msend{c}{a(z).(c(x).(x \parallel \outC{c}\langle x \rangle \parallel P))}   
% \]
% where 
% $P$ contains no replications (nested replications are forbidden),
% and $\encp{\cdot}{\inrep}$ is an homomorphism on the  
% other process constructs in \hof.  
% \end{mydefi}
% 
% %\longv{The above encoding preserves termination.  
% 
% It is worth noticing that after the input on $a$, 
% a copy of $P$ is only released after a synchronization on $c$.
% More precisely, we have the following correctness statement.
% 
% \begin{mylem}[Correctness of $\encp{\cdot}{\inrep}$]  
% \label{l:corr-repl}
% Let $P$ be a \hof process with non-nested input-guar\-ded replications.   \vspace{-2mm}
% \begin{itemize}  
% \item If $\encp{P}{\inrep}\!  \pired \! Q$ then $\exists P'$ such that $ P\! \pired\! P'$ and  
% either $\encp{P'}{\inrep} = Q$ or $Q \pired \encp{P'}{\inrep}$.  
% \item If $P \pired P'$ then either $\encp{P}{\inrep} \pired \encp{P'}{\inrep}$ or  
% $\encp{P}{\inrep} \pired\pired \encp{P'}{\inrep}$.  
% \item $\encp{P}{\inrep} \nrightarrow$ iff $P \nrightarrow$.  
% \end{itemize}  
% \end{mylem}  
% \begin{proof} 
% By induction on the transitions. \qed
% %By induction on the length of the inference $\xrightarrow{~\alpha~}$.
% \end{proof}

% With a slight abuse of notation, in what follows
% we shall use input-guarded replication in \hof processes without 
% referring back to the encoding $\encp{\cdot}{\inrep}$.
% %\shortv{\noindent {\bf Encoding Minsky machines into \hof.}}
% %\longv{


\subsection{Encoding Minsky Machines into \hof}
%}
The encoding of Minsky machines into \hof is denoted by $\encp{\cdot}{\mms}$ and presented in
Figure \ref{f:encod-hof}. 
The encoding is assumed to execute in parallel with a process $loop.\textsc{Div}$,
which 
represents divergent behavior 
that is spawned
in certain cases with an output on name $loop$.
This will be made more precise later, when
defining the encoding of a configuration of a \mma.  
Before that, we begin by 
discussing the encodings of registers and instructions.
%The cornerstone of an encoding of \mma is the definition of counters that may be tested for zero. 
%
\begin{figure}[t]
\centering  
%{\small 
\begin{tabular}{l}   
\(  
\mathrm{\textsc{Register}}~r_j \qquad
\encp{r_j = m}{\mms}  =  \prod_{1}^{m}\overline{u_j}\vspace{-2mm}
\) \\ \\
\(   
\begin{array}{lll}   
\multicolumn{3}{l}{\mathrm{\textsc{Instructions}}~(i:I_i)}\\  
\encp{(i: \mathtt{INC}(r_j))}{\mms}&  = &  !p_i.(\overline{u_j} \parallel set_j(x) .( \Ho{set_j}{x \parallel \mbox{\textsc{Inc}}} \parallel \overline{p_{i+1}}))\\  
\encp{(i: \mathtt{DECJ}(r_j,s))}{\mms}&  = & ~~~!p_i. (\overline{loop} \parallel u_j . loop . set_j(x) . (\Ho{set_j}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}})) \\
& &  \parallel  !p_i . set_j(x) . (x \parallel \Ho{set_j}{x} \parallel \overline{p_s}) 
\end{array}   
\) \\
\quad where \\
\(
\begin{array}{lll}
 %\qquad \qquad \mbox{\textsc{Inc}}_j &=& \overline{loop} \parallel check_j.loop \qquad \qquad  \mbox{\textsc{Dec}}_j = \overline{check_j}
\qquad \qquad \mbox{\textsc{Inc}} &=& \overline{loop} \qquad \qquad 
 \mbox{\textsc{Dec}} = loop
\end{array}
\)\\  
\end{tabular}
%}  
\caption{Encoding of Minsky machines into \hof}  
\label{f:encod-hof}  
%\vspace{-5mm}
\end{figure}
%
% 
%Numbers are represented as parallel instances of the same process: i.e. t

%The encoding of a 
A register $r_j$ that stores the number $m$ 
is encoded as the parallel composition of  $m$ copies of the unit process $\overline{u_j}$.
To implement the test for zero it is necessary to record how many increments and decrements have been performed on the register $r_j$. This is done by using a special process $\textsc{Log}_j$, which is communicated back and forth on name $set_j$. More precisely, every time an increment instruction occurs,  a new copy of the process $\overline{u_j}$ is created, and 
%but we also receive on $set_j$ all the previous computations on the register (the process 
the process $\textsc{Log}_j$ is updated by adding the process $\textsc{Inc}$ in parallel. 
Similarly for decrements: a copy of $\overline{u_j}$ is consumed and the process $\textsc{Dec}$ is added to $\textsc{Log}_j$. As a result, after $k$ increments and $l$ decrements on register $r_j$,  we have that %process $\textsc{Log}_j$ is of the form 
$\textsc{Log}_j = \prod_{k} \textsc{Inc} \parallel \prod_{l} \textsc{Dec}$, 
which we abbreviate as $\textsc{Log}_j[k,l]$. 

Each instruction $(i:I_i)$ is a replicated process guarded by $p_i$, which represents the program counter when $p=i$.   
Once $p_i$ is consumed, the instruction is active and,
in the case of increments and decrements,
an interaction with a register occurs. We already described the behavior of increments. Let us now focus on decrements, the instructions that can introduce divergent ---unfaithful--- computations. 
In this case, %When the instruction to be encoded is a decrement 
the process can internally choose either to actually perform a decrement and proceed with the next instruction, or to jump. 
This internal choice takes place on $p_i$; 
it can be seen as a \emph{guess} the process makes on the actual number stored by the register $r_j$. Therefore, two situations can occur: %\vspace{-1.5mm}

\begin{enumerate}
 \item \emph{The process chooses to decrement $r_j$.} 
In this case a process $\overline{loop}$ 
as well as  an input on $u_j$
become immediately available. 
The purpose of the latter is to produce a synchronization with
a complementary output on $\outC{u_j}$ (that represents a unit of $r_j$).

If this operation succeeds 
(i.e., the guess is right as the content of $r_j$ is greater than 0) then a synchronization 
between the output $\outC{loop}$ ---available at the beginning--- and 
the input on $loop$ that guards the update of $\textsc{Log}_j$ takes place.
After this synchronization, 
the log of the register is updated
(this is represented by two synchronizations on name $set_j$) 
and instruction $p_{i+1}$ is enabled.

Otherwise, 
if the synchronization on $u_j$ fails
then it is because the content of $r_j$ is zero and the process made a wrong guess.
%then the unit process $\overline{u_j}$ could not be consumed.
The process $\overline{loop}$ 
available at the beginning
then synchronizes with the external process $loop.\textsc{Div}$, thus spawning a divergent computation.
% In this case instruction $p_{i+1}$ is immediately enabled, and 
% the process launches process $\overline{loop}$ and then tries to consume a copy of $\overline{u_j}$. If this operation succeeds 
% (i.e. the guess is right as the content of $r_j$ is greater than 0) then a synchronization with the input on $loop$ that guards the update of $\textsc{Log}_j$ (represented as an output on name $set_j$) takes place.
% Otherwise, the unit process $\overline{u_j}$ could not be consumed (i.e. the content of $r_j$ is zero and the process made a wrong guess). 
% Process $\overline{loop}$ then synchronizes with the external process $loop.\textsc{Div}$, thus spawning a divergent computation.

 \item \emph{The process chooses to jump to instruction $p_s$.} 
In this case, the encoding checks if the actual value stored by $r_j$ is zero. 
To do so, the process receives the process $\textsc{Log}_j$ on name $set_j$
%he history of the actions performed on the register 
and launches it.
The log contains a number of $\textsc{Inc}$ and $\textsc{Dec}$ processes;
depending on the actual number of 
increments 
and decrements, two situations can occur.

In the first situation, the number of increments 
is equal to the number of decrements (say $k$); hence,
the value of $r_j$ is indeed zero and the process made a right guess.
In this case, $k$ synchronizations on name $loop$ take place and instruction $p_s$ is enabled.

In the second situation, the number of increments is greater than the number 
of decrements; hence, the value of $r_j$ is greater than zero and the process made a wrong guess.
As a result, at least one of the  $\overline{loop}$ signals remains active;
a synchronization with the process $loop.\textsc{Div}$
is then enough 
to spawn a divergent computation.
% In this case instruction $p_s$ is immediately enabled, and it is necessary to check if the actual value stored by $r_j$ is zero. To do so, the process receives the process $\textsc{Log}_j$ 
% %he history of the actions performed on the register 
% and launches it. If the number of 
% increments is equal to the number of decrements 
% %processes $\textsc{Inc}$ is equal to the number of processes $\textsc{Dec}$ 
% then 
% complementary signals on the name $check_j$ will match each other.
% %every occurrence of the signal $check$ is matched by a corresponding $\overline{check}$. 
% In turn, this allows each signal $\overline{loop}$ executed by an $\textsc{Inc}_j$ process to be matched by a complementary one. %can be consumed by the corresponding signal $loop$. 
% Otherwise, then it is  the case that at least one of those  $\overline{loop}$ signals remains active (i.e. the content of the register is not zero); a synchronization with the process $loop.\textsc{Div}$ then takes place, thus spawining a divergent computation.
\end{enumerate}

  
  
Before executing the instructions, we require
both registers in the \mma to be set to zero. 
This is to guarantee correctness: starting with values different from zero in the registers (without proper initialization of the logs) can lead to inconsistencies. For instance, 
the test for zero would succeed (i.e., without spawning a divergent computation) even 
for a register whose value is different from zero.
%We now define the precise structure of the encoding of configurations.
%More precisely, we define the encoding of the initial configuration state of a \mma $N$ as the parallel composition of all the instructions of $N$, the signal that enables the first instruction, a divergent process guarded by the signal $loop$, and we initialize the record of actions performed on registers by sending on $set_j$ the process $\nil$ (for $j \in \{0,1\}$). This is the content of next definition:

The following notation will be useful.
\begin{newnotation}
 Let $N$ be a \mma. The configuration $(i,m_0, m_1)$ of $N$ is \emph{annotated} as 
$(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$, where, for $j \in \{0,1\}$, 
$k_j$ and $l_j$ stand for the number of increments and decrements performed on $r_j$. % so to store value $m_j$.
%We will denote a configuration after $k_j$ increments and $l_j$ decrements of register 
%$r_j$, () as $(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$. NON SI CAPISCE DA GIRARE
\end{newnotation}

Because we assume 
the value of both registers to be initialized with zero before executing the instructions, 
the following is immediate.

\begin{myfact}\label{f:annota}
Let $(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$  be an annotated Minsky configuration.
We then have, for $n \in \{0,1\}$:
%\begin{enumerate}
% \item 
(i) $k_n = l_n$ if and only if $r_n = 0$; and 
%\item 
(ii) $k_n > l_n$ if and only if  $r_n > 0$.
%\end{enumerate}

\end{myfact}


We are now ready to define the encoding of a configuration of the \mma. 
As mentioned before, the encodings of instructions and registers are put in parallel with
a process that spawns divergent behavior in case of a wrong guess.

\begin{mydefi}[Encoding of Configurations]\label{d:mmconfig}
 Let $N$ be a \mma with registers $r_0$, $r_1$ and instructions  
$(1:I_1), \ldots, (n:I_n)$. For $j \in \{0,1\}$, 
suppose fresh, pairwise different names $u_j$, $p_1, \ldots, p_n$, $set_j$, $loop$, $check_j$. 
Also, let \textsc{Div} be a divergent process (e.g. $\overline{w} \parallel !w.\overline{w}$).
Given the encodings in Figure \ref{f:encod-hof}, we have: %\vspace{-1mm}
\begin{enumerate}
 \item The initial configuration $(1, 0^{0, 0}, 0^{0, 0})$ of $N$ is encoded as:
%{\small
%\vspace{-2mm}
$$\encp{(1, 0^{0, 0}, 0^{0, 0}) }{\mms} ::=
\overline{p_1} \parallel \prod^{n}_{i=1} \encp{(i:I_i)}{\mms} \parallel 
loop.\textsc{Div} \parallel \Ho{set_0}{\nil} \parallel \Ho{set_1}{\nil} \ .\vspace{-2mm}$$
%}
\item A configuration $(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$ of $N$%, after $k_j$ increments and $l_j$ decrements of register 
%$r_j$, %(for $j \in \{0,1\}$) 
is encoded as:%\vspace{-4mm}
%{\small
\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1}) }{\mms} & =  &
\overline{p_i} \parallel \encp{r_0 = m_0}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel   \prod^{n}_{i=1} \encp{(i:I_i)}{\mms} \parallel \\
& & loop.\textsc{Div} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} ~.\vspace{-2mm}
\end{eqnarray*}
%}
\end{enumerate}
\end{mydefi}


\subsection{Correctness of the Encoding}
We formalize the correctness of our encoding by means of two lemmas
ensuring completeness (Lemma \ref{lem:corr}) and soundness (Lemma \ref{lem:sound}). 
Both these lemmas give us Theorem \ref{th:minsky}.
We begin by 
%\longv{
formalizing the following intuition: removing the program counter from 
the encoding of configurations leads to a stuck process.

\begin{proposition} \label{lem:blockinstr}
Let $N$ be a \mma with registers $r_0$, $r_1$ and instructions 
$(1:I_1), \ldots, (n:I_n)$. Given the encodings in Figure \ref{f:encod-hof}, let $P$ be defined as:
\begin{eqnarray*}
P & = & \encp{r_0 = m_0}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel   \prod^{n}_{i=1} \encp{(i:I_i)}{\mms} \parallel 
loop.\textsc{Div} \parallel \\
& & \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]}\, .
\end{eqnarray*}
Then $P \nrightarrow$.
\end{proposition}

\begin{proof}
 Straightforward by the following facts: 
\begin{enumerate}
 \item Processes $\encp{r_0 = m_0}{\mms}$, $\encp{r_1 = m_1}{\mms}$, $\Ho{set_0}{\textsc{Log}_0[k_0,l_0]}$, and $\Ho{set_1}{\textsc{Log}_1[k_1,l_1]}$ are output actions that cannot evolve on their own.
 \item For every $i \in 1..n$, each $\encp{(i:I_i)}{\mms}$ is an input-guarded process, waiting for an activation signal on  
%Hence, all instructions are guarded by the corresponding program counter 
$p_i$.
\item $loop.\textsc{Div}$ is an input-guarded process, and every output on $loop$ appears guarded inside a decrement instruction.
\end{enumerate}
% \qed
\end{proof}




% The following proposition formalizes an invariant condition on the number 
% of decrements and increments stored by the logs in the encoding.
% This invariant will be useful later, when formalizing the conditions under which
% our encoding leads to divergent computation.
% 
% \begin{myprop}\label{p:num_of_dec}
% Let $(1,0,0)$ be the initial configuration of a \mma $N$.
% For all $P$ such that 
% \begin{enumerate}
%  \item $\encp{(1,0,0)}{\mms} \pired^* P$;  
% \item for some $S$, $P \equiv \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} \parallel S$.
% \end{enumerate}
% Then, for $j \in \{0,1\}$, 
% it holds that $k_j \geq l_j$.
% \end{myprop}
% 
% \begin{proof}
% By contradiction, assuming that $k_j < l_j$.
% For $k_j < l_j$ to hold there was an execution in which $k_j = l_j$ and then a \textsc{Dec} was added to one of the logs.
% Using Fact \ref{f:annota} we know that this means that $r_j = 0$. In turn, 
% by the encoding of counters, we know this means that there is
% no top-level occurrence of $\outC{u_j}$.
% By inspection of the structure of the encoding of the decrement instruction, 
% we know that a process \textsc{Dec} can only be added to one of the logs 
% if the encoding of decrement takes the branch
% \[
% \overline{loop} \parallel u_j . loop . set_j(x) . \Ho{set_j}{x \parallel \mbox{\textsc{Dec}}_j} \parallel \overline{p_{i+1}} \, .
% \]
% Observe that a modification to the log can only occur in the event in which there is a top-level occurrence
% of $\outC{u_j}$. Indeed, the input on $set$ (which modifies the log) is guarded by $u_j$.
% Therefore, in order to modify the log an output on $u_j$ is indispensable; such an output is not available, so we reach a contradiction.
% \qed
% \end{proof}

\begin{myrem}
Before entering into the proofs two remarks are in order.
First, with a little abuse of notation, 
we use notation $Q \nrightarrow$ 
%to denote that there is no $Q'$ such that $Q \pired Q'$, both in \hof\ and 
also for configurations of Minsky machines. 
Second, the encoding of input-guarded replication we have introduced here
takes two reductions to release a new copy of the guarded process (see Definition \ref{defin:grepl}
and Lemma \ref{l:corr-repl}).
However, for the sake of simplicity, in proofs we shall denote only one of such reductions. 
In any case, it must be taken into account that two reductions are required.
\end{myrem}

We now state that the encoding is correct.

\longv{
\begin{lemma}[Completeness]\label{lem:corr}
 Let $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1})$ be an (annotated) configuration of a \mma $N$. Then, it holds:%\vspace{-1mm}
\begin{enumerate}
 \item If $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1})\nrightarrow$  then $\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms} \nrightarrow$
 \item If $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1})\minskred (i', m_0'^{~k'_0,l'_0}, m_1'^{~k'_1,l'_1})$ then, for some $P$, 
$\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms} \pired^* P \equiv \encp{(i', m_0'^{~k'_0,l'_0}, m_1'^{~k'_1,l'_1}) }{\mms}$
\end{enumerate}
\end{lemma}
%\shortv{
%\begin{proof}\vspace{-1.5mm}
% By case analysis on the type of instruction performed by $N$. \qed
%\end{proof}}
\begin{proof}
% \begin{enumerate}
%  \item 
For (1) we have that if $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1})\nrightarrow$  then, by definition of \mma, the program counter $p$ is set to a non-existent instruction; i.e.,  for some $i \not \in [1..n]$, $p= i$. 
Therefore, in process $\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms}$ no instruction is guarded by $p_i$. 
The thesis then follows as
an easy consequence of Proposition  \ref{lem:blockinstr}.
%and  since instruction $i$ does not exist in $N$ hence no instruction is guarded by $p_i$. Hence the thesis.


For (2) we proceed by a case analysis on the instruction performed by $N$. 
Hence, we distinguish three cases corresponding to the behaviors associated to rules \textsc{M-Jmp}, \textsc{M-Dec}, and \textsc{M-Inc}. Without loss of generality we assume instructions on register $r_0$.
%Without loss of generality we consider only instructions on register $r_0$ (the proof is symmetric for register $r_1$) AGGIUNGERE NOTA SU !
 \begin{description}
  \item[Case \textsc{M-Inc}] We have a \mma configuration $(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$ 
with $(i: \mathtt{INC}(r_0))$. By definition, its encoding into \hof is as follows:
\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms} & = & \overline{p_i} \parallel \encp{r_0 = m_0}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel  \prod_{h=1..n, i \not = h} \encp{(h:I_h)}{\mms} \parallel \\ 
 & & !p_i.(\overline{u_0} \parallel set_0(x) . (\Ho{set_0}{x \parallel \textsc{Inc}} \parallel \overline{p_{i+1}}))  \parallel \\
 & & loop.\textsc{Div} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]}
\end{eqnarray*}

We begin by noting that the program counter $p_i$ is consumed by the encoding of the
instruction $i$. As a result, process $\overline{u_0}$ %and $\outC{p_{i+1}}$ 
is left unguarded; 
this represents the actual increment. % and the invocation of the next instruction, respectively. 
We then have:

\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}  & \pired  \equiv & \encp{r_0 = m_0 + 1}{\mms} \parallel 
 set_0(x) . (\Ho{set_0}{x \parallel \textsc{Inc}} \parallel \overline{p_{i+1}}) \parallel \\
& & \Ho{set_0}{\textsc{Log}_0[k_0,l_0]} \parallel S = T
\end{eqnarray*}
where $S$ stands for the rest of the system, i.e.,
\[
S = \encp{r_1 = m_1}{\mms} \parallel \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel 
loop.\textsc{Div} \parallel  \Ho{set_1}{\textsc{Log}_1[k_1,l_1]}. 
\]

Now there is a synchronization on $set_0$ for updating the log of register $r_0$.
This leaves  $\outC{p_{i+1}}$ unguarded, so the next instruction is enabled.
%and after a synchronization on $set_0$, $T_1 \minskred T_2$ where 
%\begin{multline*}
\begin{eqnarray*}
T & \pired & \overline{p_{i+1}} \parallel \encp{r_0 = m_0 + 1}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel  \\
& & loop.\textsc{Div} \parallel \Ho{set_0}{\textsc{Log}_0[k_0+1,l_0]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} = T' \, .
\end{eqnarray*}
We notice that $T' \equiv \encp{(i+1, m_0+1^{k_0+1, l_0}, m_1^{k_1,l_1})}{\mms}$, as desired.



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\item[Case \textsc{M-Dec}] 
We have a \mma configuration $(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$  
with 
$r_0> 0$ and 
$(i: \mathtt{DECJ}(r_0,s))$. By definition, its encoding into \hof is as follows:
\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms} & = & \overline{p_i} \parallel \encp{r_0 = m_0}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel  \prod_{h=1..n,i\not = h} \encp{(h:I_h)}{\mms} \parallel \\
& & !p_i.(\overline{loop} \parallel u_0 . loop . set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}})) \parallel \\
  & &   !p_i . set_0(x) . (x \parallel \Ho{set_0}{x} \parallel \overline{p_s}) \parallel \\
 & & loop.\textsc{Div} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} 
\end{eqnarray*}

In $\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$ there is an internal choice on the program counter $p_i$.
This represents a guess on the value of $r_0$:
$\outC{p_i}$ can either synchronize with the first input-guarded process 
(so as to perform the actual decrement of the register) 
or with the second one (so as to perform a jump). 
Let us suppose that $\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$ makes the right guess in this case, i.e.,
$\overline{p_i}$ synchronizes with the first input-guarded process. 
We then have:

%The program counter is consumed by the encoding of the instruction $i$:
%after a synchronization on $p_i$, $S \minskred S_1$ where 
\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms} & \pired & \encp{r_0 = m_0}{\mms} \parallel  \\
& &   \overline{loop} \parallel u_0 . loop . set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}}) \parallel\\
& &  \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel S = T_1
\end{eqnarray*}
where $S$ stands for the rest of the system, i.e.,
\begin{eqnarray*}
S & ~=~ & \encp{r_1 = m_1}{\mms} \parallel  \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel 
 loop.\textsc{Div} \parallel  \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} \parallel \\
& &   !p_i . set_0(x) . (x \parallel \Ho{set_0}{x} \parallel \overline{p_s}) \, .
\end{eqnarray*}

Since we have assumed that $r_0 > 0$, we are sure that a synchronization on 
$u_0$ can take place, and thus the value of $r_0$ decreases.
Immediately after, there is also a synchronization on $loop$. More precisely, we have 

\begin{eqnarray*}
T_1 & \pired^2 & \encp{r_0 = m_0-1}{\mms} \parallel   
%& &   
set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}}) \parallel S = T_2 \,.
\end{eqnarray*} 
% where $S'$ is the rest of the system, represented by $S$ (as in the previous case) along with the input-guarded process that was not involved in the synchronization.
% Notice that $S'$ is stuck:
% \[
% S' = S \parallel !m_i . set_0(x) . (x \parallel \Ho{set_0}{x} \parallel \overline{p_s}) 
% \]
% 

% 
% \[
% T_2 \pired^2  \encp{r_0 = m_0-1}{\mms} \parallel   %\\
% %& &   \overline{loop} \parallel u_j . loop . 
% set_0(x) . \Ho{set_0}{x \parallel \mbox{\textsc{Dec}}_0} \parallel \overline{p_{i+1}} \parallel S' = T_3 \,.
% \]
Now the update of the log associated to $r_0$ can take place, and 
a synchronization on $set_0$ is performed. As a result, 
the process $\outC{p_{i+1}}$ becomes unguarded
and the next instruction is enabled:

\begin{eqnarray*}
T_2 & \pired \equiv  & \overline{p_{i+1}} \parallel \encp{r_0 = m_0 -1}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel  \prod_{h=1}^{n} \encp{(h:I_h)}{\mms} \parallel \\
 & & loop.\textsc{Div} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0+1]}\parallel \Ho{set_1}{\textsc{Log}_1[k_1,l_1]}  = T_3 \, .
\end{eqnarray*}
 
Clearly, $T_3 \equiv \encp{(i+1, m_0-1^{k_0, l_0+1}, m_1^{k_1,l_1})}{\mms}$, as desired.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\item[Case \textsc{M-Jmp}] 
This case is similar to the previous one. We have a 
\mma configuration $(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})$ with $(i: \mathtt{DECJ}(r_0,s))$. 
In this case, $m_0 = 0$. Hence, using Fact \ref{f:annota} we have that $k_0 = l_0$.

Again, we start from $\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$.
% After synchronizing on $p_i$ and spawning a new copy of (the encoding of) the instruction $i$,
% the process evolves to $T_1$. Once in $T_1$ 
There is an internal choice on the name $p_i$. 
Let us suppose  that 
$\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$
makes the right guess, which in this case corresponds to 
the synchronization of $\outC{p_i}$ and the second input-guarded process. 
We then have 
\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms} & \pired & \encp{r_0 = m_0}{\mms} \parallel   
set_0(x) . (x \parallel \Ho{set_0}{x} \parallel \overline{p_{s}}) \parallel \\
& &  \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel S' = T_1 \,.
\end{eqnarray*} 
%where $S'$ is the rest of the system
where $S'$ stands for the rest of the system, i.e.,
\begin{eqnarray*}
S' & ~=~ & \encp{r_1 = m_1}{\mms} \parallel  \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel 
 loop.\textsc{Div} \parallel  \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} \parallel \\
& &   !p_i. (\overline{loop} \parallel u_0 . loop . set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}}))  \, .
\end{eqnarray*}

Now there is a synchronization on $set_0$. 
As a result, the content of the 
log is left at the top-level and hence 
executed. It is not lost, however, as it is still preserved inside an output on $set_0$: 
\begin{eqnarray*}
T_1 & \pired   \equiv & \overline{p_{s}} \parallel \encp{r_0 = m_0}{\mms} \parallel \encp{r_1 = m_1}{\mms} \parallel  \prod_{h=1}^{n} \encp{(h:I_h)}{\mms} \parallel \\
 & & loop.\textsc{Div} \parallel \prod^{k_0} \textsc{Inc} \parallel  \prod^{l_0} \textsc{Dec} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel \\
& & \Ho{set_1}{\textsc{Log}_1[k_1,l_1]}  = T_2 \, .
\end{eqnarray*}
Recall that $k_0 = l_0$. Starting in $T_2$, we have that $k_0$ 
synchronizations on $loop$ take place; each of these corresponds to the 
interaction between
a process $\textsc{Inc}$ and a corresponding process $\textsc{Dec}$. 
%Half of these interactions correspond to synchronizations on $check_0$, whereas the rest are synchronizations on $loop$. 
All of these processes are consumed.
We then have that there exists a $T_3$
such that (i) $T_2 \pired^{k_0} T_3$ and 
(ii) $T_3 \equiv \encp{(s, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$, as wanted.
 \end{description}
% \end{enumerate}
% \qed
\end{proof}


\begin{proposition}\label{prop:aux-sound}
Let $P_0 = \encp{(i,r_0^{k_0,l_0},r_1^{k_1,l_1})}{\mms}$ be the encoding of a \mma configuration
as in Definition \ref{d:mmconfig}, 
with $(i:\mathtt{DEC}(r_j,s))$ and $k_j > l_j$ (for $j \in \{0,1\}$).

Suppose $P_0 \pired^* P$ such that 
\begin{eqnarray*}
 P & \equiv & \encp{r_0 = m_0}{\mms} \parallel \prod^{k_j} \textsc{Inc} \parallel  
\prod^{l_j} \textsc{Dec} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]} \parallel \overline{p_{s}} \parallel  \\
& & loop.\textsc{Div} \parallel S
\end{eqnarray*}
and where $S$ is defined as 
\begin{eqnarray*}
S & ~=~ & \encp{r_1 = m_1}{\mms} \parallel  \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel 
   \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} \parallel \\
& &   !p_i .(\overline{loop} \parallel u_0 . loop . set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}}))  \, .
\end{eqnarray*}
Then $P$ does not converge.
\end{proposition}

\begin{proof}[Proof (Sketch)]
Without loss of generality, 
we focus on the case in which $j=0$ ---the proof is analogous for $j=1$---
and assume that $k_0 = l_0 +1$.
The thesis follows by noticing that 
the only possibilities for behavior are given by
sub-processes 
$\prod^{k_0} \textsc{Inc}$, $\prod^{l_0} \textsc{Dec}$, and 
$loop.\textsc{Div}$ of $P$.
In fact, using Definition \ref{d:mmconfig} it is possible to infer that 
all the other processes cannot reduce on their own. 
The same definition decrees that 
$\textsc{Inc} = \outC{loop}$
and $\textsc{Dec} = loop$.
It is easy to see that divergent behavior can be spawned 
by any of the $k_0$ occurrences of \textsc{Inc}.
Notice that there is always at least one occurrence of \textsc{Inc}
ready to spawn divergence:
even if some of these occurrences reduce with corresponding 
input actions on $loop$, 
since $k_0 = l_0 + 1$,
in every computation there is at least one output $\outC{loop}$ ready to reduce
with $loop.\textsc{Div}$.
Since no other process can reduce with the free $\outC{loop}$,
this means there is always a computation in which it reduces with the process 
$loop.\textsc{Div}$. Hence, divergent behavior is spawned in all cases, and we are done.
% \qed
\end{proof}

% \begin{mydefi}
%  Given an \hof process $P$, we say that $P$ evolves 
% \emph{deterministically up-to structural congruence}
% if 
% \end{mydefi}



\begin{lemma}[Soundness]\label{lem:sound}
Let  $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1})$ be a %a \mma and one of its 
configuration of a \mma $N$.
Given $\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms}$, for some $n>0$ and  process $P \in \hof$, we have that:\vspace{-1mm}
\begin{enumerate}
 \item If $\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms} \pired^n P$ then either:
\begin{itemize}
  \item $P \equiv \encp{(i', m_0'^{k'_0,l'_0}, m_1'^{k'_1,l'_1}) }{\mms}$ and $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1})\minskred (i', m_0'^{k'_0,l'_0}, m_1'^{k'_1,l'_1})$, or
 \item  $P$ is a divergent process.
\end{itemize}
\item For all $0 \leq m < n$, if $\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms} \pired^m P$ then, for some $P'$, $P \pired P'$.
\item If $\encp{(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) }{\mms} \nrightarrow $ then $(i, m_0^{k_0,l_0}, m_1^{k_1,l_1}) \nrightarrow$.
\end{enumerate}
\end{lemma}
%\shortv{
%\begin{proof}\vspace{-1.5mm}
% By case analysis on the structure of $\encp{(i, m_0, m_1) }{\mms}$. \qed
%\end{proof}
%}

\begin{proof}
For (1), since $n >0$, in all cases there is at least one reduction from $\encp{(i, m_0, m_1) }{\mms}$. 
An analysis of the structure of process  $\encp{(i, m_0, m_1) }{\mms}$ 
reveals that, in all cases, the first step corresponds 
to the consumption of the program counter $p_i$. This implies that
there exists an instruction labeled with $i$, that can be executed from the configuration
$(i, m_0 , m_1)$. We proceed by a case analysis on the possible instruction, considering
also the fact that the register on which the instruction acts can hold a value equal or
greater than zero. 
%We exploit the analysis reported for the 
\begin{description}
\item[Case $i:\mathtt{INC}(r_0)$:] Then the process evolves deterministically 
(up to structural congruence)
to $P \equiv \encp{(i+1, m_0+1, m_1) }{\mms}$ in  $n=2$ reductions.
This is illustrated in the analogous case 
in the proof of Lemma \ref{lem:corr}(2).

\item[Case $i:\mathtt{DEC}(r_0,s)$ with $r_0 > 0$:] 
We then have three main reduction sequences; one of them is finite, the other two are infinite.
The finite reduction sequence is illustrated in the analogous case in the proof of 
Lemma \ref{lem:corr}(2),  where it is shown how 
$\encp{(i,r_0^{k_0,l_0},r_1^{k_1,l_1})}{\mms}$
may perform a sequence of 
$n=4$ reductions
that leads to 
$\encp{(i+1, m_0-1, m_1) }{\mms}$.

The remaining (infinite) reduction sequences arise from the 
internal choice 
in $p_i$ that takes place in 
$\encp{(i,r_0^{k_0,l_0},r_1^{k_1,l_1})}{\mms}$.
The first such sequence arises when %from the synchronization between 
$\outC{p_i}$ 
synchronizes with the first input-guarded replication on $p_i$ (the one implementing decrement); 
this is as in the analogous case in the proof of 
Lemma \ref{lem:corr}(2).
This synchronization leads to process $T_1$ in which  
the diverging computation arises 
from the synchronization between
the process $\outC{loop}$  
and the process  $loop.\textsc{Div}$ that spawns divergent behavior and is always in parallel.

The second infinite sequence arises 
when $\outC{p_i}$ 
synchronizes with the second input-guarded replication on $p_i$ (the one implementing jump).
%The diverging reduction sequence takes place when the value of the register is greater than zero and a jump is performed.
%Let us elaborate on this possible execution. 
Notice that since $r_0 > 0$, using Fact \ref{f:annota}, we know that $k_0 > l_0$. 
It is sufficient to assume that $k_0 = l_0 +1$. 
We have 

\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms} & \pired & \encp{r_0 = m_0}{\mms} \parallel  set_0(x) . (x \parallel \Ho{set_0}{x} \parallel \overline{p_s}) \parallel \\
& &  loop.\textsc{Div} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]}\parallel S = T_1
\end{eqnarray*}
where $S$ stands for the rest of the system, i.e.,
\begin{eqnarray*}
S & ~=~ & \encp{r_1 = m_1}{\mms} \parallel  \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel 
   \Ho{set_1}{\textsc{Log}_1[k_1,l_1]} \parallel \\
& &   !p_i .(\overline{loop} \parallel u_0 . loop . set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel \overline{p_{i+1}}))  \, .
\end{eqnarray*}

In $T_1$ there is a synchronization on $set_0$. Using the definition of \textsc{Log}, we have:

\begin{eqnarray*}
 T_1 & \pired & \encp{r_0 = m_0}{\mms} \parallel \prod^{l_0 + 1} \textsc{Inc} \parallel  
\prod^{l_0} \textsc{Dec} \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]} \parallel \overline{p_{s}} \parallel  \\
& & loop.\textsc{Div} \parallel S = T_2 \, .
\end{eqnarray*}

% At this point there are $l_0$ synchronizations between the copies of $\textsc{Dec}$
% and those of $\textsc{Inc}$. There is a copy of $\textsc{Inc}$ that remains without synchronizing and hence we have:
% 
% \begin{eqnarray*}
%  T_2 & \pired^{l_0} & \encp{r_0 = m_0}{\mms} \parallel 
% \outC{loop} %\parallel check_0.loop 
%  \parallel \Ho{set_0}{\textsc{Log}_0[k_0,l_0]} \parallel \overline{p_{s}} \parallel  \\
% & & loop.\textsc{Div} \parallel S' = T_3 \, .
% \end{eqnarray*}
% 

%It is easy to see that in $T_3$ a synchronization on $loop$ to produce divergence. In fact, 
The above puts us in the scenario of 
Proposition \ref{prop:aux-sound},
which ensures that whenever 
a configuration is reached in which  
the number of increments is greater than the 
number of decrements (as in $T_2$ above), the corresponding \hof process does not converge.
This concludes the analysis for the case of decrement.



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\item[Case $i:\mathtt{DEC}(r_0,s)$ with $r_0 = 0$:] 
Also in this case we have three main reduction sequences: one of them is finite,
while the other two are infinite. 
The finite reduction sequence is illustrated in 
the analogous case in the proof of 
Lemma \ref{lem:corr}(2),
where it is shown how 
$\encp{(i,r_0^{k_0,l_0},r_1^{k_1,l_1})}{\mms}$
may perform a sequence of 
$n = 2+l_0$ reductions
that leads to 
$\encp{(s, m_0, m_1) }{\mms}$.

The two infinite reduction sequences arise similarly as in the previous case.
The first one arises after the two reduction steps that lead to process $T_3$
in the analogous case in the proof of 
Lemma \ref{lem:corr}(2).
Indeed, only a single occurrence of process $\textsc{Inc}$ is sufficient 
to synchronize with process $loop.\textsc{Div}$ and to produce divergent behavior.

The second infinite reduction sequence arises when the process makes a wrong guess on the content of the register. 
Again, we carry out our analysis starting 
from process $\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$, given in the analogous case of the proof
of Lemma \ref{lem:corr}(2). 
%Using Fact \ref{f:annota} we know that $k_0 = l_0$.
After the synchronization on $p_i$ we have

\begin{eqnarray*}
\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms} & \pired & \encp{r_0 = m_0}{\mms} \parallel   \\
& & \overline{loop} \parallel u_0 . loop . set_0(x) . (\Ho{set_0}{x \parallel \mbox{\textsc{Dec}}} \parallel 
\overline{p_{i+1}}) \parallel \\
& &  \Ho{set_0}{\textsc{Log}_0[k_0,l_0]} \parallel loop.\textsc{Div} \parallel S' = T_1 
\end{eqnarray*} 
where $S'$ is the rest of the system, i.e.
\begin{eqnarray*}
S' &=&  !p_i.(set_0(x) . (x \parallel \Ho{set_0}{x} \parallel \overline{p_{s}}))  \parallel \encp{r_1 = m_1}{\mms} \parallel  \\
& & \prod^{n}_{h=1} \encp{(h:I_h)}{\mms} \parallel  \Ho{set_1}{\textsc{Log}_1[k_1,l_1]}. 
\end{eqnarray*}

It is easy to observe that since $r_0 = 0$ there is no output on $u_0$ that can synchronize
with the input in $T_1$. In fact, the only possible synchronization is on $loop$, 
which leaves the divergent process unguarded. 
So we have that 
%$T_2 \pired \equiv \textsc{Div}$.
%Hence, $\encp{(s, m_0, m_1) }{\mms} \pired^n \equiv \textsc{Div}$ with $n = 2$, 
in two reduction steps $\encp{(i, m_0^{k_0, l_0}, m_1^{k_1,l_1})}{\mms}$
evolves into a diverging process, and the thesis holds.
\end{description}


Notice that statement (2) follows easily from the above analysis.

As for (3), using Proposition \ref{lem:blockinstr} we know that if $\encp{(i, m_0, m_1) }{\mms} \nrightarrow $ then it is because $p_i$ is not enabling any instruction. Hence, $\encp{(i, m_0, m_1) }{\mms}$ corresponds to the encoding of a halting instruction and we have that 
$(i, m_0, m_1) \nrightarrow $,  as desired.
% \qed
\end{proof}

Summarizing Lemmas \ref{lem:corr} and \ref{lem:sound} we have the following:}

\begin{theorem}\label{th:minsky}
Let $N$ be a \mma with registers $r_0 = m_0$, $r_1=m_1$, instructions  
$(1:I_1), \ldots, (n:I_n)$, and configuration $(i, m_0, m_1)$. 
%Let $P$ be the process $\encp{(i, m_0, m_1) }{\mms}$.
%Then $(i, m_0, m_1)$ terminates if and only if $P$ converges.
Then $(i, m_0, m_1)$ terminates if and only if process $\encp{(i, m_0, m_1) }{\mms}$ converges.
\end{theorem}


  

\longv{As a consequence of the results above we have that convergence is undecidable.}
\shortv{As a consequence of this theorem we have that convergence is undecidable.}

\begin{corollary}  
Convergence  is undecidable in \hof.  
\end{corollary}  
  
  
