The proof relies on two auxiliary results:
%We divide the proof of correctness into two properties:
completeness (Lemma \ref{l:compl-pase2}) and soundness (Lemma \ref{l:sound-pase2}).
Completeness relies on the auxiliary Lemma \ref{lem:correctstep}.

%We take for grant that given the process $\encp{N}{\mmn{2}}$ there exists a computation that after having generated a finite number of $\outC{f}$ and $\outC{b}$ makes available the program counter $p_1$ that enables the starting of the simulation of the given \mm and the input signal on $e$.
%This also accounts in observing that either $\encp{N}{\mmn{2}}$ has an infinite computation and therefore the barb on $e$ is exposed infinitely consecutive times or $\encp{N}{\mmn{2}}$ has a finite computation and $\encp{N}{\mmn{2}} \negbarbw{e}$. No other case is possible.
%
We first introduce the notion of 
encoding of a MM
configuration 
into \evols{2}.
Notice that, in addition to the encodings of registers and instructions, it includes a number of resources 
%where we abstract from the processes that handles resources: i.e. the occurrences of 
$\outC{f}$ and $\outC{b}$ 
which are always available during the execution of the machine:
%and the processes that handle their generation, these are generically denoted with $C$.

%\begin{definition}[Configuration]\label{def:confe2}
%Let $N$ be a \mm and $(i,m_0, m_1)$ one of its configuration.
%Then $\encp{(i, m_0, m_1)}{\mmn{2}}$ is defined as:
%$$
% \outC{p_i} \parallel e  \parallel
%\encp{r_0 = m_0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \prod^{n}_{i=1} \encp{(i:I_i)}{\mmn{2}} \parallel C
%$$
%where $\encp{r_j = m_j}{\mmn{2}}$ for $j\in[0,1]$ and  $\encp{(i:I_i)}{\mmn{2}}$ for $i\in[1..n]$ are defined in Table~\ref{tab:minskyccsbs}. 
%\end{definition}

\begin{definition}\label{def:confe2}
Let $N$ be a \mm 
with 
registers $r_j ~(j \in \{0,1\})$ and 
instructions $(1:I_1), \ldots, (n:I_n)$.
The encoding of a configuration $(i,m_0, m_1)$ of $N$, denoted
$\encp{(i, m_{0}, m_{1})}{\mmn{2}}$, is defined as:
\[
 \outC{p_i} \parallel e \parallel \encp{r_0 = m_0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \prod^{n}_{i=1} \encp{(i:I_i)}{\mmn{2}} \parallel \reso{\alpha,\beta,\gamma}
\]
where 
\begin{itemize}
\item $\reso{\alpha,\beta,\gamma} \stackrel{\textrm{def}}{=} \prod^{\alpha} \outC{f} \parallel \prod^{\beta} \outC{b} \parallel \prod^{\gamma} \outC{g} \parallel !a.(\outC{f} \parallel \outC{b} \parallel \outC{a}) \parallel !h.(g.\outC{f} \parallel \outC{h})$, with  $\alpha,\beta,\gamma \geq 0$
\item the encodings 
$\encp{r_j = m_j}{\mmn{2}}$ and  
$\encp{(1:I_1)}{\mmn{2}}, \ldots, \encp{(n:I_n)}{\mmn{2}}$ are as in Table~\ref{tab:minskyccsbs}.
\end{itemize}
\end{definition}
Notice that $\reso{\alpha,\beta,\gamma}$ abstracts the evolution of 
process \textsc{Control} in Table~\ref{tab:minskyccsbs}, and the resources
that it produces and maintains (namely, $\alpha$ copies of $\outC{f}$, $\beta$ copies of $\outC{b}$, and $\gamma$ copies of $\outC{g}$).


\begin{remark}\label{rem:enough}
As we have discussed, the presence of copies of $\outC{f}$ is required for the execution of increment and
decrement-and-jump instructions. 
In their absence, the encoding of the \mm would reach a deadlocked state.
Such outputs are produced at the beginning of the execution of the encoding of a \mm, by means of a replicated process.
In the proofs below, we assume that the initialization of the encoding always produces enough copies of $\outC{f}$
so as to ensure the existence of a correct simulation of the machine. 
That is to say, we assume that the absence of copies of $\outC{f}$ is not a possible source of deadlocks.
\end{remark}

We prove that given a \mm $N$ there exists a 
computation of process $\encp{N}{\mmn{2}}$ which correctly mimics its behavior.

\begin{lemma}\label{lem:correctstep}
Let $(i, m_0, m_1)$ be a configuration of a \mm $N$. 
\begin{enumerate}
\item If $(i, m_0, m_1)\minskred (i', m_0', m_1')$ then, for some process $P$, 
it holds that $$\encp{(i, m_0, m_1) }{\mmn{2}} \pired^* P \equiv \encp{(i', m_0', m_1') }{\mmn{2}}$$
\item If $(i, m_0, m_1)\notminskred$ then $\encp{(i, m_0, m_1) }{\mmn{2}}\! \Downarrow_{\outC{p_1}}^1$.
\end{enumerate}
\end{lemma}
\begin{proof}
\emph{\underline{Item (1)}:}
We proceed by a case analysis on the instruction  performed by the \mma.
Hence, we distinguish three cases corresponding to the behaviors associated to rules
\textsc{M-Inc}, \textsc{M-Dec}, and \textsc{M-Jmp}.
Without loss of generality, we restrict our analysis to operations on register $r_0$. 
%We also assume that in the initial phase of the simulation enough resources of kind $f$ and $b$ are produced, those resources are thus ignored.

\begin{description}
\item[Case \textsc{M-Inc}:] We have a Minsky configuration $(i, m_0, m_1)$ with 
$(i: \mathtt{INC}(r_0))$. By Definition \ref{def:confe2}, its encoding into \evols{2} is as follows:
\begin{eqnarray*}
\encp{(i, m_0, m_1) }{\mmn{2}}  & = & 
 \outC{p_i} \parallel e  \parallel
\encp{r_0 = m_0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \\
& & !p_i.f.(\outC{g} \parallel b.\outC{inc_0}.\outC{p_{i+1}}) \parallel \prod_{l=1..n,l\neq i} \encp{(l:I_l)}{\mmn{2}} \parallel \reso{\alpha,\beta,\gamma}
\end{eqnarray*}

We then have:
$$\encp{(i, m_0, m_1) }{\mmn{2}}   \pired e  \parallel
\encp{r_0 = m_0}{\mmn{2}} \parallel f.(\outC{g} \parallel b.\outC{inc_0}.\outC{p_{i+1}}) \parallel \reso{\alpha,\beta,\gamma} \parallel S \! =\! P$$
where $S = \encp{r_1 = m_1}{\mmn{2}} \parallel \prod_{l=1}^{n} \encp{(l:I_l)}{\mmn{2}}$
stands for the rest of the system.
Starting from $P$, a possible sequence of reductions is the following:
\begin{eqnarray*}
P \! \! \! & \pired & e  \parallel \encp{r_0 = m_0}{\mmn{2}} \parallel b.\outC{inc_0}.\outC{p_{i+1}} \parallel \reso{\alpha-1,\beta,\gamma+1} \parallel S\\
& = & e  \parallel \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}}\overline{u_0} \parallel \outC{z_0} }  \parallel b.\outC{inc_0}.\outC{p_{i+1}} \parallel \reso{\alpha-1,\beta,\gamma+1} \parallel S\\
& \pired & e  \parallel \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}}\overline{u_0} \parallel \outC{z_0} }  \parallel \outC{inc_0}.\outC{p_{i+1}} \parallel \reso{\alpha-1,\beta-1,\gamma+1}\parallel S\\
& \pired \equiv \! \! & e  \parallel \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}+1}\overline{u_0} \parallel \outC{z_0} } \parallel \outC{p_{i+1}} \parallel \reso{\alpha-1,\beta-1,\gamma+1} \parallel S = P'
\end{eqnarray*}
It is easy to see that $P' \equiv \encp{(i+1, m_0+1, m_1)}{\mmn{2}}$, as desired. 
Observe how the number of resources changes: in the first reduction, a copy of $\outC{f}$ is consumed, and  a copy of $\outC{g}$ is released in its place.
Notice that we are assuming that $\beta > 0$, that is, that there is at least one copy of $\outC{b}$.
In fact, since the instruction only takes place after a synchronization on $b$ (i.e., the second reduction above)
the presence of at least one copy of $\outC{b}$ in $\reso{\alpha-1,\beta,\gamma+1}$ is essential to avoid  deadlocks.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\item[Case \textsc{M-Dec}:] We have a Minsky configuration $(i, m_0, m_1)$ with 
$m_0 > 0$ and 
$(i: \mathtt{DEC}(r_0,s))$. By Definition \ref{def:confe2}, its encoding into \evols{2}  is as follows:
\begin{eqnarray*}
\encp{(i, m_0, m_1) }{\mmn{2}} \! \! \!  & = \!\!\!& 
 \outC{p_i} \parallel e  \parallel
\encp{r_0 = m_0}{\mmn{2}} \parallel 
\encp{r_1 = m_1}{\mmn{2}} \parallel \\
& & !p_i.f.\big(\outC{g} \parallel (u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s})\big)\\
           & & \parallel  \prod_{l=1..n,l\neq i} \encp{(l:I_l)}{\mmn{2}} \parallel \reso{\alpha,\beta,\gamma}
\end{eqnarray*}

We then have:
\begin{eqnarray*}
\encp{(i, m_0, m_1) }{\mmn{2}} \!\!\!&  \pired \equiv\!\!\! &  
f.\big(\outC{g} \parallel (u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s})\big)       \\
           & &\parallel e  \parallel  \encp{r_0 = m_0}{\mmn{2}} \parallel           \reso{\alpha,\beta,\gamma} \parallel S = P
\end{eqnarray*}
where $S =  \encp{r_1 = m_1}{\mmn{2}} \parallel \prod_{l=1}^{n} \encp{(l:I_l)}{\mmn{2}}$ stands for the rest of the system.
Starting from $P$, a possible sequence of reductions is the following:
\begin{eqnarray*} 
P \!\!\!& \pired \!\!\! &  
u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel      \\
           & & e  \parallel  \encp{r_0 = m_0}{\mmn{2}} \parallel           \reso{\alpha-1,\beta,\gamma+1} \parallel S \\
& = \!\!\!&  
u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel      \\
           & & e  \parallel  \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}}\overline{u_0} \parallel \outC{z_0} }  \parallel           \reso{\alpha-1,\beta,\gamma+1} \parallel S ~= P'\\
& \pired \!\!\!&  \outC{p_{i+1}} \parallel  e \parallel \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}-1}\overline{u_0} \parallel \outC{z_0} }  \parallel           \reso{\alpha-1,\beta+1,\gamma+1} \parallel S ~= P'' 
\end{eqnarray*}
It is easy to see that $P'' \equiv \encp{(i+1, m_0-1, m_1)}{\mmn{2}}$, as desired. 
Observe how 
in the last reduction 
the presence of at least  a copy of $\outC{u_{0}}$ in $r_{0}$ is fundamental for releasing both an extra copy of $\outC{b}$
and the trigger for the next instruction.

%\begin{multline*}
% \outC{p_i} \parallel e  \parallel \encp{r_0 = m_0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \prod^{n}_{l=1} \encp{(l:I_l)}{\mmn{2}} \parallel \\
%!p_i.f.\big(\outC{g} \parallel (u_0.(\outC{b} \parallel
%           \outC{p_{i+1}}) +   z_j.\update{r_j}{\component{r_j}{!inc_j.\outC{u_j} \parallel \outC{z_j}}}. \outC{p_s})\big) \parallel C
%\end{multline*}
%One possible reduction is the one that consumes in the order  $p_i$, $f$, and that outputs $\outC{g}$. At this point $u_0$ synchronizes with the content of the adaptable process located at $r_0$ thus consuming one instance of the process $u_0$.
%It is easy to see that the obtained process is equivalent to $\encp{(i+1, m_0-1, m_1)}{\mmn{2}}$, as desired. 
%Notice that there could be other reductions as the process can make a wrong guess, but we can ignore them.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\item[Case \textsc{M-Jmp}:] We have a Minsky configuration $(i, 0, m_1)$ and 
$(i: \mathtt{DEC}(r_0,s))$. By Definition \ref{def:confe2}, 
its encoding into \evols{2} is as follows:

\begin{eqnarray*}
\encp{(i, 0, m_1) }{\mmn{2}} \!\!\!  & = \!\!\!& 
 \outC{p_i} \parallel e  \parallel
 \encp{r_0 = 0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \\
& & !p_i.f.\big(\outC{g} \parallel (u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s})\big)  \\
           & & \parallel \prod_{l=1..n,l\neq i} \encp{(l:I_l)}{\mmn{2}} \parallel \reso{\alpha,\beta,\gamma}
\end{eqnarray*}

We then have:
\begin{eqnarray*}
\encp{(i, 0, m_1) }{\mmn{2}} \!\!\! &  \pired \equiv \!\!\! &  
f.\big(\outC{g} \parallel (u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s})\big)       \\
           & & \parallel e  \parallel  \encp{r_0 = 0}{\mmn{2}} \parallel           \reso{\alpha,\beta,\gamma} \parallel S = P
\end{eqnarray*}
where $S =  \encp{r_1 = m_1}{\mmn{2}} \parallel \prod_{l=1}^{n} \encp{(l:I_l)}{\mmn{2}}$ stands for the rest of the system.
Starting from $P$, a possible sequence of reductions is the following:
\begin{eqnarray*} 
P & \pired &  
u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel      \\
           & & e  \parallel  \encp{r_0 = 0}{\mmn{2}} \parallel           \reso{\alpha-1,\beta,\gamma+1} \parallel S \\
& = &  
u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel      \\
           & & e  \parallel  \component{r_0}{!inc_0.\outC{u_0} \parallel  \outC{z_0} }  \parallel           \reso{\alpha-1,\beta,\gamma+1} \parallel S \\
& \pired &  \update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel e \parallel \component{r_0}{!inc_0.\outC{u_0}}  
 \parallel            \reso{\alpha-1,\beta,\gamma+1} \parallel S \\
 & \pired &  \component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}} \parallel  \outC{p_s} \parallel e 
 \parallel            \reso{\alpha-1,\beta,\gamma+1} \parallel S  = P'
\end{eqnarray*}
It is easy to see that $P' \equiv \encp{(s, 0, m_1)}{\mmn{2}}$, as desired. 
Observe how the number of copies of $\outC{b}$ remains invariant when the \mm is correctly simulated.

%\begin{multline*}
% \outC{p_i} \parallel e  \parallel \encp{r_0 = 0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \prod^{n}_{l=1} \encp{(l:I_l)}{\mmn{2}} \parallel \\
%!p_i.f.\big(\outC{g} \parallel (u_0.(\outC{b} \parallel
%           \outC{p_{i+1}}) +   z_j.\update{r_j}{\component{r_j}{!inc_j.\outC{u_j} \parallel \outC{z_j}}}. \outC{p_s})\big) \parallel C
%\end{multline*}
%One possible reduction is the one that consumes in the order  $p_i$, $f$, and that outputs $\outC{g}$. At this point $z_0$ synchronizes with the content of the adaptable process located at $r_0$ and the register itself is updated and reset to $\encp{r_0 = 0}{\mmn{2}}$
%It is easy to see that the obtained process is equivalent to $\encp{(s, 0, m_1)}{\mmn{2}}$, as desired. 
\end{description}

\noindent \emph{\underline{Item (2)}:} If $(i, m_0, m_1)\notminskred$ then $i$ corresponds to the $\mathtt{HALT}$ instruction.
Then, by Definition \ref{def:confe2}, 
its encoding into \evols{2} is as follows:
\begin{eqnarray*}
\encp{(i, m_0, m_1) }{\mmn{2}}  & = & 
 \outC{p_i} \parallel e  \parallel
 \encp{r_0 = m_0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \\
& & !p_i.\outC{h}.h.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}.\update{r_1}{\component{r_1}{!inc_1.\outC{u_1} \parallel \outC{z_1}}}.\outC{p_1} \parallel \\
           & & \prod_{l=1..n,l\neq i} \encp{(l:I_l)}{\mmn{2}} \parallel \reso{\alpha,\beta,\gamma}
\end{eqnarray*}

We then have: 
\begin{eqnarray*}
\encp{(i, m_0, m_1) }{\mmn{2}}  & \pired  \equiv & 
\outC{h}.h.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}.\update{r_1}{\component{r_1}{!inc_1.\outC{u_1} \parallel \outC{z_1}}}.\outC{p_1} \parallel \\
& & e  \parallel  \encp{r_0 = m_0}{\mmn{2}} \parallel \reso{\alpha,\beta,\gamma} \parallel S = P
\end{eqnarray*}
where $S =  \encp{r_1 = m_1}{\mmn{2}} \parallel \prod_{l=1}^{n}\encp{(l:I_l)}{\mmn{2}}$ stands for the rest of the system.
Starting from $P$, a possible sequence of reductions is the following:
\begin{eqnarray*} 
P & \pired^{*} &  \update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}.\update{r_1}{\component{r_1}{!inc_1.\outC{u_1} \parallel \outC{z_1}}}.\outC{p_1} \parallel \\
& & e  \parallel  \encp{r_0 = m_0}{\mmn{2}} \parallel \reso{\alpha+c,\beta,\gamma-c} \parallel S= P_{1}
\end{eqnarray*}
where the output on $h$ in $P$ interacted with  process $\reso{\alpha,\beta,\gamma}$
so as to replace $c$ outputs on $g$ with $c$ outputs on $f$.
After that,  a synchronization on $h$ took place between 
the evolutions of $\reso{\alpha,\beta,\gamma}$ and of $P$.
We now have:
 \begin{eqnarray*} 
P_{1} & \pired \equiv  & e  \parallel  \encp{r_0 = 0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \\
& & 
\update{r_1}{\component{r_1}{!inc_1.\outC{u_1} \parallel \outC{z_1}}}.\outC{p_1} \parallel \prod_{l=1}^{n} \encp{(l:I_l)}{\mmn{2}} \parallel \reso{\alpha+c,\beta,\gamma-c} \\
& \pired  & e  \parallel  \encp{r_0 = 0}{\mmn{2}} \parallel \encp{r_1 = 0}{\mmn{2}} \parallel \outC{p_1} \parallel \prod_{l=1}^{n} \encp{(l:I_l)}{\mmn{2}} \parallel \reso{\alpha+c,\beta,\gamma-c} 
\end{eqnarray*}
which % easily seen to 
corresponds to $\encp{(1, 0, 0) }{\mmn{2}}$. 
In turn, it can be seen  that $\encp{(i, m_0, m_1) }{\mmn{2}}\! \Downarrow_{\outC{p_1}}^1$.
%Moreover,  $\encp{(i, m_0, m_1) }{\mmn{2}}\! \Downarrow_{e}^1$.
%and has a barb on $p_{1}$, as desired.
%\begin{multline*}
% \outC{p_i} \parallel e  \parallel \encp{r_0 = 0}{\mmn{2}} \parallel \encp{r_1 = m_1}{\mmn{2}} \parallel \prod^{n}_{l=1} \encp{(l:I_l)}{\mmn{2}} \parallel \\
%!p_i.\outC{h}.h.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}.\update{r_1}{\component{r_1}{!inc_1.\outC{u_1} \parallel \outC{z_1}}}.\outC{p_1} \parallel C
%\end{multline*}
%It is easy to see that one possible reduction 
%of $\encp{(i, 0, m_1) }{\mmn{2}}$ 
%is the one that first synchronizes on $p_i$ and 
%then on $h$.
%Then, by means of process $!h.(g.\outC{f} \parallel \outC{h})$ (which is part of $\reso{n,m,k}$) 
%it generates all the consumed outputs on $f$ and synchronizes on $h$. Finally, after having reset both registers to zero, 
%%: i.e. $\encp{r_j = j}{\mmn{2}}$ for $j \in [1,2]$ 
%the resulting process 
%is ready to perform output on $p_1$, which triggers the first instruction of the \mm.
%This allows us to conclude  that $\encp{(i, m_0, m_1) }{\mmn{2}}\! \Downarrow_{p_1}^1$.
\end{proof}


\begin{remark}\label{rem:wrong}
It is instructive to identify the exact point in which an erroneous computation can be made when mimicking the behavior
of a decrement-and-jump instruction. Consider again the process $P'$, as analyzed in the case \textsc{M-Dec} above:
\begin{eqnarray*}
P' & = & u_0.(\outC{b} \parallel
           \outC{p_{i+1}}) +   z_0.\update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel      \\
           & & e  \parallel  \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}}\overline{u_0} \parallel \outC{z_0} }  \parallel           \reso{\alpha-1,\beta,\gamma+1} \parallel S
\end{eqnarray*}
where $S =  \encp{r_1 = m_1}{\mmn{2}} \parallel \prod_{l=1}^{n} \encp{(l:I_l)}{\mmn{2}}$ stands for the rest of the system.
Above, we analyzed the correct computation from $P'$, namely a synchronization on $u_{0}$:
\begin{eqnarray*}
P' & \pired &  \outC{p_{i+1}} \parallel  e \parallel \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}-1}\overline{u_0} \parallel \outC{z_0} }  \parallel           \reso{\alpha-1,\beta+1,\gamma+1} \parallel S ~= P'' 
\end{eqnarray*}
with $P'' \equiv \encp{(i+1,m_{0}-1,m_{1})}{\mmn{2}}$.
The erroneous computation takes place when there is a synchronization on $z_{0}$, rather than on $u_{0}$. We then have:
\begin{eqnarray*}
P' & \pired &  \update{r_0}{\component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}}}. \outC{p_s} \parallel  e \parallel \\
& & \component{r_0}{!inc_0.\outC{u_0} \parallel \prod^{m_{0}}\overline{u_0} }  \parallel           \reso{\alpha-1,\beta,\gamma+1} \parallel S \\
& \pired \equiv &   \outC{p_s} \parallel  e \parallel \component{r_0}{!inc_0.\outC{u_0} \parallel \outC{z_0}} \parallel   \reso{\alpha-1,\beta,\gamma+1} \parallel S~= P'''
\end{eqnarray*}
with $P''' \equiv \encp{(s,0,m_{1})}{\mmn{2}}$.
The side effect of the above erroneous computation can be seen on the number of copies of $\outC{b}$ that remain after
the (erroneous) synchronization on $z_{0}$.
In fact, while a correct computation (as $P''$ above) increases in one the number of such copies, 
in an incorrect computation (as $P'''$ above) the number of copies of $\outC{b}$ remains invariant. 
Notice also that copies of $\outC{b}$ can only be produced at the beginning of the execution of the encoding of the \mm.
This is significant since, as discussed at the end of the case \textsc{M-Inc}, the number of copies of $\outC{b}$ has a direct influence on potential deadlocks  of the encoding of a \mm.
\end{remark}

\begin{lemma}[Completeness]\label{l:compl-pase2}
Let $N$ be a \mm, if $N$ terminates then $\encp{N}{\mmn{2}} \barbw{e}$.
\end{lemma}
\begin{proof}
Recall that $N$ is said to terminate if there exists a computation 
$$(1,0,0) \minskred ^* (h, 0, 0)$$
such that $(h: \mathtt{HALT})$.
Lemma \ref{lem:correctstep} ensures the existence of a process $P$ such that 
$\encp{(1, 0, 0)}{\mmn{2}} \pired ^* P \equiv \encp{(h, 0, 0)}{\mmn{2}}$, with 
$P \Downarrow_{\outC{p_{1}}}^1$.
This  ensures that every time that the encoding of $N$ reaches $\mathtt{HALT}$ the simulation is restarted.
Therefore,  termination of $N$ ensures that $\encp{N}{\mmn{2}}$
has an infinite computation:
since the encoding always 
exhibits barb $e$, we can conclude that $\encp{N}{\mmn{2}} \barbw{e}$.
%Let us assume that $h$ is the label of the $\mathtt{HALT}$ instruction in $N$.
%It is enough to prove that if $N$ terminates then there exists a \mm evolution $$(1,0,0) \minskred ^* (h, 0, 0)$$ such that the corresponding simulation in \evols{2} passes infinite times from  $\mathtt{HALT}$ instruction.
%In other words, it is enough to prove that there exists a correct simulation of the \mm  such that $\encp{(1, 0, 0)}{\mmn{2}} \pired ^* \encp{(h, 0, 0)}{\mmn{2}}$.
%This follows from Lemma \ref{lem:correctstep}.
%Finally, as every time that the computation reaches the $\mathtt{HALT}$ instruction the simulation is restarted, we can conclude that there exists an infinite computation of $\encp{N}{\mmn{2}}$ and that  $\encp{N}{\mmn{2}} \barbw{e}$,  as desired.
\end{proof}




\begin{lemma}[Soundness]\label{l:sound-pase2} 
Let $N$ be a \mm. If $N$ does not terminate then $\encp{N}{\mmn{2}} \negbarbw{e}$.
\end{lemma}
\begin{proof}
It is enough to prove that if $N$ does not terminate 
(that is, if $N$ does not reach a  $\mathtt{HALT}$ instruction)
then all the computations of $\encp{N}{\mmn{2}}$ are finite.
Since the encoding can mimic the behavior of $N$ both correctly and incorrectly, we have two possible cases:
\begin{enumerate}
 \item
 In the first case, 
  the simulation of $\encp{N}{\mmn{2}}$ is correct and no erroneous steps are introduced.
%If this is the case, then  $\encp{N}{\mmn{2}}$ never reaches a $\mathtt{HALT}$ instruction.
Notice that at every instruction an output on $f$ is consumed permanently: these
copies of $\outC{f}$ are only recreated when invoking a $\mathtt{HALT}$ instruction, which converts every $\outC{g}$ into a $\outC{f}$.
Since a $\mathtt{HALT}$ instruction is never reached, 
copies of $\outC{f}$ are never recreated, and 
the computation of process $\encp{N}{\mmn{2}}$ has necessarily to be  finite.
%Notice that it can also happen that the evolution of $N$ reaches a state where the number of increments on the registers is greater than the number of outputs on $b$ produced in the initial phase of the simulation of $N$. Also in this case the computation of process $\encp{N}{\mmn{2}}$ is finite, as it is deadlocked on an increment instruction.

 \item 
 In the second case, 
 the simulation is not correct and one or more wrong guesses occurred
 in the simulation of a decrement-and-jump instruction.
 Here, in addition to the possibility of deadlocks described in Item (1) above, 
 erroneous computations constitute another source of deadlocks.
 In fact, as detailed in Remark \ref{rem:wrong}, 
 for each one of such wrong guesses a copy of $\outC{b}$ is permanently lost. 
% 
% : i.e. the encoding chooses to jump even if the register is not empty. In this case, supposing that the register represents number $m$, $m$ resources of kind $b$ are lost and can never be reintroduced along the computation: indeed 
%(i) process $!a.(\outC{f} \parallel \outC{b} \parallel  \outC{a})$ is deadlocked and can never be reactivated once the process start the simulation of the \mm $N$;
%(ii) $b$ can only be emitted in case of a decrement, and decrements can only happen if the register contains a number of outputs on $u$ different from zero.
An arbitrary number of wrong guesses may thus lead to a state in which there are no outputs on $b$.
As discussed at the end of the case of the \textsc{M-Inc} in the proof of Lemma \ref{lem:correctstep}, 
the encoding of an increment instruction
reaches a deadlock if a copy of $\outC{b}$ is not available.
This means that wrong guesses in simulating a decrement-and-jump instruction may induce deadlocks when simulating an increment instruction.
%Recall that we assume that the \mm has at least one increment instruction, so even a single wrong guess may lead to a deadlock of $\encp{N}{\mmn{2}}$... \emph{complete}
%In general, if there are as many wrong 
%
%Hence, since the encoding is making wrong guesses, it means that $N$ contains at least one increment instruction that the encoding is performing
%an infinite number of times. But, as the number of outputs on $b$ is finite, and as some of this resources are consumed and never reintroduced in the system, at a certain point the encoding will run out of $b$s and it will deadlock on an increment instruction. Thus resulting in a finite computation of $\encp{N}{\mmn{2}}$.
%Notice that as before, it can also happen that the number of instruction performed by $N$ regardless of the errors introduced is greater than the number of available resources of kind $f$, thus resulting in a finite computation of $\encp{N}{\mmn{2}}$.
\end{enumerate}

%\begin{enumerate}
% \item The simulation of $\encp{N}{\mmn{2}}$ is correct: no erroneous steps are introduced.
%If this is the case, the process encoding the $\mathtt{HALT}$ instruction  is never reached. Moreover as at every instruction an output on $f$ is consumed (and never reintroduced), and as the number of available  outputs on $f$ is finite, the computation of process $\encp{N}{\mmn{2}}$ has to be finite.
%Notice that it can also happen that the evolution of $N$ reaches a state where the number of increments on the registers is greater than the number of outputs on $b$ produced in the initial phase of the simulation of $N$. Also in this case the computation of process $\encp{N}{\mmn{2}}$ is finite, as it is deadlocked on an increment instruction.
%
% \item The simulation is not correct. This can only happen because of a wrong guess in case of a decrement and jump instruction: i.e. the encoding chooses to jump even if the register is not empty. In this case, supposing that the register represents number $m$, $m$ resources of kind $b$ are lost and can never be reintroduced along the computation: indeed 
%(i) process $!a.(\outC{f} \parallel \outC{b} \parallel  \outC{a})$ is deadlocked and can never be reactivated once the process start the simulation of the \mm $N$;
%(ii) $b$ can only be emitted in case of a decrement, and decrements can only happen if the register contains a number of outputs on $u$ different from zero.
%Hence, since the encoding is making wrong guesses, it means that $N$ contains at least one increment instruction that the encoding is performing
%an infinite number of times. But, as the number of outputs on $b$ is finite, and as some of this resources are consumed and never reintroduced in the system, at a certain point the encoding will run out of $b$s and it will deadlock on an increment instruction. Thus resulting in a finite computation of $\encp{N}{\mmn{2}}$.
%Notice that as before, it can also happen that the number of instruction performed by $N$ regardless of the errors introduced is greater than the number of available resources of kind $f$, thus resulting in a finite computation of $\encp{N}{\mmn{2}}$.
%\end{enumerate}

Hence, since all the computations of $\encp{N}{\mmn{2}}$ are finite, the barb on $e$
cannot be exposed an infinite number of times, that is, $\encp{N}{\mmn{2}} \negbarbw{e}$.
\end{proof}

We are now ready to repeat the statement of Lemma \ref{th:corrE2}, on page \pageref{th:corrE2}:

\begin{lemma}[\ref{th:corrE2}]
Let $N$ be a \mm. $N$ terminates iff $\encp{N}{\mmn{2}} \barbw{e}$.
\end{lemma}

\begin{proof}
It follows directly from Lemmas \ref{l:compl-pase2} and \ref{l:sound-pase2}.
\end{proof}