%!TEX root = paperEdit.tex
\section{Reverse-engineering Methodology}
\label{methodology}

\comment{We have modeled the reverse-engineering of the APDU as an inference
problem. REPROVE has a built-in inference engine that allows us to
plug-in different knowledge bases such as models, abstractions and
specifications of the protocol. Our reverse-engineering algorithm
constructs and refines the possible mappings while extracting 
abstract properties and functionalities.}{paraphrased it -- hard-coded knowledge (reviewer's comment)}


The background knowledge to our problem
%, which is modeled in first-order
%logic, 
consists of abstract models, which need to be instantiated according to
the input trace. The models, which are based on ISO 7816, define:
\begin{inparaenum}[(\itshape i\upshape)]
\item the main properties, the restrictions and requirements of
communication,
\item possible implementations of the on-card operations,
\item possible implementations of specific RSA PKCS\#11 functions.
\end{inparaenum} 
\comment{Such models do not hard-code the implementation of the card. They
present abstractions of different functionalities that are then
refined according to the input trace. We assume that each command has
a unique representation and that commands have preconditions they
should satisfy to be applicable at any point in time. We have modeled
the background knowledge in first-order logic as it is
machine-readable and expressive enough to model the protocol's rules.}{hard coded knowledge (reviewer's comment)}
% without constraining the scenario and capture the
%inference rules of our reverse-engineering algorithm.
%Although
%there are other type of logics, we have preferred first-order logic for
%another reason: the failure-driven nature of our problem implies that the 
%reverse-engineering can benefit by a diagnosis-repair mechanism, such as in \cite{bundymcneill06} which is based on first-order.


\comment{More formally, REPROVE applies the transformation function
  $y(f(x))$ with $f: T^n \rightarrow I^n$ and $y: I^n \rightarrow
  O^m$, where $T^n$ is an input trace of $n$ commands, $I^n$ is a set
of $n$ inter-industry commands and $O^m$ is a set of $m$ on-card
operations.}{substituted a paragraph that explains the transformations
during the analysis with this. }


\eat{and, based on the knowledge base, it
executes a five-step reverse-engineering:
\begin{compactenum}[\itshape step \upshape 1.]
\item produce the semantic mappings of each command.
\item narrow-down potential mappings.
\item identify card sub-functionalities, \ie the groups of commands that implement specific card operations.
\item identify card functionalities, \ie the operations executed by the card.
\item produce model(s) of the reverse-engineered  RSA PKCS\#11 function in terms of card
operations.
\end{compactenum} 
The final output of the system is the result of each reverse-engineering step. }
 
\subsection{Modeling the APDU layer}

For reverse-engineering the \func{C\_logIn} function we have modeled
the following commands based on ISO 7816:
\begin{center}
\begin{scriptsize}
\begin{tabular}{l|l|l|l}
\cmd{select} & \cmd{get\_data} & %\\ \hline
\cmd{read\_binary} & \cmd{read\_record} \\ \hline
\cmd{update\_binary} & \cmd{update\_record} & %\\ \hline 
\cmd{erase\_binary} & \cmd{erase\_record} \\ \hline

\cmd{write\_binary} & \cmd{write\_record}& \cmd{activate\_file}&\cmd{put\_data} \\ \hline

 \cmd{get\_response} & %\\ \hline 
\cmd{append\_record} & \cmd{create\_file} & \cmd{append\_file}\\ \hline
\cmd{get\_challenge} & \cmd{verify} & %\\ \hline 
\cmd{internal\_authenticate} & \cmd{external\_authenticate} \\ \hline 
\cmd{general\_authenticate} & \cmd{mutual\_authenticate} &% \\ \hline


\end{tabular}
\end{scriptsize}
\end{center} 

In Figure~\ref{fig:decomposition} we show a high-level description of
our modeling approach. Each individual card operation
(\textit{functionality}) is decomposed into a sequence of
steps (\textit{sub-functionalities}). Each step is then implemented as
a sequence of APDU commands: proprietary, inter-industry, or a mix of
them. The APDU commands are further characterized depending on their
data exchange properties (shown, for example, as `\texttt{YY}' in the
figure to indicate a command that both sends and receives data) and
their role within the sub-functionality in question (core, additional,
or dummy). The same command may have different data exchange
properties and different roles depending on the sub-functionality,
\eg commands $\textrm{command}_a$ and $\textrm{command}_x$ in
Figure~\ref{fig:decomposition}.

\begin{figure}[!tb]
  \centering
    \includegraphics[width=\textwidth, height=0.25\textheight]{decomposition}
%  \includegraphics[width=\linewidth]{decomposition}
  \caption{A single operation represents a specific functionality and it is
   modeled as a sequence of sub-functionalities. Each sub-functionality is
   further implemented as a sequence of commands. Commands are characterized
   by their data exchange properties and role within some particular
   sub-functionality.}
   \label{fig:decomposition}
\end{figure}


%\subsubsection{APDU commands} 
\stitle{APDU commands} 
An APDU command is represented as a predicate \\ 
$\cmdvar{command}(\var{Cla},
\var{Ins}, \var{P1}, \var{P2}, \var{Lc}, \var{D}, \var{Le})$ where the
variables \var{Cla}, \var{Ins}, \var{P1}, \var{P2}, \var{Lc}, \var{D},
\var{Le} are instantiated according to the semantics of the
command. We consider the following commands as valid:
\begin{inparaenum}[(\itshape i\upshape)]
\item any inter-industry command; and
\item any proprietary command that can be mapped to an inter-industry
  command iff this inter-industry command has not occurred
  \comment{within the same}{why? is footnote enough?}
  implementation\footnote{We consider each command representation
    unique: an inter-industry command cannot also occur as a
    proprietary one, within the same trace. }, and has all its preconditions
  satisfied.
\end{inparaenum} 
A command is categorized based on:
\begin{inparaenum}[(\itshape i\upshape)]
\item its data exchange properties; and
\item the card operations.
\end{inparaenum}

%\paragraph*{Categorization according to data exchange properties} 
\stitle{Categorization according to data exchange properties} Each
category represents the data exchange properties of a command
and is presented as a 7-ary predicate:
\begin{compactenum}[(\itshape i\upshape)]
  \item \small $\cmdvar{command}_{nn}(\var{Cla}, \var{Ins}, \var{P1}, \var{P2},
    \var{Lc}, \var{D}, \var{Le})$: no data is sent, no data is expected,
  \item  \small $\cmdvar{command}_{ny}(\var{Cla}, \var{Ins}, \var{P1}, \var{P2},
    \var{Lc}, \var{D}, \var{Le})$: no data is sent, data is expected,
  \item \small $\cmdvar{command}_{yy}(\var{Cla}, \var{Ins}, \var{P1}, \var{P2},
    \var{Lc}, \var{D}, \var{Le})$: data is sent, data is expected,
  \item \small $\cmdvar{command}_{yn}(\var{Cla}, \var{Ins}, \var{P1}, \var{P2}, 
    \var{Lc}, \var{D}, \var{Le})$: data is sent, no data is expected.
 \end{compactenum}
\normalsize
 Variables \var{Lc}, \var{D} and \var{Le} define the
 category of a command. For instance, if $\var{Lc} \neq \val{00}$ and
 $\var{D} \neq \val{00}$ then the command sends some data \var{D} with
 length \var{Lc} to the card. If \var{Le} is not
 \val{null}\footnote{Null means absence of a field.} then the
 response will be some data with length \var{Le}. The above is
 captured by the following rule:
\small
 \begin{align*}
  \forall \var{Cla}, \var{Ins}, \var{P1}, \var{P2}, \var{Lc}, 
    \var{D}, \var{Le}, & (
 (\cmdvar{command}(\var{Cla}, \var{Ins}, \var{P1}, \var{P2},
    \var{Lc}, \var{D}, \var{Le}) \\
  & \land \var{Lc} = \val{00} \land \var{D} = \val{00} \land 
    \var{Le} \neq \val{null}) \\
&	\rightarrow (\cmdvar{command}_{ny}(\var{Cla}, \var{Ins}, \var{P1},
  \var{P2}, \var{Lc}, \var{D}, \var{Le}))
\end{align*}
\normalsize
Similar rules are defined for each category.
\eat{
assigns a command to the $\cmdvar{command}_{ny}$ category under the condition
that \var{Lc} and $D$ are instantiated to $00$ and $Le$ is not
$null$}


%\paragraph*{Categorization according to card operations}  
\stitle{Categorization according to card operations}  
For each operation of the card we have categorized the commands into:
\begin{compactenum}[(\itshape i\upshape)]
%%%% core and additional commands
\item \textit{Core}: the basic commands that perform the operation,
\eg to create a new file \cmd{create\_file} is a core  command. 
 \eat{\normalsize We have grouped the core commands that implement the same
 abstraction of different card operations, to create command sets that have a
 similar outcome. For example, to extract some data from the card one might
 send a \cmd{get\_data}  command along with some meta-data.
 Another way to implement this (depending on the card's data organization) is
 by reading the data from the card with a \cmd{read\_binary} command. }
\item \textit{Additional}: the commands that add extra properties
to the operation, but they do not change its meaning; the same operation can
be implemented without them. For example, to create a file
\cmd{select} is an additional command as it merely adds information to file creation
(\eg selecting a path to create the file into) but file creation can proceed
without it.
\eat{ For example, for the card reader authentication we consider \cmd{verify} to
be a core command. During the authentication process,  a  path might be
selected, where the authentication will take place. Thus, we consider
\cmd{select}  as an additional command for this operation. This can also apply
vice versa: for the selection of a file \cmd{select}  is a core command. However,
before any operation is executed, authentication via a \cmd{verify} command might
take place. Thus, \cmd{verify} is an additional command for this operation.}
%% dummy commands
\item \textit{Dummy}: the commands that neither send nor expect any
data. They usually just query, or check, the communication with the card.
For example, a \cmd{verify} command that neither sends nor expects any data
to/from the card. Such commands may occur any time during the communication
and they do not change the output of the reverse-engineering.
\end{compactenum}






%\paragraph*{Command preconditions} 
\stitle{Command preconditions} 
The preconditions of each command define:
\begin{inparaenum}[(\itshape i\upshape)]
\item restrictions on the types of previously issued commands, 
\item the values of their parameters, 
\item different semantics for the same command, and
\item data types and file structures.
\end{inparaenum}
For instance, the common use of  \cmd{read\_binary}  is  to access the content of an
EF file. Yet, if the value of \var{P1} is
between \val{128} and \val{160} then \cmd{read\_binary}  is used to
select an EF file. This precondition is modeled as:
\small
\begin{align*}
 \forall \var{Cla}, \var{Ins}, \var{P1}, \var{P2}, \var{Lc}, 
    \var{D}, \var{Le}, &((
  \cmdvar{command}(\var{Cla}, \val{b0}, \var{P1}, \var{P2},
    \var{Lc}, \var{D}, \var{Le})
    \land \var{P1} \in [\val{128}, \val{160}]) \\
 &\rightarrow (\cmd{select}(\val{file}, \var{D}) \land \cmd{isa}(\var{D},
 \val{EF})))
\end{align*}
\eat{
\begin{eqnarray*}
 \lefteqn{  \forall \var{Cla}, \var{Ins}, \var{P1}, \var{P2}, \var{Lc}, 
    \var{D}, \var{Le}( (}\\
  \lefteqn{  \cmdvar{command}(\var{Cla}, \var{bo}, \var{P1}, \var{P2},
    \var{Lc}, \var{D}, \var{Le})} \\
   \lefteqn{\land \var{P1} \in [\val{128}, \val{160}])} \\
 &\rightarrow (\cmd{select}(\val{file}, \var{D}) \land \cmd{isa}(\var{D},
 \val{EF})))
\end{eqnarray*}
}
\normalsize
%\paragraph*{Card operations} 
\stitle{Card operations}
We introduce a hierarchy of abstractions, that we term \textit{Functionality} models, which capture the
high-level semantics of the card
operations. \textit{Functionality} models provide high-level views of
different operations of a card, and \textit{sub-functionality} models
describe the steps by which each operation is implemented.
\eat{We introduce the term \textit{ functionality} to define a specific card
operation that is implemented by a set of command-response pairs. We consider
two levels of abstraction:
\begin{inparaenum}[(i)]
 \item \textit{Functionality}, which is the most abstract description of such operations.
\item  \textit{Sub-functionality}, which is a description of each operation
with respect to the corresponding command-response pairs.
\end{inparaenum}} 
A valid (sub-)/functionality either:
\begin{inparaenum}[(\itshape i\upshape)]
\item has all its preconditions satisfied by the commands\footnote{Under the condition that the response is positive, \ie \val{90 00}.}
seen so far, or
\item has a subset of its preconditions satisfied by the commands seen so
 far, but it is possible to satisfy the rest of them by the commands that
 will follow, \ie the (sub-)/functionality is partially satisfiable.
\end{inparaenum}

\eat{Each (sub-)/functionality is a predicate of arity three:\\
\sFnclty{sub$-$functionality(Name, Preconditions, Postconditions)}\\
\fnclty{functionality(Name, Preconditions, Postconditions)}\\
where \var{Name }is the name of the (sub-)/functionality,\\ \var{Preconditions} is the set of commands that need to be sent and \var{Postconditions} is the set of effects of that (sub-)/ functionality.
}


%%% edo
%\paragraph*{Sub-functionalities} 
\stitle{Sub-functionalities} 
Sub-functionalities model the steps that a
card performs to implement specific operations. The same sub-functionality
may have more than one model and it consists of one or more commands. In
Table~\ref{tab:subfunctionalities}, we present the sub-functionalities we
have defined and their corresponding core commands. For example,
\sfnclty{external\_authenticated}(\var{RD}, \var{D})  describes the
authentication of the reader through the challenge-response
protocol. The card issues a challenge \var{RD} and the reader authenticates
itself by providing the corresponding response. The following rule
describes this operation:
\small
\begin{align*}
 \forall \var{RD}, \var{Le}, 
    \var{P1}, \var{P2}, \var{Lc}, \var{D}, & (
      (\cmdvar{command}(\val{00}, \val{84}, \val{0}, \val{0}, 
        \val{0}, \val{0}, \var{Le}) \land \cmd{response}(\var{RD}) \\
        & \land \cmdvar{command}(\val{00}, \val{87}, 
        \var{P1}, \var{P2}, \var{Lc}, \var{D}, \val{null})
        \land \var{P2} \in [\val{128}, \val{256}])\\
        & \rightarrow \cmd{external\_authenticated}(\var{RD}, \var{D}))
        \end{align*}
\normalsize
\eat{
\begin{eqnarray*}
 \lefteqn{\forall \var{RD}, \var{Le}, 
    \var{P1}, \var{P2}, \var{Lc}, \var{D}, (}\\ 
      \lefteqn{(\cmdvar{command}(\val{00}, \val{84}, \val{0}, \val{0}, 
        \val{0}, \val{0}, \var{Le}) \land \cmd{response}(\var{RD})}\\
        \lefteqn{ \land \cmdvar{command}(\val{00}, \val{87}, 
        \var{P1}, \var{P2}, \var{Lc}, \var{D}, null)}\\
        \lefteqn{ \land \var{P2} \in [\val{128}, \val{256}])}\\
        & \rightarrow \cmd{external\_authenticated}(\var{RD}, \var{D}))
        \end{eqnarray*}
}
\eat{				
 \begin{eqnarray*}
 \lefteqn{\forall   RD, Le_1,  P1_2, P2_2, Lc_2, D_2, ( }\\ 
				\lefteqn{(command(00, 84, 0, 0, 0, 0, Le_1) \land response(RD)}\\
				\lefteqn{ \land command(00, 87, P1_2, P2_2, Lc_2, D_2, null)}\\
				\lefteqn{ \land P2 \in [128,256])}\\
				& \rightarrow external\_authenticate(RD, D_2))
				\end{eqnarray*}
}		
\noindent
which says that if the command with $\var{Ins} = \val{84}$, with a card
response \var{RD}, is followed by the command with $\var{Ins} = \val{87}$,
then the reader has authenticated itself via a challenge-response external
authentication. For the reverse-engineering of the RSA PKCS\#$11$ \func{C\_logIn}
function, we categorize each sub-functionality as being either:
%\begin{itemize}
%\item \textit{core} 
%\item \textit{additional} 
%\end{itemize}
\begin{inparaenum}[(\itshape i\upshape)]
\item a \textit{sensitive operation}: any process that we expect to
deal with sensitive data, \eg the verification of a PIN; or
\item a \textit{non-sensitive operation}: any generic process over
non-sensitive data, \eg the selection of a file.
\end{inparaenum}


\eat{If a functionality is a \textit{sensitiveOperation}, then all core sub-functionalities inherit this property, while all additional sub-functionalities are \textit{nonSensitiveOperation} and \textit{vice versa}.}



\begin{table}[!tb]
  \centering
\begin{scriptsize}
  \begin{tabular}{l|l}
  
    \textbf{Sub-functionality} & \textbf{Core command set} \\ \hline  \hline
	   \sfnclty{selected} & $\{\cmd{select}, \cmd{read\_binary}\}$ \\ \hline
	   \sfnclty{read\_data\_sub} & $\{\cmd{get\_data}, \cmd{read\_binary},
       \cmd{get\_response}, \cmd{read\_record}\}$\\ \hline
	   \sfnclty{data\_updated} & $\{\cmd{update\_binary}\}$ \\ \hline
	   \sfnclty{file\_created} & $\{\cmd{create\_file}\}$ \\ \hline
	   \sfnclty{data\_written} & $\{\cmd{write\_binary}, \cmd{update\_binary},
       \cmd{write\_record}\}$ \\ \hline
	   \sfnclty{challenge\_sent} & $\{\cmd{get\_challenge}\}$ \\ \hline
	   \sfnclty{verified} & $\{\cmd{verify}\}$ \\ \hline
	   \sfnclty{external\_authenticated} & $\{\cmd{external\_authenticate}\}$ \\ \hline
	   \sfnclty{internal\_authenticated} & $\{\cmd{internal\_authenticate}\}$ \\ \hline
	   \sfnclty{mutual\_authenticated} & $\{\cmd{mutual\_authenticate}\}$ \\ 
 
  \end{tabular}
\end{scriptsize}
  \caption{Card sub-functionalities and the corresponding core commands.}
  \label{tab:subfunctionalities}
\end{table}


%\paragraph*{Functionalities} 
\stitle{Functionalities} 
Functionalities model the on-card operations.  As there are different implementation methods,
 each functionality consists of a set of possible  core and additional sub-functionalities
plus some  dummy commands.  For example, consider two
cards $\textrm{Card}_x$ and $\textrm{Card}_y$ which both store data
(\fnclty{store\_data}). $\textrm{Card}_x$ performs this operation
through a \sfnclty{file\_created} sub-functionality, while
$\textrm{Card}_y$ through a \sfnclty{data\_written}. 
Table~\ref{tab:corefunctionalities} presents the defined functionalities  and their 
corresponding sub-functionality sets.  The core sub-functionalities are extracted on the
basis that at least one of them (but potentially more) is necessary
for the implementation of the functionality.  Additional
sub-functionalities may appear in the implementation, but are not
required.


\begin{table}[h!]
\begin{scriptsize}
  \centering
  \begin{tabular}{l|p{5.8cm}|p{4.4cm}}
  \textbf{Functionality} & \textbf{Core sub-functionality set} & \textbf{Additional sub-functionality set} \\ \hline  \hline
	\fnclty{store\_data} & $\{\sfnclty{file\_created},
       \sfnclty{data\_written},  \sfnclty{data\_updated}\}$ 
      &  $\{\sfnclty{selected}, \sfnclty{retrieve\_data}\}$ \\  \hline
	\fnclty{read\_data} & $\{\sfnclty{read\_data\_sub}\}$ &
      $\{\sfnclty{selected}\}$  \\ \hline
	\fnclty{authenticated} &  $\{\sfnclty{challenge\_sent},
      \sfnclty{verified}, \newline \sfnclty{external\_authenticated}, 
      \sfnclty{internal\_authenticated}, \newline 
      \sfnclty{mutual\_authenticated}\}$ & 
      $\{\sfnclty{selected}, \sfnclty{retrieve\_data},
      \newline \sfnclty{data\_written}\}$ \\  
  \end{tabular}
\end{scriptsize}
  \caption{Card functionalities and the corresponding sub-functionality set.}
  \label{tab:corefunctionalities}
\end{table}







%\paragraph*{General rules}
\stitle{General rules}
We define rules on predicates to describe
communication restrictions,  card responses, file specifications and data
types. For instance, the following rule requires that if some data \var{D}
of length \var{Le} is expected, then the response should contain \var{D} and
the corresponding length should be \var{Le}.
\small
\begin{equation*}
 \forall \var{Le}, \var{D} (\cmdvar{expected}(\val{data}, 
 \var{Le}, \var{D})
 \rightarrow (\cmdvar{response}(\var{D}) \land \cmdvar{length}(\var{D}, 
 \var{Le})))
 \end{equation*}
\eat{
\begin{eqnarray*}
 \lefteqn{\forall \var{Le}, \var{D} (\cmdvar{expected}(\val{data}, 
 \var{Le}, \var{D})}\\ 
 &\rightarrow (\cmdvar{response}(\var{D}) \land \cmdvar{length}(\var{D}, 
 \var{Le})))
 \end{eqnarray*}
\normalsize 
}










\subsection{RSA PKCS\#11 models}

\comment{RSA PKCS\#11 models present our assumptions on how specific
cryptographic functions might be implemented at the APDU level and are
expressed in terms of functionalities. These models present an
abstraction of the expected on-card operations. They do not impose an
implementation but merely act as a flexible guide of the implemented
functionality. We provide a summary of our modeling assumptions for
the \func{C\_logIn} function and some example rules.}{paraphrased it : hard-coded knowledge (reviewer's comment)}


%\paragraph*{RSA PKCS$\#$11 \func{C\_logIn}}
\stitle{RSA PKCS$\#$11 \func{C\_logIn}}
For the \func{C\_logIn} function we expect one of the authentication
operations: a PIN/Pass-code verification, or a challenge-response one.
Invocation of the \fnclty{read\_data} functionality for
authentication-related data is also possible, as is a second
authentication for the given session.

Authentication is defined with respect to ISO 7816:
\begin{inparaenum}[(\itshape i\upshape)]
\item  authentication with PIN: the card compares received data from the outside world with internal data;
\item  authentication with key: an entity to be authenticated has to prove the knowledge of a relevant key on the challenge-response procedure;
\item data authentication: using internal data, secret or public, the card checks data received by the outside world. Another way is for the card to check secret internal data and compute a data element (cryptographic checksum or digital signature) and insert it to the data sent to the outside world;
\item data encipherment: using secret internal data, the card enciphers a cryptogram received in a data field, or using internal data (secret or public) the card computes a cryptogram and inserts it in a data field, possibly together with other data.
\end{inparaenum}

\eat{a two-level authentication:  
first the reader authenticates itself to the card for the given session, and then, 
it provides a PIN/Pass-code to log-in.} 
The following rules describe these assumptions:
\begin{compactenum}[Rule 1]
%\item $\fnclty{authenticated}(\var{TypeA}, \var{TypeB}) \\
%  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: \\
\item \small $\fnclty{authenticated}(\var{TypeA}, \var{TypeB}) 
  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: 
	%There are two possible types of authentication:
  %\begin{inparaenum}[(\itshape i\upshape)]
  %\item through a PIN/Password, and 
  %\item through the challenge-response protocol.
  %\end{inparaenum}
	
%\item $(\sfnclty{selected}(\val{path}/\val{file}, \var{DF}) \\
%  \land \fnclty{authenticated}(\var{TypeA}, \var{TypeB})) \\ 
%  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: \\
\item  \small $(\sfnclty{selected}(\val{path}/\val{file}, \var{File})
  \land \fnclty{authenticated}(\var{TypeA}, \var{TypeB})) \\
  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: 
    %The file or path in which the authentication takes place is selected.
    %Then, authentication takes place as described in Rule 1. 
%\item  $(\fnclty{read\_data}(\var{Location}, \var{File}, \var{RD}) \\ 
%  \land \fnclty{authenticated}(\var{TypeA}, \var{TypeB})) \\ 
%  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: \\
\item  \small $(\fnclty{read\_data}(\var{Location}, \var{File}, \var{RD}) 
  \land \fnclty{authenticated}(\var{TypeA}, \var{TypeB})) \\ 
  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: 
    %Authentication-related data is retrieved from the card and
    %authentication takes place as described in Rule 1.
%\item $(\fnclty{read\_data}(\var{Location}, \var{File}, \var{RD}) \\ 
%  \land \sfnclty{selected}(\val{path}/\val{file}, \var{DF}) \\
%  \land \fnclty{authenticated}(\var{TypeA}, \var{TypeB})) \\
%  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: \\
\item \small $(\fnclty{read\_data}(\var{Location}, \var{File}_1, \var{RD}) 
  \land \sfnclty{selected}(\val{path}/\val{file}, \var{File}_2) \\
  \land \fnclty{authenticated}(\var{TypeA}, \var{TypeB})) 
  \rightarrow \cmd{log\_in}(\var{TypeA}, \var{TypeB})$: 
    %Authentication-related data is retrieved from the card, the file/path in
    %which authentication takes place is selected and, authentication takes
    %place as  described in Rule 1.		
\end{compactenum}
\normalsize where $\var{TypeA}$/$\var{TypeB}$ are instantiated to a PIN/Passcode,
or challenge/response; $\var{File}$ is instantiated to a file;
$\var{Location}$ to a path in which authentication takes place; and
$\var{RD}$ to the data returned by the card.




%\newpage
\subsection{Reverse-engineering algorithm}

\eat{
\begin{figure}[!tb]
%\begin{wrapfigure}[21]{r}{0.42\linewidth}
  \vspace{-6ex}
  \centering
%  \includegraphics[width=0.42\textwidth, height=0.40\textheight]{flowchart}
  %\includegraphics[width=\linewidth]{reproveAnalysis}
  \vspace{-4ex}
  \caption{Reverse-engineering of commands: a high-level
    view.} 
  \label{flow} 
%\end{wrapfigure}
\end{figure}
}

\ncomment{The algorithm consists of three steps, each addressing 
a different abstraction of the implementation: 
\begin{inparaenum}[(i)]
\item the APDU trace semantics, 
\item the on-card operations that are executed during 
  communication, and
\item the APDU implementation given a PKCS\#11 function.
\end{inparaenum}}
{new  all  above check for correctness?}

\ncomment{\textit{Step 1: Semantics of the APDU Trace.} }{new all
  above} Given an input trace $T^n$ of $n$ commands, we generate a
tree in which each path from root to leaf ${T^n_i}'$ is a semantic
mapping of the trace such that \ncomment{$T^n \mapsto {T^n_i}'$}{was
  $T \mapsto \ldots$}. As the exchange of the command-response pairs
is sequential, so is the analysis of the commands, which implies that
the semantics of an unknown command heavily depends on the previous
commands. Each unknown command is categorized and all corresponding
mappings $M$ are identified, which are then narrowed-down to a set
$P'$ based on precondition satisfiability. For each mapping $m\in P'$,
the commands analyzed so far are grouped, and sets that fully or
partially\footnote{Given a sub-functionality, there exists at least
  one core command that satisfies its preconditions.} satisfy any
sub-functionality are considered valid. The outcome of this process is
a set of valid\footnote{Valid here indicates that neither the ISO, nor
  any background model is violated.} mappings $M''$ of each unknown
command such that \ncomment{$M''\subseteq P'\subseteq M$}{you had $M'$
  but that was not defined so I changed it to $P'$?}, and the set
$P$ which consists of different interpretations of $T$. More formally, 
Step 1 performs the transformation $f:
f(T^n)=P^n$ where \ncomment{$\forall {T^n_i}' \in
P^n: {T^n_i}' \mapsto T^n$.}{should that be $T^n \mapsto {T^n_i}'$?}
%P=\{ {T^n}'_1, {T^n}'_2+1,..,{T^n}'_i\}$, and $T \mapsto {T^n}'_i$, $T \mapsto {T^n}'_i+1$,.., $T \mapsto {T^n}'_i$.

\begin{wrapfigure}[7]{r}{.25\linewidth}
  \vspace{-6ex}
  \centering
%  \includegraphics[width=0.48\textwidth]{trees}
  \includegraphics[width=\linewidth]{trees}
  \vspace{-5ex}
  \caption{ Reducing the search space. } 
  \label{fig:trees} 
\end{wrapfigure}


\ncomment{\textit{Step 2: On-Card Operations.}}{new all above} At this
stage, given $P^n$ from the previous step, the commands at each ${T^n_i}'
\in P^n$ are grouped in all possible combinations. Each group is
checked on whether there exist any sub-functionality(ies) that satisfy
its preconditions. The outcome of this process is a set $S^l$ of
sub-functionalities such that $\forall {S^l}_k \in S^l \exists
{T^n_i}' \in P^n: {T^n_i}' \mapsto {S^l}_k $. Then all
sub-functionalities in $S^l$ are grouped and the set of valid
functionalities $O^m$ is identified. The sub-functionalities that do
not satisfy $O^m$ are discarded along with the corresponding trace
mappings. The overall step can be presented as a function $y$: \small
$y(P^n) = O^m$ with ${S^l}' \mapsto O^m$, ${S^l}' \subseteq S^l$,
and ${P^n}' \mapsto {S^l}'$, ${P^n}' \subseteq P^n$.


\ncomment{\textit{Step 3: PKCS\#11 Function.} }{new all
  above} Here, the set of functionalities $O^m$ from Step 2 is
mapped to the background models of specific PKCS\#11 functions,
resulting in an interpretation of the communication in terms of the
standard. The outcome is the APDU mapping to PKCS\#11, the set of
card operations that are executed during the communication, 
${O^m}' \subseteq O^m$, and the  APDU traces
${T^n_i}' \in {T^n}'$ that satisfy them.




\eat{A high-level view of the reverse-engineering technique implemented at REPROVE is presented in Figure~\ref{flow}. 
The reverse-engineering algorithm consists of five steps, each addressing a different abstraction of the implementation. During the first step we produce a 
tree that represents all possible command mappings given the input trace. We then,  follow a  a depth-first algorithm to identify the valid mappings. Since the communication of the card is serial, i.e. the trace 
presents a series of commands sent consecutively, we follow the same principle during our reverse-engineering: we identify the valid mappings by searching the maximal depth, which is the number of commands appearing at the trace. 
It is crucial that REPROVE is capable of reverse-engineering  proprietary implementations of the protocol, 
 the search process is not constrained  and all possible situations are considered. REPROVE uses an exhaustive algorithm.  
 As this is  a combinatorial problem, to avoid search space
 explosion,  at each step  the potential mappings are narrowed-down.  }


%\begin{figure}[!tb]
%\end{figure}

\begin{wrapfigure}[23]{r}{.55\linewidth}
  \vspace{-5ex}
  \centering
%  \includegraphics[width=0.50\textwidth]{AnalysisOveriew}
  \includegraphics[width=.95\linewidth]{AnalysisOveriew}
  %[width=8.75cm,height=8cm]
  \vspace{-3ex}
  \caption{The transformations of the APDU trace during the
    reverse-engineering process.} 
  \label{fig:analysisOverview} 
%\end{figure}
\end{wrapfigure} 


Figure~\ref{fig:trees} shows how we restrict the search space: grey
arrows indicate narrowing-down and black arrows indicate mapping; each
path of a black tree is an individual mapping of the same APDU
trace. The nodes appearing at the same depth represent
different mappings of the same command; each path of a grey tree
represents a sequence of executed card operations
(sub-/functionalities). Step 1 generates a tree of all the command
mappings, where each path is a different trace mapping. The mapping of
a command affects the mapping of the following command, thus, not
all paths have a valid depth \ie the same as the number of commands
in the trace. In Step 2 the command paths of valid length are
mapped to functionality paths (on-card operations). Finally, Step 3
discards functionality paths that do not match with the PKCS\#11
models.




 In each reverse-engineering step the
 low-level input (commands) evolves to abstract models (card
 operations). A schematic description of the transformations of the
 commands during the reverse-engineering process is presented in
 Figure~\ref{fig:analysisOverview}. The trace itself goes through a
 sequence of transformations: from commands, to inter-industry
 mappings, to potential sub-functionalities, to groups of
 sub-functionalities into higher-level functionalities.  If REPROVE is
 successful in providing a sequence of functionalities that describe
 a PKCS\#11 function, then the trace is effectively reverse-engineered. This
 translates into a vulnerability for the card as it exposes its implementation.
  % and a better understanding of the card's implementation and may
  % suggest different types of attacks depending on the abstraction.
  

 %%%% old figures
\eat{
\begin{figure}[!tb]
  \centering
  \includegraphics[width=0.50\textwidth]{step1}
  %[width=8.75cm,height=8cm]
  \caption{Step 1: the proprietary commands are categorized based on their data exchange properties and the inter-industry commands are mapped to sub-functionalities.} \label{fig:step1} 
\end{figure}


\begin{figure}[!tb]
  \centering
  \includegraphics[width=0.4\textwidth]{step2}
  %[width=8.75cm,height=8cm]
  \caption{Step 2: each command category is mapped to the corresponding inter-industry command set.} \label{fig:step2} 
\end{figure}


\begin{figure}[!tb]
  \centering
  \includegraphics[width=0.4\textwidth]{step3}
  %[width=8.75cm,height=8cm]
  \caption{Step 3:  the inter-industry command mappings whose preconditions are not met, are discarded.} \label{fig:step3} 
\end{figure}


\begin{figure}[!tb]
  \centering
  \includegraphics[width=0.4\textwidth]{step4}
  %[width=8.75cm,height=8cm]
  \caption{Step 4: the sets of inter-industry commands are mapped to sub-functionalities which are mapped to functionalities.} \label{fig:step4} 
\end{figure}


\begin{figure}[!tb]
  \centering
  \includegraphics[width=0.4\textwidth]{step5}
  %[width=8.75cm,height=8cm]
  \caption{Step 5: the sets of functionalities are mapped to the PKCS\#11 models.} \label{fig:step5} 
\end{figure}
}

\eat{
Consider a trace $S$ which is the set of all sent messages $\{command_{i},\dots,command_{i+n}\}$, where $\forall c \in S,\ (c \in \mathit{Interindustry} \lor c \in \mathit{Proprietary})$.}


                                   
%\begin{figure}[!tb]
\stitle{Reverse-Engineering Algorithm}
 The overall reverse-engineering process for a trace of commands is shown in
Algorithm~\ref{alg:analysis}\footnote{In Algorithm~\ref{alg:analysis} we only show the conceptual
reverse-engineering process to aid the presentation.  The actual implementation of the
algorithm is in  Prolog
 and  leverages the analysis
algorithm of the language.}.  The input to the algorithm is a list $\T$ of
commands representing the communication trace, whereas the output is a list
$P$ of potential mappings of $\T$ (each mapping is a list itself) and a list $O$ of card functionalities.  The list $P$ is
initialized to $[[]]$ which indicates that the first mapping is the empty
one.   Each command $c \in \T$  is then analyzed and depending on its value of
$Cla$  it is classified as proprietary or  inter-industry.
%proprietary commands
In the former case (lines~\ref{alg:analysis:prop-start}
to~\ref{alg:analysis:prop-end}) the values of its $\L_c$, $\D$, and
$\L_e$ parameters are checked to categorize its data exchange
properties and obtain a list $\M$ of potential mappings.
\ncomment{From $\M$ we only keep the valid mappings
  (lines~\ref{alg:analysis:expand-start}
  to~\ref{alg:analysis:filter-end}) and store them in $P$. The
  valid mappings are identified based on precondition
   and sub-functionality
  satisfiability (lines~\ref{alg:analysis:filter-precond-start}
  to~\ref{alg:analysis:filter-precond-end}): for each potential mapping to
  an inter-industry command, we check that the preconditions of the
  inter-industry command are met by computing the union of the
  postconditions of all commands that precede it. If the preconditions
  of an inter-industry command are not met, the erroneous mapping is
  removed from $\M$ and the analysis continues to the next candidate
  mapping; else, we iterate over the analyzed trace so far,
  and look at the categorization of commands based on their role.
  Using this role, we group commands into different combinations
  that may form potential sub-functionalities. If such a grouping
  exists, the mapping is stored in $P$. 
%inter-industry commands
  If $c$ is an inter-industry command, there is only one such mapping
  $n$, so $\M$ is a singleton list. We search for satisfiable
  sub-/functionalities by this command and store the command in
  $P$ (lines~\ref{alg:analysis:ii-start}
  to~\ref{alg:analysis:ii-end}). At this point $P$ consists of
  different mappings of the trace. Then, $P$ is further narrowed-down
  based on the sub-functionality and functionality models
%models of PKCS\#11 and ISO-7816 that are
%based on sub-functionalities and functionalities
  (lines~\ref{alg:analysis:func-filter-start}
  to~\ref{alg:analysis:func-filter-end}). For each different mapping
  of the trace, the commands are grouped into sub-functionalities
  which are then further grouped into higher-level functionalities
  that are added to $O$, all in the context of our models. If no such
  grouping is found for a candidate trace, the trace is removed from
$P$. If a grouping is found, its constituent mappings are annotated
  accordingly to denote this. }{I cannot fix the line references; they
  are wrong. Also, the FOR loop at line 10 needs braces and I cannot
  add them!}. The final step of the algorithm is to further narrow-down
$P$ by matching the resulting functionalities in $O$ with the PKCS\#11
models. In the end, $P$ will contain zero or more traces of candidate
mappings. If $P$ is empty, our reverse-engineering has failed to
produce a mapping. If there is only one trace in $P$, we say that the
mapping is unique. If there is more than one candidate trace, the
reverse-engineering is successful, but we have only identified an
abstraction of the correct mapping.


\small{
\begin{algorithm}[!tb]
  \relsize{-1}
{
    \SetKwInOut{Input}{input}
    \SetKwInOut{Output}{output}
    \Input{List $\T$ of commands to be analyzed}
    \Output{List $P$ of potential mappings of $\T$ and list $O$ of card functionalities}

    \BlankLine
    $P = [[]]$; \quad
    $O=[[]]$\;
 %FOR command start 
    \ForEach{$c(\Cla, \ins, P_1, P_2, \L_c, \D, \L_e) \in \T$}
    {
   %IF proprietary start  
      \If{$\ins$ indicates $c$ is proprietary}{
        \label{alg:analysis:prop-start}
        use $\l_c, d, \l_e$ to extract data exchange properties
        $\d$\;
        $\M = $ list of APDU commands $c$ maps to based on $\d$\;
        \label{alg:analysis:prop-end}
       %%%
  
        \label{alg:analysis:expand-start}
      %  remove $p$ from $P$\;
     
        	\label{alg:analysis:filter-start}
%FOR EACH MAPPING START
        \ForEach{$m \in \M$}{
        
                \label{alg:analysis:filter-precond-start}
      $Z = \{z~|~(k \textrm{~precedes~} m \textrm{~in~} p) \wedge 
            (z \in \textrm{postconditions}(s_k))\}$\;
 %IF PRECON start
          \If{preconditions of $m$ are not satisfied by $Z$}{
            remove $m$ and move on to the next\;          
            \label{alg:analysis:filter-precond-end}
          }
          
     %if pre con end

        \label{alg:analysis:filter-sub-start}
  %if sub start
  
  
  %REPEAT trace start


  { \ForEach{$p \in P$}{
  
  
           \If{ a grouping of $p$ to \textit{sub-functionalities} can be found}      			
		{ 
		\label{lag:analysis:storeP-start}  
		$s = p \oplus (c \mapsto m)$; \quad $P = P \oplus s$
              }        	
 %IF sub end       
        
        }
        }
        %repeat trace end
    
        }
        
     \label{alg:analysis:filter-sub-end}
  \label{alg:analysis:filter-end}        
%  FOR EACH MAPPING END   


     }



    
%%IF proprietary end
    {
        $n = $ inter-industry command $c$ maps to; \quad
        \label{alg:analysis:ii-start}
        $\M = [n]$\;
        annotate each command with its \textit{sub-functionality}\;
          annotate  \textit{sub-functionalities} with \textit{functionalities}\;
          $O = O \oplus \textit{functionalities}$\;
          $s = p \oplus (c \mapsto n)$; \quad $P = P \oplus s$;
           \label{alg:analysis:ii-end}
      }
%IF interindustry edn


}



%FOR command end


    \ForEach{$p \in P$}
    {
      \label{alg:analysis:func-filter-start}
      \ForEach{$(c \mapsto m) \in p$, potential sub-functionality of $m$}{
        group \textit{sub-functionalities} into \textit{functionalities}\;
        \lIf{no such grouping can be found}{remove $p$ from $P$}
        \Else{
          annotate each command with its \textit{sub-functionality}\;
          annotate command groups with \textit{functionalities}\;
          $O = O \oplus \textit{functionalities}$\;
          \label{alg:analysis:func-filter-end}
        }
      }
   
   \ForEach{$f \in O$}{
    		\label{alg:pkcs-matching:start}
                \lIf{ $f \notin \textrm{PKCS\#11 models}$}{ 
                  remove $f$ from $O$;
                  \quad remove $p$ from $P$ }}
   		\label{alg:pkcs-matching:end}
   
    }
    		 
    \Return{$P$,$O$}\;
    
    
  }
  
  \caption{The reverse-engineering process for a trace of commands}
  \label{alg:analysis}
\end{algorithm}

\normalsize


%%% Local Variables: 
%%% mode: latex
%%% TeX-master: "paperEdit"
%%% End: 
