%!TEX root = verifiableLeakage.tex

\section{Simulatable vs. Indistinguishability Leakage}
\label{sec:sim and hill}
In this section we show that simulatable leakage is incomparable with indistinguishable leakage functions.  In particular, we show a bounded leakage function that is not simulatable and a simulatable leakage function that completely removes all entropy from the secret.  In all of these results we assume that the public information is witness hiding of the secret state.

%In the setting of Standaert, Pereira, and Yu~\cite{standaertleakage} the variable $K$ is information-theoretically determined before leakage~(they consider the setting of a pseudorandom function with many plaintext/ciphertext pairs.  There is a function that can take a guess for $K$ and use $Y$ to check if the guess $K$ is correct. Measuring security in this setting using min-entropy or HILL entropy makes little sense.

%\begin{lemma}
%\label{lem:verifiable no entropy}
%Let $(K, Y)$ be a pair of variables over $(\mathcal{M}_1, \mathcal{M}_2)$ respectively.  Let $Y, K$ be a unique witness relation computable by $f$ of size $s_{ver}$.  Then $H^{\hill}_{\epsilon, s_{sec}}(K|Y, \mathcal{L}(K))\leq H^{\hill}_{\epsilon, s_{sec}}(K|Y) \leq -\log (1-\epsilon)$ if $s_{sec}\geq s_{ver}$.
%\end{lemma}
%\begin{proof}
%Let $(W, Z)$ be a  joint distribution where $\Hav(W|Z) > \log (1-\epsilon)$ and $\delta^{\mathcal{D}_{s_{sec}}}((K, Y), (W, Z))\leq \epsilon$.  Consider the distinguisher $D(x, y)= f(y, x)$.  Clearly, $\expe[D(K, Y)] =1$.  By indistinguishability, $\expe[D(W, Z)]\geq 1-\epsilon$.  This means that $\expe_{z\leftarrow Z} D(W|Z=z, z) \geq 1-\epsilon$.  For all $z\in \mathcal{M}_2$ there is a unique $w\in\mathcal{M}_1$ such that $f(z, w) =1$, denote this value by $w_z$.  This means that $\expe_{z\leftarrow Z} \Pr[W = w_z | Z=z] \geq 1-\epsilon$.  We then have the following:
%\[
%\expe_{z\leftarrow Z} \max_{w} \Pr[W=w | Z=z] \geq \expe_{z\leftarrow Z} \Pr[W = w_z | Z=z] \geq 1-\epsilon.
%\]
%Taking the negative logarithm of each side yields that $\Hav(W|Z) \leq -\log (1-\epsilon)$.
%\end{proof}
%
%In the previous section~(\lemref{lem:resample possible}), we saw that some condition on $Y$ is necessary to make simulatable leakage meaningful.  Committing to part of $K$ with $Y$ seems like a reasonable midpoint between Lemmas~\ref{lem:resample possible} and~\ref{lem:verifiable no entropy}.  For example, having $K = (K_1, K_2)$ and $Y = f(K_1)$ where $f$ is a one-way function prevents the leak-and-resample simulator.  

We begin by showing there are simulatable leakage functions that completely remove all HILL entropy.
%Both $\hill$ and min-entropy are meaningful in this context.  However, in this setting \hill entropy and simulatable leakage are incomparable.  That is, there are simulatable leakage functions that remove all min-entropy and leakage functions that retain \hill entropy but are not simulatable.  We consider each of these results in turn.

\begin{lemma}[$\simul\not\Rightarrow \text{Indist}$]
\label{lem:simul not hill}
Let $K = (K_1, K_2)$ where $K_1\in \zo^{\ell_1}$ and $K_2\in \zo^{\ell_2}$ are uniformly distributed.  Let $f$ be a $(\epsilon_{owp}, s_{owp})$-one-way permutation from $\zo^{\ell_1}\rightarrow \zo^{\ell_1}$ computable in size $|f|$.  Let $Y = f(K_1)$.  Then $\Hoo(K|Y) = \ell_2$.  The function $\mathcal{L}(K) = K_2$ is $(0, \ell_2, \infty)$-simulatable and $H^{\hill}_{\epsilon, s_{sec}}(K|Y, \mathcal{L}(K)) \leq -\log(1-\epsilon)$ if $s_{sec}\geq |f| + \ell_1 + \ell_2$.
\end{lemma}
\begin{proof}
We begin by noting that $Y$ is a $(|f|, s_{owp}, \epsilon_{owp})$-witness hiding relation of $K$ where the relation is $R(k_1, k_2, y) = (f(k_1) \overset{?}=y)$.
The simulator $S$ for $\mathcal{L}$ computes a uniform sample from $\zo^{\ell_2}$.  This is identically distributed to $\mathcal{L}(K)$ and is computable in size $\ell_2$.  Since there is a unique $k_1$ for each $y$, $\Hav(K | Y) = \Hav(K_2 | Y) = \Hoo(K_2)$.  It remains to show that $H^{\hill}_{\epsilon, s_{sec}}(K|Y, \mathcal{L}(K)) \leq -\log(1-\epsilon)$.  Suppose not, that is, there exists a distribution $Z$ where $\delta^{\mathcal{D}_{s_{sec}}}((K, f(K_1), K_2), (Z, f(K_1), K_2))\leq \epsilon.$  We present the following distinguisher $D(k_1, k_2, y_1, y_2) = (f(k_1) \overset{?}=y_1 \wedge k_2 \overset{?}= y_2)$.

Clearly $\expe[D(K, f(K_1), K_2)] =1$.  By indistinguishability, $\expe[D(Z, f(K_1), K_2)]\geq 1-\epsilon$.  This means that $\expe_{(y_1, y_2)\leftarrow (f(K_1), K_2)} D(Z|f(K_1)=y_1\wedge K_2 = y_2, y_1, y_2) \geq 1-\epsilon$.  For all $y_1, y_2$ there is a unique $z\in\zo^{\ell_1+\ell_2}$ such that $D(z, y_1, y_2) =1$, denote this value by $z_{y_1, y_2}$.  This means that $\expe_{(y_1, y_2)\leftarrow (f(K_1), K_2)} \Pr[Z = z_{y_1, y_2}|f(K_1)=y_1\wedge K_2 = y_2] \geq 1-\epsilon$.  We then have the following:
\begin{align*}
\expe_{(y_1, y_2)\leftarrow (f(K_1), K_2)} \max_{z} \Pr[Z=z | f(K_1)=y_1\wedge K_2 = y_2] &\geq 
\\\expe_{(y_1, y_2)\leftarrow (f(K_1), K_2)} \Pr[Z = z_{y_1, y_2} | f(K_1)=y_1\wedge K_2 = y_2] &\geq 1-\epsilon.
\end{align*}
Taking the negative logarithm of each side yields that $\Hav(Z|f(K_1), K_2) \leq -\log (1-\epsilon)$.
\end{proof}
\noindent
\textbf{Notes:} Gentry and Wichs introduce a weaker notion of HILL entropy where the conditioning information is replaced as well as the secret~\cite{gentry2011separating}; this was called relaxed HILL entropy by Reyzin~\cite{reyzin2011some}.  \lemref{lem:simul not hill} extends to relaxed HILL entropy.  %The one-wayness of $f$ does not change the proof, it only prevents the triviality of a \emph{resample-and-leak} simulator.  The statements about the simulatability and the \hill entropy remain the same if $Y= K_1$.  If $f$ is a one-way function, instead of a one-way permutation, \lemref{lem:simul not hill} remains essentially the same except that the entropy of $K|f(K_1)$ and $K|f(K_1), K_2$ increase by the expected number of preimages of $f$.

Thus, we can have a perfect simulation that removes all remaining entropy from $K$.  That is, there are simulatable leakage functions that remove all HILL~(and true) entropy from the secret state.  We now show the converse is also true.

\subsection{Bounded Length $\not\Rightarrow \simul$}
We now proceed to show a leakage function that retains high HILL entropy~(indeed we show it retains high min-entropy) and is infeasible to simulate.  We will use a secure signature scheme and leak a valid signature.  This leakage function has been used previously to demonstrate the difficulty of constructing leakage resilient signature schemes~\cite{faust2012signature}.  We need a signature scheme that is hard to forge and in which a signature does not determine the secret key completely.  We begin by describing the EU-RMA notion of signatures from Goldwasser, Micali, and Rivest~\cite{DBLP:journals/siamcomp/GoldwasserMR88}.

\begin{definition}[EU-RMA]
We say that a signature scheme $\Sigma = (\gen, \sig , \ver)$ is $(q, s_{sec}, \epsilon)$-\emph{existentially unforgeable against random message attacks} if for all circuits $A$ of size $s_{sec}$ the following experiment succeeds with probability at most $\epsilon$:
\begin{align*}
\Pr_{(\pk, \sk)\leftarrow \gen(\cdot)}[m_1,\dots, m_q\leftarrow \mathcal{M}\wedge \sigma_i\leftarrow \sig(m_i, \sk) \wedge \\
(m^*, \sigma^*)\leftarrow A(m_1,\dots, m_q, \sigma_1,\dots, \sigma_q, \pk) \\
\wedge \forall i\ m^* \neq m_i \wedge \ver(\pk, m^*, \sigma^*) = 1]<\epsilon
\end{align*}
\end{definition}

Under this definition a signature must not be simulatable.  To ensure that the secret key still has high \hill entropy we need a signature scheme where multiple private keys exist for each public key.  %This can easily be achieved by adding bits to the secret key that are never used.  
However, leakage on such unused bits is easily simulatable.  We instead use a scheme where the secret key is computationally restricted given the public key.  These schemes naturally arise in leakage resilient cryptography.  We use the one-time secure signature scheme of Katz and Vaikuntanathan~\cite{katz2009signature}.  The construction of Katz and Vaikuntanathan is an extension of the Lamport signature scheme~\cite{lamport1979constructing} using collision resistant hash functions and error correcting codes.  For our setting, we only need Lamport's signature scheme.  The scheme will sign messages of length $\ell$.

\begin{construction}
\label{cons:katz sig}
Let $f$ be a $(\epsilon_{owf}, s_{owf})$-one-way function mapping $\zo^{k^c}\rightarrow \zo^{k}$ for some $c>1$:
\begin{description}
\item[Key Generation:] Choose random $x_{i, 0}, x_{i, 1}\leftarrow \zo^{k^c}$ for $i=1,..., \ell$.  Compute $y_{i, b}\leftarrow f(x_{i, b})$ for $i\in \{1,..., \ell \}$ and $b\in \zo$.  The public key is $pk = \{y_{i, b}\}$ and the secret key is $\{x_{i, b}\}$.
\item[Signing:] The signature on an $\ell$-bit message $m = m_1,\dots, m_\ell$ consists of the $\ell$ values $x_{1, m_1},\dots, x_{\ell, m_\ell}$.
\item[Verification:] Given $x_1,\dots, x_\ell$ and $m = m_1,\dots, m_\ell$ and $pk = \{y_{i, b}\}$, output 1 iff $y_{i, m_i}\overset{?}=f(x_i)$ for all $i$.
\end{description}
\end{construction}

\begin{theorem}
\label{thm:one time sig scheme}
Let $\lambda$ be a security parameter. 
\consref{cons:katz sig} is a $(1, \epsilon_{owf}, s_{owf})$-secure signature scheme.  Furthermore, $\Hav(SK | PK)\geq 2\ell k^{c} - 2\ell k $.  Moreover, for any message $m$, $\Hav(SK | PK ,\sig_{PK}(m)) \geq \ell k^{c} - 2\ell k$.
\end{theorem}
\begin{proof}
We omit the proof that the scheme is secure~(and that $Y = PK$ is witness hiding of $SK$).  The entropy calculations follow from~\cite[Lemma 2.2]{DBLP:journals/siamcomp/DodisORS08}: $\Hav(SK | PK) = 2\ell k^{c} - 2\ell k$.  Similarly, for any $m$, $\Hav(SK | PK , \sig_{PK}(m)) = 2\ell k^{c} - 2\ell k - \ell k^{c} = \ell k^{c} - 2\ell k$.
\end{proof}
\begin{lemma}
\label{lem:bounded not sim}
Let $(\gen, \sig, \ver)$ be as above for some $c>1$, let $K = SK, Y = PK$.  Then for any message $m$ the function $\mathcal{L}(K) = \sig_{SK}(m)$ is not simulatable by any $S$ of size $s_{sec} \le s_{owf}$ with $\epsilon \le \epsilon_{owf}$.  Furthermore, $\Hav(SK | PK, \mathcal{L}(K)) = \ell k^{c} - 2\ell k$~(and thus, $H^{\hill}_{0, \infty}(SK | PK, \mathcal{L}(K))= \ell k^{c}- 2\ell k$).
\end{lemma}
\begin{proof}
The lack of a simulator follows from the one-time security of the signature scheme.  The remaining entropy follows from \thref{thm:one time sig scheme}.
\end{proof}

\section{Simulatable vs. Hard-to-Invert Leakage}
\label{sec:sim and unp}

In the previous section, we showed that simulatable leakage and indistinguishable leakage are incomparable.  In this section, we turn to the weakest known setting of hard-to-invert leakage.  %We begin with the setting when $K$ is information-theoretically determined.  
We now show that simulatable leakage preserves unpredictability but there are leakage functions that preserve unpredictability that are not simulatable.  

\textbf{Note:} showing that leakage functions that preserve unpredictability may not be simulatable is enough to show a separation between indistinguishable and bounded leakage as well.  We keep these results separate for clarity.

\subsection{$\simul \Rightarrow$ Hard to invert }
%We begin with a definition of relaxed unpredictability entropy~(defined by Fuller, Meng, and Reyzin~\cite{fullerMengReyzin2013}).\footnote{In our proofs, we start from the weaker notion of relaxed unpredictability entropy and achieve the more standard unpredictability entropy.}
%\begin{definition}
%Let $K, Y$ be a pair of random variables.  We say that $K, Y$ have \emph{relaxed unpredictability entropy}, denoted $H^{\unprlx}_{\epsilon, s_{sec}}(K|Y)\geq k$ if for all $W, Z$ such that $\delta^{\mathcal{D}_{s_{sec}}}((K, Y), (W, Z))\leq \epsilon$ and for all $\mathcal{I}$ of size at most $s_{sec}$, $\Pr[\mathcal{I}(Z) = W]\leq 2^{-k}$. 
%\end{definition}
%Note that relaxed unpredictability entropy is a stronger condition than a witness hiding relation.

%We again consider the unique witness setting and show that simulatable leakage decreases unpredictability entropy.
%
%\begin{lemma}
%\label{lem:unp red info det}
%Let $(K, Y)$ be a pair of variables and let $Y, K$ be a unique witness relation computable by $f$ of size $s_{ver}$.  Let $\mathcal{L}$ be a $(\epsilon_{sim}, s_{sim}, s_{sec})$-simulatable leakage function.  Then
%\[
%H^{\unp}_{0, s_{ent}'}(K|Y, \mathcal{L}(K)) \geq -\log\left(2^{-H^{\unprlx}_{\epsilon_{ent}, s_{ent}}(K |Y )}+\epsilon_{sim}+\epsilon_{ent}\right).
%\]
%\end{lemma}
%where $s'_{ent} = \min\{s_{sec}-s_{ver}, s_{ent} - s_{sim}\}$.
%\begin{proof}
%Denote $H^{\unprlx}_{\epsilon_{ent}, s_{ent}}(K |Y ) =k$ and $k'= -\log \left(2^{-k}+\epsilon_{sim}+\epsilon_{ent}\right)$.  Let $S$ be a simulator of size $s_{sim}$ for $\mathcal{L}$.
%Suppose, for the purpose of arriving at a contradiction, that $H^{\unprlx}_{0, s_{ent}'}(K|Y, \mathcal{L}(K)) \leq k'$.  That is, $\exists \mathcal{I}'$ of size at most $s_{ent}$ such that $\Pr[\mathcal{I}'(Y, \mathcal{L}(K)) = K] > 2^{-k'}$.
%%for all $W, Z_1, Z_2$ where $\delta^{\mathcal{D}_{s_{ent}}}((K, Y, \mathcal{L}(K)), (W, Z_1, Z_2))\leq \epsilon$, 
%%\[
%%\Pr[\mathcal{I}(Z_1, Z_2)]\geq 2^{-k'}.
%%\]
%
%Let $W, Z$ be a distribution such that that $\delta^{\mathcal{D}_{s_{ent}}}((K, Y), (W, Z))\leq \epsilon$.  
%We describe an inverter $\mathcal{I}$~(of size $s_{sim} + s_{ent}' \leq s_{ent}$) for $W$ given $Z$ as follows:
%\begin{itemize}
%\item Input $y$.
%\item Run $z\leftarrow S(z)$.
%\item Output $\mathcal{I}'(y, z)$.
%\end{itemize}
%
%The crux of the lemma is that the inverter $\mathcal{I}'$ must work for all indistinguishable distributions, even with simulated leakage.  This is because $\mathcal{I}$ can verify if $\mathcal{I}'$ gives a correct answer.
%\begin{claim}
%$\Pr[\mathcal{I}'(Z,S(Z)) = W]\geq \Pr[\mathcal{I}'(Y, \mathcal{L}(K)) = K] - \epsilon_{sim} - \epsilon_{ent}$. 
%\end{claim}
%\begin{proof}
%First recall that since $\delta^{\mathcal{D}_{s_{ent}}}((K, Y), (W, Z))\leq \epsilon_{ent}$, then $\delta^{\mathcal{D}_{s_{ent}}}(Y, Z')\leq \epsilon$.  Thus, 
%\[
%\delta^{\mathcal{D}_{s_{ent}- s_{sim}}}((Y, S(Y)), (Z', S(Z')))\leq \epsilon_{ent}.
%\]
%By assumption $\delta^{\mathcal{D}_{s_{sim}}}((Y, \mathcal{L}(K)), (Y, S(Y))\leq \epsilon_{sim}$, thus, 
%\[
%\delta^{\mathcal{D}_{\min_{s_{sec}, s_{ent}- s_{sim}}}}((Y, \mathcal{L}(K)), (Z', S(Z')))\leq \epsilon_{ent}+\epsilon_{sim}.
%\]
%Now suppose for contradiction that $\Pr[\mathcal{I}'(Z', S(Z')) = W']< \Pr[\mathcal{I}'(Y, \mathcal{L}(K)) = K ] -\epsilon_{sim}-\epsilon_{ent}$.  We present a distinguisher $D$~(of size $s_{ent}+s_{ver}$) for $(Y, \mathcal{L}(K))$ and $(Z', S(Z'))$:
%\begin{itemize}
%\item On input $y, z$.
%\item Run $x\leftarrow \mathcal{I}'(y, z)$.
%\item Output $1$ if and only if $f(y, x)=1$.
%\end{itemize}
%%By assumption $\Pr[D(Y, \mathcal{L}(K))] \geq \Pr[\mathcal{I}(Y, \mathcal{L}(K)) =K]\geq 2^{-k'}$.  
%Then, $\Pr[D(Y, \mathcal{L}(K))=1]-\Pr[D(Z', S(Z')) =1] \geq = \Pr[\mathcal{I}'(Y, \mathcal{L}(K) = K)] - \Pr[\mathcal{I}'(Z', S(Z')) = W'] > \epsilon_{sim}+\epsilon_{ent}$.
%\end{proof}
%
%\noindent
%Thus, $\Pr[\mathcal{I}(Z) = W ] =\Pr[\mathcal{I}'(Z, S(Z)) = W]\geq 2^{-k'}-\epsilon_{sim} - \epsilon_{ent}=k$ a contradiction.  This completes the proof.
%\end{proof}
%
%In the setting where $K$ is completely determined, simulatable leakage does not remove unpredictability.  The reduction can check whether it has the right answer, ``removing'' indistinguishable distributions.  In the next section, we consider the setting where $K$ is not determined but restricted to some hard to compute space~(pre images of a one-way function).

%For the moment we use one-way function notation, this notion is related to unpredictability entropy.
%
%\begin{proposition}
%\label{prop:owf implies unp}
%Let $f$ be a $(s, \epsilon, K)$-one-way function, then $H^{\unp}_{0, s}(K|f(K))\geq -\log \epsilon$.
%\end{proposition}
%In the case where $f$ is a permutation~(information-theoretically determines $K$) the proposition works both ways.\footnote{The unpredictability entropy inverter must recover the used preimage, while the one-way function inverter must recover some preimage.  In the case where there is a single preimage, these are the same task.}
%\begin{proposition}
%\label{prop:unp implies owp}
%Let $f$ be a injective function such that $H^{\unp}_{0, s}(K|f(K))\geq -\log \epsilon$ then $f$ is a $(s,\epsilon, K)$-one-way function~(permutation).
%\end{proposition}

If the starting condition $Y$ is a witness hiding function of $K$ we can show that any simulatable leakage does not improve the ability to predict secret state.  More precisely we show the ability to predict the witness given both $Y$ and $\mathcal{L}(K)$ is not significantly different than the ability to predict the witness given just $Y$. % Note that being a witness hiding relation and being unpredictable are closely related but incomparable.  A witness hiding relation guarantees there is a verifiable procedure for valid witness that outputs $1$ and there is no way to produce indistinguishable fake witnesses.  Unpredictability guarantees that an indistinguishable distribution is hard to predict.

\begin{lemma}
\label{lem:owf unpredictability}
Let $K$ be a distribution over $\mathcal{M}$ and let $Y$ be some public information.  Furthermore, let $R$ be a $(s_{rel}, s_{inv}, \epsilon_{rel})$-witness hiding relation.  %$f:\mathcal{M}\rightarrow \zo^*$ be a $(s_{owf}, \epsilon_{owf}, K)$-one-way function.  
%Furthermore, assume that $H^{\unp}_{0, s}(K | Y)\ge k$.
Let $\mathcal{L}$ be a $(\epsilon_{sim}, s_{sim}, s_{sec})$-simulatable leakage for $(K,Y)$.  Then $H^{\unp}_{0, s_{inv}'}(K | Y, \mathcal{L}(K)) \ge k'$ for $k' = -\log(\epsilon_{rel} + \epsilon_{sim})$ and $s_{inv}' = \min\{ s_{sec}-s_{rel},s_{inv}-s_{sim} \}$.

%$f | \mathcal{L}(K) = f(K) | \mathcal{L}(K)$ is a $(s'_{owf}, \epsilon_{owf}+\epsilon_{sim}, K)$-one-way function where $s_{owf}' = \min \{s_{sec}-|f|, s_{owf} - s_{sim}\}$.  
\end{lemma}
\begin{proof}


Assume, with the aim of arriving at a contradiction, that there exists an inverter $\mathcal{I}'$ of size $s_{inv}'$ such that $\Pr[\mathcal{I}'(Y, \mathcal{L}(K)) = K] > 2^{-k'}$.  Let $S$ be a simulator of size $s_{sim}$ for $\mathcal{L}$.  

\begin{claim}
$\Pr[R(\mathcal{I}'(Y, S(Y)), Y)=1]\ge \Pr[R(\mathcal{I}'(Y, \mathcal{L}(K)), Y) =1] - \epsilon_{sim} \ge 2^{-k'} - \epsilon_{sim}$. 
\end{claim}
\begin{proof}
Recall that $\delta^{\mathcal{D}_{s_{sec}}}((Y, \mathcal{L}(K)), (Y, S(Y)))\le \epsilon_{sim}$.  Suppose for contradiction that $\Pr[R(\mathcal{I}'(Y, S(Y)), Y) =1 ] < \Pr[R(\mathcal{I}'(Y, \mathcal{L}(K)), Y) =1 ] -\epsilon_{sim}$. We present a distinguisher $D$ of size $s_{inv}' + s_{rel}\le s_{sec}$:
\begin{itemize}
\item On input $y, z$.
\item Run $x\leftarrow \mathcal{I'}(y, z)$.
\item Output $1$ if and only if $R(x, y)=1$.
\end{itemize}
Then 
\begin{align*}
\Pr&[D(Y, \mathcal{L}(K)) = 1] -\Pr[D(Y, S(Y)) =1 ]\\&= \Pr[R(\mathcal{I}'(Y, \mathcal{L}(K)), Y) =1 ] - \Pr[R(\mathcal{I}'(Y, S(Y)), Y) =1 ]> \epsilon_{sim}.
\end{align*}
This is a contradiction.
\end{proof}

\noindent
Thus, $\Pr[R(\mathcal{I}'(Y, S(Y)), Y) =1] \ge 2^{-k'} - \epsilon_{sim}$.  
We present the following inverter $\mathcal{I}$ of size $s_{inv}'+ s_{sim}\le s_{inv}$:
\begin{itemize}
\item Input  $y$.
\item Run $z\leftarrow S(y)$.
\item Output $\mathcal{I'}(y, z)$.
\end{itemize}

Then, $\Pr[R(\mathcal{I}(Y), Y) =1 ]>  2^{-k'} -\epsilon_{sim} = \epsilon_{sim} + \epsilon_{rel}-\epsilon_{sim} = \epsilon_{rel}$.  This contradicts the witness hiding property of $R$.
\end{proof}
%
%Denote $\epsilon' = \epsilon_{owf}+\epsilon_{sim}$ and $f' = f|\mathcal{L}$.  Let $S$ be a simulator of size $s_{sim}$ for $\mathcal{L}$.  Suppose, with the aim of arriving at a contradiction, that there exists an $\mathcal{I}'$ of size $s_{owf}'$ that finds pre images of $f'$ with probability at least $\epsilon'$.  That is, $\Pr[f'(\mathcal{I}'(f'(K))) = f'(K))]> \epsilon'$.  Since $f$ is a substring of $f'$, this implies that $\Pr[f(\mathcal{I}'(f'(K))) = f(K)]>\epsilon'$.  Let $S$ be a simulator of size $s_{sim}$ for $\mathcal{L}$.  We present the following inverter $\mathcal{I}$ of size at most $s'_{owf}+ s_{sim}\leq s_{owf}$ that inverts $f(K)$:
%\begin{itemize}
%\item Input $y$.
%\item Run $z\leftarrow S(y)$.
%\item Output $\mathcal{I}'(y, z)$.
%\end{itemize}
%\begin{claim}
%$\Pr[f(\mathcal{I}'(f(K), S(f(K)))) = f(K)] \geq \Pr[f(\mathcal{I}'(f'(K))) = f'(K)] -\epsilon_{sim}$.
%\end{claim}
%\begin{proof}
%Recall $
%\delta^{\mathcal{D}_{s_{sec}}}((f(K), \mathcal{L}(K)), (f(K), S(f(K))))\leq \epsilon_{sim}$.
%
%Suppose for contradiction $\Pr[f(\mathcal{I}'(f(K), S(f(K))) )= f(K)] < \Pr[f(\mathcal{I}'(f'(K))) =f(K)] - \epsilon_{sim}$.  We present a distinguisher $D$~(of size $s_{owf}' + |f|\leq s_{sec}$) for $f'(K)$ and $(f(K), S(f(K)))$:
%\begin{itemize}
%\item On input $y, z$.
%\item Run $x\leftarrow \mathcal{I}'(y, z)$
%\item Output $1$ if and only if $f(x) = y$.
%\end{itemize}
%Then,
%\begin{align*}
%\Pr&[D(f'(K)) = 1] -\Pr[D(f(K), S(f(K))) =1 ]\\&=\Pr[f(\mathcal{I}'(f(K), \mathcal{L}(K))) = f(K)] - \Pr[f(\mathcal{I}'(f(K), S(f(K)))) = f(K)]> \epsilon_{sim}
%\end{align*}
%This is a contradiction.
%\end{proof}
%
%\noindent We then have that 
%\begin{align*}
%\Pr[f(\mathcal{I}(f(K))) = f(K)] &= \Pr[f(\mathcal{I}'(f(K), S(f(K))))= f(K)] \\
%&\geq \Pr[f(\mathcal{I}'(f(K), \mathcal{L}(K))) = f(K)] - \epsilon_{sim}\\
%&> \epsilon_{sim}+\epsilon_{owf} - \epsilon_{sim} = \epsilon_{owf}.
%\end{align*}
%This contradicts the fact that $f$ is a $(\epsilon_{owf}, s_{owf}, K)$-one-way function.
%\end{proof}
%
%\begin{corollary}
%Let $K$ be a distribution $\mathcal{M}$ and let $f:\mathcal{M}\rightarrow \zo^*$ be a $(s_{owf}, \epsilon_{owf}, K)$-one-way function.  Let $\mathcal{L}$ be a $(\epsilon_{sim}, s_{sim}, s_{sec})$-simulatable leakage for $(K, f(K))$.  Then 
%\[
%H^{\unp}_{0, s_{owf}'}(K | f(K), \mathcal{L}(K)) \geq -\log\left( \epsilon_{owf} + \epsilon_{sim}\right)
%\]
%where $s_{owf}' = \min \{s_{sec}-|f|, s_{owf} - s_{sim}\}$.  
%\end{corollary}
%\begin{proof} Direct consequence of Proposition~\ref{prop:owf implies unp} and \lemref{lem:owf unpredictability}.\end{proof}
%
%Thus, unpredictability is preserved under simulatable leakage.  In the next section, we show that simulatable leakage is strong than unpredictability.  That is, there are leakage functions that preserve unpredictability and are not simulatable.
%%%%%I don't think these proofs are right.  I don't think they are really showing anything interesting anyways.  We've made a connection between unpredictability, that's good enough.
%

%We now rederive \lemref{lem:unp red info det} using \lemref{lem:owf unpredictability}.  First we need the fact that for a unique-witness relation the job of the inverter is not much harder with relaxed unpredictability entropy.
%\begin{proposition}
%Let $(K, Y)$ be a pair of variables and let $Y, K$ be a unique witness relation computable by $f$ of size $s_{ver}$.  Then 
%\[
%H^{\unp}_{0, s - s_{ver}}(K|Y)\leq -\log \left(2^{-H^{\unprlx}_{\epsilon, s}}(K|Y)+\epsilon\right).
%\]
%\end{proposition}
%\begin{proof}
%Denote $\epsilon' = 2^{-H^{\unprlx}_{\epsilon, s}}(K|Y)+\epsilon$.
%Suppose not, that is, $\exists \mathcal{I}'$ of size $s-s_{ver}$ such that $\Pr[\mathcal{I}'(Y)=K]\geq \epsilon'$.  Furthermore, let $W, Z$ be a distribution such that $\delta^{\mathcal{D}_{s}}((K, Y), (W, Z))\leq \epsilon$.
%\begin{claim}
%$\Pr[\mathcal{I}'(S) = W]\geq \Pr[\mathcal{I}'(Y) = K] - \epsilon$.
%\end{claim}
%\begin{proof}
%If not, we have the following distinguisher $D$ for $Y, S$~(which suffices to distinguish the joint distribution) of size $|\mathcal{I}'|+s_{ver}\leq s$, $D(x, y) = (f(\mathcal{I}'(y), y) \overset{?}=1)$. 
%\end{proof}
%\noindent Thus, $\Pr[\mathcal{I}'(S) = W] > 2^{-H^{\unprlx}_{\epsilon, s-s_{ver}}}(K|Y)+\epsilon - \epsilon = 2^{-H^{\unprlx}_{\epsilon, s-s_{ver}}}(K|Y)$ a contradiction.
%\end{proof}
%\begin{corollary}
%\label{cor:owp no good change}
%$ 2^{-H^{\unprlx}_{\epsilon, s}}(K|Y) \leq 2^{-H^{\unp}_{0, s - s_{ver}}(K|Y)}-\epsilon$.
%\end{corollary}
%\begin{corollary}
%Let $(K, Y)$ be a pair of variables and let $Y, K$ be a unique witness relation computable by $f$ of size $s_{ver}$ that is $(\epsilon_{owf}, s_{owf}, K)$-hard.  Let $\mathcal{L}$ be a $(\epsilon_{sim}, s_{sim}, s_{sec})$-simulatable leakage function.  Then
%\begin{align*}
%H^{\unp}_{0, s_{ent}'}(K|Y, \mathcal{L}(K)) &\geq -\log\left(2^{-H^{\unprlx}_{\epsilon_{ent}, s_{ent}}(K |Y )}+\epsilon_{sim}+\epsilon_{ent}\right) \\
%&\geq -\log\left(2^{-H^{\unprlx}_{0, s_{ent}}(K |Y )}+\epsilon_{sim}\right) \\
%&\geq -\log\left(\epsilon_{owf}+\epsilon_{sim}+\epsilon_{ent}\right) 
%\end{align*}
%where $s'_{ent} = \min\{s_{sec}-s_{ver}, s_{owf} - s_{sim} -s_{ver}\}$.
%\end{corollary}
%\begin{proof}
%Combination of \lemref{lem:owf unpredictability}, Proposition~\ref{prop:owf implies unp} and \corref{cor:owp no good change}.
%\end{proof}

\subsection{Hard to invert $\not\Rightarrow \simul$}
In the previous section, we saw that simulatable leakage implied unpredictability entropy.  In this section, we show this containment is tight.

\begin{lemma}
\label{lem:unp not sim}
Let $f_1:\mathcal{M}_1\rightarrow \mathcal{M}_2$ be $(s_{1}, \epsilon_1, U_{\mathcal{M}_1})$-one-way and let $f_2:\mathcal{M}_2\rightarrow \zo^*$ be $(s_2, \epsilon_2, f_1(U_{\mathcal{M}_1}))$-one-way.  Then for $K = U_{\mathcal{M}_1}$ and $Y = f_2(f_1(K))$, $\mathcal{L}(K) = f_1(K)$ the following hold:
\begin{enumerate}
\item The function $f_2\circ f_1$ is $(s_2 - |f_1|, \epsilon_2, U_{\mathcal{M}_1})$-one-way.  Alternatively, $H^{\unp}_{0 , s_2-|f_1|}(K| Y) \geq -\log \left( \epsilon_2\right) $.
\item The function $f_3 = (f_2\circ f_1) | f_1$ is $(s_1-|f_2|, \epsilon_1, U_{\mathcal{M}_1})$-one-way.  Alternatively, $H^{\unp}_{0, s_1 - |f_2|}(K | Y , f_1(K))\geq -\log \left( \epsilon_1\right)$.
\item The leakage function $\mathcal{L}(K) = f_1(K)$ is not $(|f_2|, s_2, 1-\epsilon_2)$-simulatable.
\end{enumerate}
\end{lemma}
\begin{proof}
We prove each statement in turn.  Statement \emph{1.}\,follows directly from the fact that $f_2$ is hard on $f_1(U_{\mathcal{M}_1})$.  Suppose Statement \emph{2.} is not true, that is, there exists an inverter $\mathcal{I}'$ of size $s_1 -|f_2|$ that inverts $f_2\circ f_1 | f_1$.  We present the following inverter $\mathcal{I}$ for $f_1$~(of size $s_1$):
\begin{itemize}
\item Input $y\in \mathcal{M}_2$.  
\item Compute $z = f_2(y)$.
\item Output $\mathcal{I}'(y, z)$.
\end{itemize}
Then we have the following:
\[
\Pr_{x\leftarrow U_{\mathcal{M}_1}}[f_1(\mathcal{I}(f_1(x))) = f_1(x)] \geq \Pr_{x\leftarrow U_{\mathcal{M}_1}}[(f_2 \circ f_1 | f_1)(\mathcal{I}'((f_2 \circ f_1 | f_1)(x))) = (f_2\circ f_1 | f_1)(x)] > \epsilon_1.
\]
This contradicts the one-wayness of $f_1$.

Now suppose that Statement \emph{3.} is not true.  That is, there exists a simulator $S$ of size $s_2$ that simulates $f_1(K)$.  That is, $\delta^{\mathcal{D}_{s_{sec}}}(((f_2\circ f_1)(K), f_1(K)), ((f_2\circ f_1)(K), S(Y)))< 1-\epsilon_2$.  We show this contradicts Statement \emph{2.}  Consider the distinguisher $D$, of size $|f_2|$, that does the following:
\begin{itemize}
\item Input $y, z$.
\item Output $1$ if and only if $y = f_2(z)$.
\end{itemize}
Clearly, $ \expe [ D((f_2\circ f_1)(K), f_1(K))] = 1$.  Thus, by indistinguishability, $\expe[D((f_2\circ f_1)(K), S((f_2\circ f_1)(K)))] \geq 1-(1-\epsilon_2) \geq \epsilon_2$.  This means that $S$ is an inverter for $f_2$, which is a contradiction.
\end{proof}
In the setting where $f_1$ and $f_2$ are the same hardness, revealing $f_1(K)$ does not significantly decrease the unpredictability of $K$, but it is not simulatable at any reasonable quality.