%!TEX root = thesis.tex
%\section{Characterizing unguessable block sources}
\chapter{A Definitional Equivalence}
\label{sec:def equiv}
%We refer to $\mathcal{W}$ as the \emph{family}, the distribution $W$ as a \emph{source} and the particular outcome taken $w\in W$ as the \emph{sample}.   

As described in \secref{sec:family of dist}, our negative results rule out security for an average member of $\mathcal{W}$.  It may be possible to significantly improve parameters by only ruling out security for a single member $W$.  

Recall the security game of a fuzzy extractor: 1) the challenger specifies $(\sketch, \rec)$, 2) the adversary specifies a source $W\in \mathcal{W}$, and 3) the challenger wins if $\Hav(W|\sketch(W))\ge \tilde{m}$.  Instead of just thinking of the uniform distribution over $\mathcal{W}$, consider an arbitrary distribution $V$ over elements of $\mathcal{W}$.  The minimax theorem says we can reverse which of these actions is announced first~\cite{von1928theorie} if $\mathcal{A}$ announces $V$ instead of a single element $W$.  That is, the following two player games have the same equilibrium:


\begin{center}
\begin{tabular}{c|c}
\begin{minipage}{3in}
\begin{tabbing}
123\=123\=123\=123\=123\=\kill
\textbf{Experiment} $\Exp^{\mathcal{W}}_1(\mathcal{A}, \mathcal{C}, \tilde{m})$: \\
$(\sketch, \rec)\leftarrow \mathcal{C}(\mathcal{W})$\\
$W \leftarrow \mathcal{A}(\mathcal{W}, \sketch, \rec)$\\
If $W\not\in \mathcal{W}$, $\mathcal{C}$ wins.\\
If $\Hav(W | \sketch(W))\ge \tilde{m}$, $\mathcal{C}$ wins.\\
Else $\mathcal{A}$ wins.
\end{tabbing} 
\vspace{.065in}
\end{minipage}  &
\begin{minipage}{3in}
\begin{tabbing}
123\=123\=123\=123\=123\=\kill
\textbf{Experiment} $\Exp^{\mathcal{W}}_2(\mathcal{A}, \mathcal{C}, \tilde{m})$: \\
$V \leftarrow \mathcal{A}(\mathcal{W})$\\
$(\sketch, \rec)\leftarrow \mathcal{C}(V, \mathcal{W})$\\
$W \leftarrow V$\\
If $W\not\in \mathcal{W}$, $\mathcal{C}$ wins.\\
If $\Hav(W | \sketch(W))\ge \tilde{m}$, $\mathcal{C}$ wins.\\
Else $\mathcal{A}$ wins.
\end{tabbing}
\end{minipage}
\end{tabular}
\end{center}

%The difference between these two games is which player announces their action first, in $\Exp^{\mathcal{W}}_1$, the challenger announces a pair of algorithms and the adversary specifies a source.  In $\Exp^{\mathcal{W}}_2$, the adversary specifies a distribution $V$ over sources, and the challenger then specifies their algorithm.  
This means that showing security for a family of distributions $\mathcal{W}$ is equivalent to showing security for all distributions $V$ when the distribution $V$ is known to the algorithms.  In our negative results, the adversary uses the uniform distribution $V$ over $\mathcal{W}$.  However, it may be possible to improve parameters by using a different $V$; this would rule out only some member of $\mathcal{W}$, not an average member.  This is true for fuzzy extractors as well and is resilient to changes in parameters, including imperfect correctness.


\chapter{Characterizing unguessable block sources}
\label{sec:characterize}

\defref{def:block guessable} is an inherently adaptive definition and a little unwieldy.  In this section, we partially characterize sources that satisfy \defref{def:block guessable}.
The majority of the difficulty in characterizing \defref{def:block guessable} is that different blocks may be dependent, so an equality query on block $i$ may reshape the distribution of block $j$.  In the examples that follow we denote the adversary by $S$ as we consider security against computationally unbounded adversaries defined in VGB obfuscation~(\defref{def:obf}).  We first show some sources that are unguessable block sources~(\secref{sec:positive ex}) and then show distributions with high overall entropy that are not unguessable block sources~(\secref{sec:negative ex}).

\section{Positive Examples}
\label{sec:positive ex}
We begin with the case of independent blocks.

\begin{claim}
\label{cl:independent high ent}
Let $W = W_1,  ... , W_\gamma$ be a source in which all blocks $W_j$  are mutually independent.  Let $\alpha$ be a parameter.  Let $J\subset \{1,..., \gamma\}$ be a set of indices such that for all $j\in J$, $\Hoo(W_j ) =\alpha $.  Then for any $q$, $W$ is a $(q, \alpha - \log (q+1), \gamma - |J|)$-unguessable block source.  In particular, when $\alpha = \omega(\log n)$ and $q = \poly(n)$, then $W$ is a $(q, \omega(\log n), \gamma - |J|)$-unguessable block source.
\end{claim}
\begin{proof}
It suffices to show that for all $j\in J$, $\Hav(W_j |View(S^{I_{W}(\cdot, \cdot)})) \ge \alpha -\log (q+1)$.
We can ignore queries for all blocks but the $j$-th, as the blocks are independent. Furthermore, without loss of generality, we can assume that no duplicate queries are asked, and that the adversary is deterministic ($S$ can calculate the best coins). Let $A_1, A_2, \dots A_q$ be the random variables representing the oracle answers for an adversary $S$ making $q$ queries about the $j$th block. Each $A_k$ is just a bit, and at most one of them is equal to 1 (because duplicate queries are disallowed). Thus, the total number of possible responses is $q+1$. Thus, we have the following,
\begin{align*}
\Hav(W_j | View(S^{I_{W}(\cdot, \cdot)})) &= \Hav(W_j| A_1, \dots, A_q)\\
&\ge\Hoo(W_j) - \log (q+1)\\
&=\alpha - \log (q+1)\,,
\end{align*}
where the second line follows from the first by~\cite[Lemma 2.2]{DBLP:journals/siamcomp/DodisORS08}.
\end{proof}
\noindent \consref{cons:informal construction} is a computational fuzzy extractor for block fixing sources. \clref{cl:independent high ent} shows that unguessable block distributions are a superset of block fixing sources.
We now consider more complicated distributions where blocks are not independent.

\begin{claim}
\label{cl:each block from single seed}
Let $f:\zo^e \rightarrow \mathcal{Z}^\gamma$ be a function.  Furthermore, let $f_j$ denote the restriction of $f$'s output to its $j$th coordinate.  If for all $j$, $f_j$ is injective, then $W = f(U_e)$ is a $(q, e - \log (q+1), 0)$-unguessable block source.
\end{claim}
\begin{proof}
Since $f$ is injective on each block, \[\Hav(W_j | View(S^{I_{W}(\cdot, \cdot)})) = \Hav(U_e | View(S^{I_{W}(\cdot, \cdot)})).\]  Consider a query $q_k$ on block $j$.  There are two possibilities: either $q_k$ is not in the image of $f_j$,  or $q_k$ can be considered a query on the preimage $f_j^{-1}(q_k)$. Then (by assuming $S$ knows $f$) we can eliminate queries which correspond to the same value of $U_e$.  Then the possible responses are strings with Hamming weight at most $1$ (like in the
proof of \clref{cl:independent high ent}),
 and by~\cite[Lemma 2.2]{DBLP:journals/siamcomp/DodisORS08} we have for all $j$, $\Hav(W_j | View(S^{I_{W}(\cdot, \cdot)})) \geq \Hoo(W_j) -\log (q+1)$.
\end{proof}

Note the total entropy of a source in \clref{cl:each block from single seed} is $e$, so there is a family of distributions with total entropy $\omega(\log n)$ for which \consref{cons:first construction} is secure.  For these distributions, all the coordinates are as dependent as possible: one determines all others.
We can prove a slightly weaker claim when the correlation between the coordinates $W_j$ is arbitrary:

\begin{claim}
\label{cl:all blocks entropy}
Let $W = W_1,..., W_\gamma$ be a source.  Suppose that for all $j$, $\Hoo(W_j)\geq \alpha$, and that $q \le 2^{\alpha}/4$ (this holds asymptotically, in particular, if $q$ is polynomial and $\alpha$ is super-logarithmic). Then  $W$ is a $(q, \alpha-1-\log(q+1), 0)$-unguessable block source.
\end{claim}

\begin{proof}
Intuitively, the claim is true because the oracle is not likely to return 1 on any query. Formally, we proceed by induction on oracle queries,
using the same notation as in the proof of   \clref{cl:independent high ent}. Our inductive hypothesis is
that $\Pr[A_1\neq 0 \vee \dots \vee A_{k-1}\neq 0] \leq (k-1)2^{1-\alpha}$.  If the inductive hypothesis holds, then, for each $j$,
\begin{equation}
\label{eq:cond-entropy}
\Hoo(W_j | A_1= \dots= A_{k-1}=0) \ge \alpha-1\,.
\end{equation}
This is true for $k=1$ by the condition of the theorem. It is true for $k>1$ because, as a consequence of the definition of $\Hoo$,
for any random variable $X$ and event $E$, $\Hoo(X|E)\ge \Hoo(X)+\log\Pr[E]$; and $(k-1) 2^{1-\alpha}\leq 2 q 2^{-\alpha} \leq 1/2$.

We now show that $\Pr[A_1\neq 0 \vee \dots \vee A_{k}\neq 0] \leq k 2^{1-\alpha}$, assuming that $\Pr[A_1\neq 0 \vee \dots \vee A_{k-1}\neq 0] \leq (k-1)2^{1-\alpha}$.
\begin{align*}
\Pr[A_1&\neq 0 \vee \dots \vee A_{k-1}\neq 0 \vee A_k\neq 0]  \\
&=\Pr[A_1\neq 0 \vee \dots \vee A_{k-1}\neq 0]+\Pr[A_1=\dots = A_{k-1}=0 \wedge A_k=1]\\
& \le  (k-1)2^{1-\alpha}+\Pr[A_k=1\,|\,A_1=\dots = A_{k-1}=0]\\
& \le  (k-1)2^{1-\alpha}+\max_j 2^{-\Hoo(W_j | A_1=\dots =A_{k-1}=0)}\\
& \le  (k-1)2^{1-\alpha}+ 2^{1-\alpha}\\
& = k 2^{1-\alpha}
\end{align*}
(where the third line follows by considering that to get $A_k=1$, the adversary needs to guess some $W_j$, and the fourth line follows by~\eqref{eq:cond-entropy}).
Thus, using $k=q+1$ in~\eqref{eq:cond-entropy},
 we know $\Hoo(W_j | A_1= \dots= A_q=0) \ge \alpha-1$.  Finally this means that
\begin{align*}
\Hav(W_j | A_1,\dots, A_q) &\ge -\log ( 2^{-\Hoo(W_j | A_1= \dots= A_q=0)}\Pr[A_1=\dots=A_q=0]\\
&\, \, \,\,\,\,\,+1\cdot \Pr[A_1\neq 0 \vee \dots \vee  A_q\neq 0] )\\
& \ge -\log \left(  2^{-\Hoo(W_j | A_1= \dots= A_q=0)}+q2^{1-\alpha} \right)\\
& \ge -\log \left(  (q+1) 2^{1-\alpha}\right) = \alpha-1-\log(q+1)\,.
\end{align*}
\end{proof}

\section{Negative Examples}
\label{sec:negative ex}
Claims~\ref{cl:each block from single seed} and~\ref{cl:all blocks entropy} rest on there being no easy ``entry'' point to the distribution.  This is not always the case.  Indeed, it is possible for some blocks to have very high entropy but lose all of it after equality queries.

\begin{claim}
Let $p = \poly(n)$ and let $f_1,..., f_{\gamma}$ be injective functions where $f_j:\zo^{j\times \log p} \rightarrow \zo^n$.\footnote{Here we assume that $n\ge \gamma \times \log p$, that is, the source has a small number of blocks.}  Then define the distributions 
\begin{align*}
W_1 &= f_1(U_{1,\dots,\log p}), \\
W_2 &= f_2(U_{1,\dots, 2\log p}),\\
&\dots, \\
W_\gamma &= f_\gamma(U).
\end{align*}
There is an adversary making $p\times \gamma = \poly(n)$ queries such that \[\Hav(W | View(S^{I_W(\cdot, \cdot)})) = 0.\]
\end{claim}
\begin{proof}
Let $x$ be the sampled value of the seed $U$.
We present an adversary $S$ that completely determines $x$.  $S$ computes $y_1^1 = f_1(x_1^1),..., y_1^p = f_1(x_1^p)$.  Then $S$ queries on $(1, y_1^1),..., (1, y_1^p)$; exactly one answer returns $1$.  Let this value be $y_1^*$ and its preimage $x_1^*$.  Then $S$ computes $y_2^1 = f_2(x_1^*,x_2^1), ..., y_2^p= f_2(x_1^*, x_2^p)$ and queries $y_2^1,..., y_2^p$.  Again, exactly one of these queries returns $1$.  This process is repeated until all of $x$ is recovered~(and thus $w$), at which point $\Hav(W | View(S^{I_W(\cdot, \cdot)})) = 0$.  %The total space complexity of this algorithm can be reduced to a single query~(by computing $y$ as necessary) as its total time is $O(p\times \gamma)$.
\end{proof}

The previous example relies on an adversary's ability to determine a block from the previous blocks.  We formalize this notion next.  We define the entropy jump of a block source as the remaining entropy when other blocks are known:

\begin{definition}
Let $W = W_1,..., W_\gamma$ be a source under ordering $i_1,..., i_\gamma$.  The \emph{jump} of a block $i_j$ is $\mathtt{Jump}(i_j) = \max_{w_{i_1},..., w_{i_{j-1}}} H_0 (W_{i_j} | W_{i_1} = w_{i_1} ,..., W_{i_{j-1}} = w_{i_{j-1}})$.
\end{definition}

If an adversary can learn blocks in succession they can eventually recover the entire secret.  In order for a source to be block unguessable the adversary must get ``stuck'' early enough in their recovery process.  This translates to having a super-logarithmic jump early enough.

\begin{claim}
Let $W$ be a distribution and let $q$ be a parameter.  If there exists an ordering $i_1,..., i_\gamma$ such that for all $j\le \gamma-\beta +1$, $\mathtt{Jump}(i_j) \le \log \left(q /(\gamma-\beta+1)\right)$, then $W$ is not a $(q, 0, \beta)$-unguessable block source.
\end{claim}

\begin{proof}
For convenience, relabel the ordering that violates the condition as $1,..., \gamma$.  We describe an unbounded adversary that determines $W_1,..., W_{\gamma-\beta+1}$.  As before, $S$ queries the (at most)~$q /(\gamma-\beta+1)$ possible values for $W_1$ and determines $W_1$.  Then $S$ queries the (at most)~$q/(\gamma-\beta+1)$ possible values for $W_2 | W_1$.  This process is repeated until $W_{\gamma-\beta+1}$ is learned.
\end{proof}

Presenting a sufficient condition for security is more difficult as $S$ may interleave queries to different blocks.  It seems like the optimum strategy is to focus on a single block at a time but it is unclear how to formalize this intuition.

