%!TEX root = thesis.tex


Let $x\in X$ denote an element $x$ in the support of $X$.  Let $x\leftarrow X$ be the process of sampling $x$ from the distribution $X$.
$U_n$ is a random variable with the uniform distribution over $\{0,1\}^n$.  Let $\sd(X, Y)$ be the statistical distance between random variables
$X, Y$ drawn from a set $\chi$, defined as $\sd(X, Y) =
\frac{1}{2}\sum_{x\in \chi} |\Pr(X = x) - \Pr(Y = x)| $. We consider randomized distinguishers that output a single bit.  Given a circuit $D$, 
define the computational distance $\delta^D$
between $X$ and $Y$ as $\delta^D(X, Y) = |\expe[D(X)] - \expe[D(Y)]|$.
For a circuit $D$, we use $|D|$ to denote its size.
For a class of distinguishers, $\mathcal{D}_s$, each of size at most $s$, we write $\delta^{\mathcal{D}_s}(X, Y) = \max_{D\in \mathcal{D}_s} \delta^D(X, Y)$.
For a probability distribution $X$, let $\supp(X)$ denote the set of points with nonzero probability.  Let $H_0(X)$ denote the logarithm of the support size of $X$, that is $H_0(X)=\log |\supp(X)|$.  We use an average case notion of remaining support size $\tilde{H}_0(X |P) = \log ( \expe_{p\in P} |\supp(X|P=p)|)$.
All logarithms without a base are considered base 2, that is, $\log x = \log_2 x$.  

For a metric space $(\mathcal{M}, \dis)$, the \emph{(closed) ball of radius $t$ around $x$} is the set of all points within radius $t$, that is, $B_t(x) = \{y \mid \dis(x, y)\leq t\}$.  If the size of a ball in a metric space does not depend on $x$, we denote by $|B_t|$ the size of a ball of radius $t$.  We consider the Hamming metric over vectors in $\mathcal{Z}^\gamma$, defined via $\dis(x,y) = |\{i \mid x_i \neq y_i\}|$.  For this metric, $|B_t| = \sum_{i=0}^t {\gamma \choose i} (|\mathcal{Z}|-1)^i $.  For a vector $w$ over $\mathbb{F}^\gamma$, let $\weight(w) = |\{i \mid w_i \neq 0\}|$.  

Usually, we use capitalized letters for random variables and corresponding lowercase letters for their samples.  We frequently use standard order notation (see \cite{cormen2001introduction}).


\section{Entropy and Extraction}
\label{definitions}
We begin by introducing standard entropy and extraction notions.


\subsection{Min-Entropy}
We begin with the standard notion of min-entropy and proceed to computational notions.

\begin{definition}
A distribution $X$ has \emph{min-entropy} at least $m$, denoted $\Hoo(X)\geq m$ if $$\forall x\in X, \Pr[X=x] \leq 2^{-m}.$$
\end{definition}

\noindent
We use the average case notion of min-entropy defined by \cite{DBLP:journals/siamcomp/DodisORS08}.

\begin{definition}[\cite{DBLP:journals/siamcomp/DodisORS08}]
Let $(X, Y)$ be a pair of random variables. 
The \emph{average min-entropy} of $X$ conditioned on $Y$ is defined as 
\begin{align*}
\tilde{H}_\infty(X|Y) \overset{\mathrm{def}}= -\log [\expe_{Y} ( 2^{-H_\infty(X|Y)})] = -\log\sum_{y\in Y} \Pr[Y=y] 2^{-H_\infty(X|Y=y)}
\end{align*}
\end{definition}

\subsection{Randomness Extractors}
A \emph{randomness extractor} takes a
distribution $X$ of (average)~min-entropy $m$, and with the help of a uniform
string called the seed, ``extracts'' the randomness contained in $X$
and outputs a string of length $\kappa$ that is \emph{almost uniform} even given the seed.\footnote{In all of our definitions of extractors we assume the extractor outputs its seed.  We omit this from the function definition but all security definitions take in the $d$-bit seed.}
\begin{definition}[\cite{nisan1993randomness}]
\label{def:randomness extractor}
Let $\mathcal{M}$, $\chi$ be finite sets.
A function $\ext: \mathcal{M} \times \{0,1\}^d \rightarrow \{0,1\}^\kappa$ is a \emph{$(\tilde{m}, \epsilon)$-average case extractor} if for all pairs
of random variables $X, Y$ over $\mathcal{M}, \chi$ such that
$\tilde{H}_\infty(X|Y) \ge \tilde{m}$, we have $\sd((\ext(X, U_d), U_d, Y), U_\kappa\times
U_d \times Y) \le \epsilon$.
\end{definition}

%Distributions of average min-entropy compose in the natural way.  That is, combining multiple distributions of average min-entropy cannot decrease the average min-entropy.  This is formalized in the following lemma.
%\begin{lemma}\label{convexEntropyLemma}
%Let $Z_1,..., Z_n$ be discrete distributions.  Define the distribution $Z$ as a convex combination of $Z_1, ..., Z_n$.  That is 
%\begin{align*}
%\Pr[Z=z] \overset{def}= \alpha_1 \Pr[Z_1 = z] +... \alpha_n\Pr[Z_n = z]
%\end{align*} where $\sum_{i = 1}^n \alpha_i = 1$.  Then $\tilde{H}_\infty(Z|Y)\geq \min_{i=1..n}\tilde{H}_\infty(Z_i|Y)$.
%\end{lemma}
%\begin{proof}
%It suffices to show the case where $Z$ is a convex combination of $Z_1, Z_2$.  Let \begin{align*}\nu = \min\{\tilde{H}_\infty(Z_1, Y), \tilde{H}_\infty(Z_2, Y)\}.\end{align*} Recall our definition of $Z$,
% \begin{align*}
% \Pr[Z=z] = \alpha \Pr[Z_1 = z] + (1-\alpha)\Pr[Z_2=z]
%\end{align*}
%We compute the average min-entropy of $Z$ conditioned on $Y$:
%\begin{align*}
%\tilde{H}_\infty(Z|Y ) &=-\log\sum_{y\in Y}\Pr[Y= y]2^{-H_\infty(Z|Y=y)}\\
%&=-\log\sum_{y\in Y}\Pr[Y= y] \max_{z\in Z} \Pr[Z=z |Y=y]\\
%&=-\log\sum_{y\in Y}\Pr[Y= y] \max_{z\in Z} \left(\alpha\Pr[Z_1=z |Y=y]+(1-\alpha)\Pr[Z_2=z |Y=y]\right)\\
%&\geq -\log\sum_{y\in Y}\Pr[Y=y] \alpha\max_{z\in Z1}\Pr[Z_1 =z |Y=y] + (1-\alpha)\max_{z\in Z_2}\Pr[Z_2 = z| Y=y]\\
%&= -\log \sum_{y\in Y}\Pr[Y=y] \alpha 2^{-H_\infty(Z_1| Y=y)}+(1-\alpha)2^{-H_\infty(Z_2|Y=y)}\\
%&= -\log \alpha\left(\sum_{y\in Y}\Pr[Y=y]  2^{-H_\infty(Z_1| Y=y)}\right)+(1-\alpha)\left(\sum_{y\in Y}\Pr[Y=y]2^{-H_\infty(Z_2|Y=y)}\right)\\
%&= -\log \alpha 2^{-\tilde{H}_\infty(Z_1|Y)}+(1-\alpha)2^{-\tilde{H}_\infty(Z_2|Y)}\\
%&\geq -\log \alpha 2^{-\nu} + (1-\alpha) 2^{-\nu}\\
%&=-\log 2^{-\nu} = \nu
%\end{align*}
%\end{proof}



\section{Computational Tools}

We now describe computational notions of entropy.  Our computational notions of entropy have two additional parameters: circuit size $s$
and quality $\epsilon$.  Larger $s$ and smaller $\epsilon$ mean ``better'' entropy.

\subsection{Computational Entropy}
We use the average case notion~\cite{DBLP:conf/eurocrypt/HsiaoLR07} of HILL entropy~\cite{DBLP:journals/siamcomp/HastadILL99}.
\begin{definition}
\label{def:cond hill}
A joint distribution $X |Y $ has \emph{conditional HILL entropy} at least $m$, denoted $H^{\hill}_{\epsilon, s}(X|Y)\geq m$ if there exists a distribution $Z$ where $\Hav(Z|Y) \geq m$, such that $\delta^{\mathcal{D}_s}((X, Y),(Z, Y))\leq \epsilon$.
\end{definition}

\noindent
HILL entropy is a commonly used computational notion of entropy.  It was extended to the conditional case by Hsiao, Lu, Reyzin~\cite{DBLP:conf/eurocrypt/HsiaoLR07}. Here we recall a weaker definition due to Gentry and Wichs~\cite{gentry2011separating}~(the term relaxed HILL entropy was introduced in~\cite{reyzin2011some}).

\begin{definition}
\label{def:relaxed hill}
Let $(X, Y)$ be a pair of random variables.  $X$ has 
\emph{relaxed HILL entropy} at least $m$ conditioned on $Y$,
denoted $H^{\hillrlx}_{\epsilon, s}(X|Y)\geq m$ if there exists a joint distribution $(X', Y')$, such that $\tilde{H}_\infty(X'|Y')\geq m$ and $\delta^{\mathcal{D}_{s}} ((X,Y),(X',Y'))\leq \epsilon$.
\end{definition}

However, HILL entropy is a strong notion.  We also consider a significantly weaker version where the value of $X$ is hard to guess given public state.  We use the definition of conditional unpredictability entropy~\cite[Definition 7]{DBLP:conf/eurocrypt/HsiaoLR07}, which captures the notion of ``hard to guess'' (we relax the definition slightly, similarly to the relaxation of HILL entropy above).

\begin{definition}
\label{def:unp entropy}
Let  $(X,Y)$ be a pair of random variables. $X$ has \emph{relaxed unpredictability entropy} at least $m$ conditioned on $Y$, denoted by $H^{\unprlx}_{\epsilon, s} (X|Y) \geq m$, if there exists a pair of distributions $(X', Y')$ such that $\delta^{\mathcal{D}_{s}}((X, Y),(X', Y'))\leq \epsilon$, and for all circuits $\mathcal{I}$ of size $s$,
\[
\Pr[\mathcal{I}(Y') = X' ] \leq 2^{-m}
.\]
\end{definition}

\subsection{Extracting from Computational Entropy}

Extractors can be applied to distributions with
HILL entropy to obtain pseudorandom, rather than random, outputs:
that is, outputs that are computationally indistinguishable from, rather
than statistically close to, uniformly random strings.
We include a proof to provide intuition for manipulating computational entropy~(a similar version of this theorem appeared in~\cite{FR11}).

\begin{theorem}
\label{metricExtract}
Let $\ext: \mathcal{M} \times \{0, 1\}^d \rightarrow \{0, 1\}^{\kappa}$ be a
$(\tilde{m}, \epsilon_{ext})$-extractor, computable by circuits of size $s_{\ext}$.  Let $(X, Y)$ be a pair of random variables over $\mathcal{M}\times\chi$ with $H^{\hillrlx}_{\epsilon_{\hill}, s_{\hill}}(X |Y )\geq \tilde{m}$.
Then $\forall D\in \mathcal{D}_{s'}$, where $s' \approx s_{\hill} -
s_\ext$, 
\[
\delta^D((\ext(X, U_d), Y, U_d), U_\kappa\times Y \times U_d)\leq
\epsilon_{\ext} +
\epsilon_{\hill}\,.
\]
\end{theorem}

\begin{proof}
We proceed by contradiction.  Suppose not, that is, $\exists D\in
\mathcal{D}_{s'}$ such that 
\[ 
\delta^D((\ext(X, U_d), Y, U_d), (U_\kappa \times Y\times  U_d))> \epsilon_{\ext} +
\epsilon_{\hill}.
\]
 We use $D$ to construct a distinguisher $D'$ to distinguish $X , Y$ from all
 distributions $X', Y'$ where $\Hav(X' |Y')\geq \tilde{m}$, violating the
\hillrlx entropy of $X|Y$.  We define $D'$ as follows: upon receiving input
 $\alpha\in\mathcal{M}, \beta\in \chi$, $D'$ samples $\seed \leftarrow U_d$, runs
 $\eta \leftarrow \ext (\alpha, \seed)$ and then runs $D(\eta, \beta, \seed)$ on the result.  Note that $D' \in \mathcal{D}_{s}$ where $s \approx s' + s_\ext = s_{\hill}$.  Thus we have the following $\forall X', Y',$ where $\tilde{H}_\infty(X' | Y')\geq \tilde{m}$:
\begin{align*}
\delta^{D'}((X, Y), (X', Y')) &= \delta^D((\ext(X, U_d), Y, U_d), (\ext(X', U_d), Y', U_d)) \\
&\geq \delta^D((\ext(X, U_d), Y, U_d), (U_\kappa \times Y\times  U_d))\\&- \delta^D((\ext(X', U_d), Y', U_d), U_\kappa \times Y' \times U_d)\\
 &>  \epsilon_{\ext}+\epsilon_{\hill} - \epsilon_{\ext} = \epsilon_{\hill}
\end{align*}
Thus $D'$ is able to distinguish $X|Y$ from all $X'| Y'$ with sufficient entropy.  This is a contradiction.
\end{proof}
\noindent
When working with computational entropy, there is no reason to use an information-theoretic randomness extractor.  A computational extractor \cite{krawczyk2010cryptographic} is the adaption of a randomness extractor to the computational setting.  Any information-theoretic randomness extractor is also a computational extractor; however, unlike information-theoretic extractors, computational extractors can expand their output via pseudorandom generators once a long-enough output is obtained. We adapt the definition of Krawczyk~\cite{krawczyk2010cryptographic} to the average case:
\begin{definition}
\label{def:computational extractor}
A function $\cext: \mathcal{M} \times \{0,1\}^d \rightarrow \{0,1\}^\kappa$ is a \emph{$(\tilde{m}, \epsilon_{sec}, s_{sec})$-average-case computational extractor} if for all pairs
of random variables $X, Y$ (with $X$ over $\mathcal{M}$) such that
$\tilde{H}_\infty(X|Y) \ge \tilde{m}$, we have $\delta^{\mathcal{D}_{s_{sec}}}((\cext(X; U_d), U_d, Y), U_\kappa\times
U_d \times Y) \le \epsilon_{sec}$.
\end{definition}

\noindent
Computational extractors also work when given HILL entropy.  The proof is the same as the proof of \thref{metricExtract} and is omitted.

\paragraph{Extracting from unpredictability entropy} Standard extractors cannot extract from distributions with unpredictability entropy.  This requires a special type of extractor with a \emph{reconstruction}.  The best known example of a reconstructive extractor is the Goldreich-Levin hardcore bit~\cite{DBLP:conf/stoc/GoldreichL89}.  
\begin{definition}[Reconstruction procedure]  A $(\kappa, \epsilon)$-reconstruction for a function $\ext:\mathcal{M} \times \zo^d \rightarrow \zo^\kappa$ is a pair of machines $\compress$ and $\decomp$, where $\compress:\mathcal{M} \rightarrow \zo^\kappa$ is a randomized Turing machine, and $\decomp^{(\cdot)}:\zo^\kappa \rightarrow \mathcal{M} $ is a randomized oracle Turing machine which runs in time polynomial in $\log |\mathcal{M}|$.  Furthermore, for every $x$ and $T$, if $|\Pr[T(\ext(x, U_d)) = 1] - \Pr[T(U_\kappa\times U_d) = 1]| > \epsilon$, then $\Pr[\decomp^T(\compress^T(x)) = x] > 1/2$ (the probability is over the random choices of $\compress$ and $\decomp$).
\end{definition}
\begin{lemma}\protect{\cite[Lemma 6]{DBLP:conf/eurocrypt/HsiaoLR07}}  
\label{lem:extract from unp}Let $X, Y$ be random variables with $H^{\unprlx}_{\epsilon, s}(X | Y)\geq \tilde{m}$, and let $\ext$ be an extractor with a $(\tilde{m}-\log \frac{1}{\epsilon}, \epsilon)$-reconstruction $(\compress, \decomp)$.  Then 
\[\delta^{\mathcal{D}_{s'}}((\ext(X, U_d), Y, U_d), (U_\kappa \times Y\times U_d))\leq 5\epsilon,\] where $s' = s/(|\compress| + |\decomp|)$.
\end{lemma}


\section{Coding Theory}
\begin{definition}
\label{def:neighborhood}
The $t$-\emph{neighborhood} of $c$, denoted $\neigh_t(c)$, is the set of all points distance $t$ from $c$.  That is $\neigh_t(c) = \{c' | \dis(c, c') = t\}$.
\end{definition}

\subsection{Shannon Codes}
We use the definition of a Shannon code~\cite{shannon1949mathematical}:
\begin{definition}
\label{def:shannon-code}
Let $C$ be a set over space $\mathcal{M}$.  We say that $C$ is a $(t,\delta)$-\emph{Shannon code} if there exists a procedure $\rec$ such that for all $t'\le t$ and for all $c\in C$, $\Pr[c'\leftarrow \neigh_{t'}(c) \wedge \rec(c') \neq c]\le \delta$. To distinguish it from the average-error Shannon code defined below, we will sometimes call it a \emph{maximal-error} Shannon code.
\end{definition}
This is a slightly stronger formulation than usual, in that for every size  $t'<t$ we require the code to correct $t'$ random errors.\footnote{In the standard formulation, the code must correct a random error of size up to $t$, which may not imply that it can correct a random error of a much smaller size $t'$, because the volume of the ball of size $t'$ may be negligible compared to the volume of the ball of size $t$.  
For codes that are monotone~(if decoding succeeds on a set of errors, it succeeds on all subsets), these formulations are equivalent.  However, we will work with an arbitrary recover functionality that is not necessarily monotone.}
Shannon codes work for all codewords. We can also consider a formulation that works for an ``average'' codeword. 

 \begin{definition}
\label{def:average error code}
Let $C$ be a distribution over space $\mathcal{M}$.  We say that $C$ is a $(t,\epsilon)$-\emph{average error Shannon code} if there exists an efficient procedure $\rec$ such that for all $t'\le t$,
$\Pr_{c\leftarrow C, c'\leftarrow \neigh_{t'}(c)}[\rec(c') \neq c]\le \epsilon$.
\end{definition}
An average error Shannon code is one whose average probability of error is bounded by $\epsilon$.  See~\cite[Pages 192-194]{cover2006elements} for definitions of average and maximal error probability.  An average-error Shannon code is convertible to a maximal-error Shannon code with a small loss.  We use the following pruning argument from~\cite[Pages 202-204]{cover2006elements}:
\begin{lemma}
\label{lem:averageToMaximalError}
Let $C$ be a $(t, \epsilon)$-average error Shannon code with recovery procedure $\rec$ such that $\Hoo(C)\geq m$.  There is a set $C'$ with $|C'|\ge2^{m-1}$ that  is a $(t, 2\epsilon)$-(maximal error) Shannon code with recovery procedure $\rec$.
\end{lemma}
\begin{proof}
Let $C$ be the  $(t,\epsilon)$-average error Shannon code with recovery procedure $\rec$ such that  $\Hoo(C)\geq m$.  Then for all $t'\le t$
\[
\sum_{c\in C} \Pr[C=c]\Pr[ c'\leftarrow \neigh_{t'}(c) \wedge \rec(c') \neq c]\leq \epsilon.
\]
For each $c$, let $\epsilon_c = \Pr[c'\leftarrow \neigh_{t'}(c) \wedge \rec(c') \neq c]$.  
Then by Markov's inequality:
\[
\Pr_{c\leftarrow C} [\epsilon_c \le 2\epsilon ] \geq \Pr_{c\leftarrow C}[ \epsilon_c \leq 2\expe_{c\leftarrow C} [\epsilon_c ] ] \geq \frac{1}{2}
\]
Let $C'$ denote the set of all $c\in C$ where $\epsilon_c\leq 2\epsilon$.  Note that $\Pr_{c\leftarrow C}[c\in C']\geq 1/2$.  Since $H_\infty(C)\geq m$, we know $|C'|\geq 2^{m-1}$~(otherwise $\Pr_{c\leftarrow C}[c\in C']=\sum_{c\in C'}\Pr[C=c]$ would be less than $2^{m-1}\frac{1}{2^m} = 1/2$).  This completes the proof of~\lemref{lem:averageToMaximalError}.
\end{proof}

\subsection{Hamming Codes}
\label{sec:error correcting codes}
\begin{definition}[Minimum distance]
Let $C$ be a set.  The \emph{minimum distance} of $C$ is $\min_{c, c'\in C} \dis(c, c')$.
\end{definition}
\begin{definition}[Error-correcting code]
A set $C$ is an $(\mathcal{Z}^\gamma, |C|, d)$-\emph{error-correcting code} if its minimum distance is at least $d$.  The elements $c\in C$ are known as codewords.
\end{definition}

\noindent
If a message $c$ is transmitted and at most $t = \lfloor \frac{d-1}{2} \rfloor$ of the symbols of $c$ are modified, it is possible to uniquely recover the transmitted message $c$.  A code is efficient if there exist polynomial time algorithms to sample $c\leftarrow C$ and to decode, that is, an algorithm $\dec(c^*)$ that finds the unique $c\in C$ such that $\dis(c, c^*)\le t$ if one exists.  Many error-correcting codes have additional properties that facilitate encoding and decoding.

\paragraph{Linear error-correcting codes}
Let $\mathcal{Z} = \mathbb{F}_q$ for some field $\mathbb{F}_q$.  
\begin{definition}
\label{def:linear code}
A code $C$ is a $(\mathbb{F}_q^\gamma, \mathbb{F}_q^k, d)$-\emph{linear code} if it is a $k$-dimensional linear subspace of $\mathbb{F}_q^\gamma$ with minimum distance $d$.
\end{definition}

\noindent
Linear codes have two associated matrices $G$ and $H$, known as the generating matrix and the parity check matrix respectively.  

\begin{definition}[Generating Matrix]
For any $(\mathbb{F}_q^\gamma, \mathbb{F}_q^k, d)$-linear code $C$ there exists a matrix $G\in \mathbb{F}_q^{\gamma\times k}$ whose columns span $C$, that is, $\mathbf{span}(G) = C$.  
\end{definition}
\noindent
Sampling a random $x\in \mathbb{F}_q^k$ and computing $Gx$ is an efficient encoding function for $C$. Recall that the kernel, or $\mathbf{ker}$, of a matrix is the set of all vectors that map to the $0$ vector.

\begin{definition}[Parity Check Matrix]
For any $(\mathbb{F}_q^\gamma, \mathbb{F}_q^k, d)$-linear code $C$ there exists a matrix $H \in \mathbb{F}_q^{(\gamma - k)\times \gamma}$ such that $\mathbf{ker}(H) = C$.
\end{definition}
\noindent
Fix some $c\in C$; an important property of $H$ is that for any $c^*$ such that $\dis (c, c^*)\le t$, the value $Hc^*$ is unique.  We call $Hc^*$ the \emph{syndrome} of $c^*$.  Indeed, decoding usually consists of three steps:
\begin{itemize}
\item Compute $s =Hc^*$.
\item Map $s$ to an error vector $e\in \mathbb{F}_q^\gamma$ where $\weight(e) \le t$.
\item Subtract $e$ from $c^*$ to obtain $c$.
\end{itemize}

\subsection{Random Linear Codes}
We will use the $q$-ary entropy function, denoted $H_q(x)$ and defined as $H_q(x) = x\log _q(q-1) - x\log_q x - (1-x)\log_q (1-x)$.  Note that $H_2(x) = -x\log x - (1-x)\log (1-x)$.  In the region $[0, \frac{1}{2}]$ for any value $q'\geq q$, $H_{q'}(x)\leq H_{q}(x)$.  The following theorem is standard in coding theory:

\begin{theorem}[{\cite[Theorem 8]{venkatLecture}}]
\label{thm:random code good distance}
For prime $q, \rho\in [0, 1-1/q), 0<\epsilon< 1-H_q(\rho)$ and sufficiently large $\gamma$, the following holds for $\mu = \lceil (1-H_q(\rho) - \epsilon)\gamma\rceil$ .  If $\vA \in \Fq^{\gamma\times \mu}$ is drawn uniformly at random, then the linear code with $\vA$ as a generator matrix has rate at least $(1-H_q(\rho) -\epsilon)$ and relative distance at least $\rho$ with probability at least $1-e^{-\Omega(\gamma)}$.
\end{theorem}

\noindent
We use the following claim~(techniques from Cooper~\cite{cooper2000rank}):
\begin{claim}
\label{cl:full rank matrix}
Let $q\ge 2$ be a prime.  Let $\alpha, \beta$ be integers and
let $\vS \overset{\$}\leftarrow \Fq^{\alpha \times (\alpha+\beta)}$ be uniformly generated.  Then $\Pr[\rank(\vS)=\alpha] >  1- q^{-\beta}$.
\end{claim}
\begin{proof}
Let $p_i$ be the probability that the $i$-th
row is linearly dependent on the previous $i-1$ rows. 
By the union bound, the probability that $\alpha$ rows are linearly dependent is bounded by 
$\sum_{i=1}^\alpha p_i$.
Since $i-1$ rows can span a space of size at most $q^{i-1}$, the probability $p_i$ that a randomly chosen $i$th row is in that space is at most $q^{i-1}/q^{\alpha+\beta}$. So
\begin{align*}
\Pr[\rank(\vS) < \alpha] &\leq \sum_{i=1}^{\alpha} \frac{q^{i-1}}{q^{\alpha+\beta}}
= \frac{q^{\alpha}-1}{q-1}\frac{1}{q^{\alpha+\beta} }< q^{-\beta}.
\end{align*}
\end{proof}



\section{Obfuscation}
Our constructions will use obfuscation for two types of circuits: point functions and digital lockers. The family of point functions is $\mathtt{I}_n = \{I_w\}_{w \in \zo^n}$, defined as follows:
\[
I_w(x):\begin{cases} 1 & x=w\\0 & \text{otherwise}\end{cases}.
\]
and the class of digital lockers is $\mathtt{I}_{n+\kappa} = \{I_{w, \key}\}_{w \in \zo^n, \key\in\zo^\kappa}$, defined as follows:
\[
I_{w, \key}(x):\begin{cases} \key & x=w\\\perp & \text{otherwise}\end{cases}.
\]
The required notion of obfuscation is virtual grey-box (VGB) introduced in \cite{bitansky2010strong}. This notion is weaker than the standard notion of virtual black-box (\cite{barak2001possibility}), as it allows the simulator to run in unbounded time while making at most a polynomial number of oracle queries to the function. 

We require that the obfuscation is composable and secure with respect to auxiliary input. 
Composable auxiliary-input VGB obfuscators for point functions and digital lockers are constructed in \cite[Theorem 6.1]{bitansky2010strong} from the Strong Vector Decision Diffie-Hellman assumption, which is a generalization of the strong DDH assumption of \cite{canetti1997towards} for tuples of points. They can also be constructed by assuming strong properties of cryptographic hash functions~\cite{canetti1997towards}.

\begin{definition}[composable VGB obfuscation with auxiliary input \cite{bitansky2010strong}]
\label{def:obf} A PPT algorithm $\mathcal{O}$ is an $\ell$-composable VGB obfuscator for $\mathtt{I}_{n}$~(resp. $\mathtt{I}_{n+\kappa}$) with auxiliary-input if the following conditions are met:
\begin{enumerate}
\item \emph{Functionality:} for every $n$ and $I \in \mathtt{I}_n$, $\mathcal{O}(I)$ is a circuit that computes the same function as $I$.
\item \emph{Virtual grey-box:}  For every PPT adversary $A$ and polynomial $p$, there exists a (possibly inefficient) simulator $S$ and a polynomial $q$ such that for all sufficiently large $n$, any  sequence of circuits $I^1,\dots,I^\ell \in \mathtt{I}_n$, (where $\ell=\poly(n)$) and for all auxiliary inputs $z\in \zo^*$:
\[
|\Pr_{A,\mathcal{O}}[A(z,\mathcal{O}(I^1),\dots,\mathcal{O}(I^\ell)) = 1] - \Pr_{S}[S^{(I^1,\dots,I^\ell)[q(n)]}(z, 1^{|I^1|+\dots+|I^\ell|}) = 1] | < \frac{1}{p(n)},
\]
where $(I^1,\dots,I^\ell)[q(n)]$ is an oracle that answers at most $q(n)$ queries, and where every query of the form $(i,x)$ is answered by $I^i(x)$.
\end{enumerate}
\end{definition}
For notational convenience, when we use point function obfuscation, we denote the oracle provided to the simulator as $I_w(\cdot, \cdot)$ where $w = w_1,..., w_\gamma$ is the vector of obfuscated points.  When we use digital lockers we denote the oracle provided to the simulator as $I_{w, \key}(\cdot, \cdot)$ where $w$ is the vector of obfuscated points and $\key$ is the hidden value~(we will hide the same value in each obfuscation).