%!TEX root = thesis.tex

For the remainder of this work we switch to fuzzy extractors that provide computational security (\defref{def:comp fuzzy extractor}).  We begin by showing a fuzzy extractor whose output key is as long as the starting entropy.  This is impossible in the information-theoretic setting~(unless all points of the distribution are far apart and $\Hfuzz(W) = \Hoo(W)$). 

\section{Computational Fuzzy Extractor based on \class{LWE}}
\label{sec:fuzzyCompExt}

In this section, we present a computational fuzzy extractor based on the learning with errors assumption.  Security of our construction depends on the source $W$. We first consider  a uniform source $W$; we consider other distributions in \secref{sec:LWE block fixing sources}.  Our construction uses the code-offset construction (described in \consref{cons:code offset}) instantiated with a random linear code over a finite field $\Fq$.   Let $\decode_t$ be an algorithm that decodes a random linear code with at most $t$ errors (we will present such an algorithm later, in \secref{sec:time main construction}). 

\begin{construction}
Let $n$ be a security parameter and let $\gamma \ge n$.  Let $q$ be a prime. 
Define $\gen, \rep$ as follows:
\begin{center}
\begin{tabular}{c|c}
\begin{minipage}{3in}
\textbf{\gen}
\begin{enumerate}
\item \underline{Input}: $w\leftarrow W$ (where $W$ is some distribution over $\Fq^\gamma$).
\item Sample $\vA\in\Fq^{\gamma \times n}, \vx\in\Fq^n$ uniformly.
\item Compute $p = (\vA, \vA \vx+w)$, \\ $\key = \vx_{1,...,n/2}$.
\item Output $(\key, p)$.
\end{enumerate}
 \end{minipage} &
\begin{minipage}{3in}
\textbf{\rep}
\begin{enumerate}
\item \underline{Input}: $(w', p)$.
\item Parse $p$ as $(\vA, \vect{c})$; \\let $\vb=\vect{c}-w'$.
\item Let $\vx = \decode_t(\vA, \vb)$.
\item Output $\key= \vx_{1,...,n/2}$.
\end{enumerate}
\end{minipage} 
\end{tabular}
\end{center}
\label{cons:informal construction}
\end{construction}


Intuitively, security comes from the computational hardness of decoding random linear codes with a high number of errors (introduced by $w$).  
In fact, we know that decoding a random linear code is NP-hard~\cite{berlekamp1978}; however, this statement is not sufficient for our security goal, which is to show  $\delta^{\mathcal{D}_{s_{sec}}}((X_{1,..., n/2},P), (U_{n/2 \log q}, P))\leq \epsilon$.  Furthermore, this construction is only useful if $\decode_t$ can be efficiently implemented. 

The rest of this section is devoted to making these intuitive statements precise.
 We describe the \class{LWE} problem and the security of our construction in \secref{subsec:LWE}.
We describe one possible polynomial-time $\decode_t$ (which corrects more errors than is possible by exhaustive search) in \secref{sec:time main construction}.  In \secref{sec:lossless extractor}, we describe parameter settings that allow us to extract as many bits as the input entropy, resulting in a lossless construction.  In \secref{sec:prg based comparison}, we compare \consref{cons:informal construction} to using a sketch-and-extract approach (\lemref{lem:fuzzy ext construction}) instantiated with a computational extractor. 

\subsection{Security of \consref{cons:informal construction}}
\label{subsec:LWE}
The $\LWE$ problem was introduced by Regev \cite{regev2005LWE,regevLWEsurvey} as a generalization of ``learning parity with noise.'' For a complete description of the $\LWE$ problem and related lattice problems~(which we do not define here) see~\cite{regev2005LWE}.  We now recall the decisional version of the problem. 


\begin{definition}[Decisional $\lwe$]\label{def:dist-LWE}
Let $n$ be a security parameter.  
Let $\gamma= \gamma(n) = \poly(n)$ be an integer and $q = q(n) = \poly(n)$ be a prime\footnote{%
Unlike in common formulations of LWE, where $q$ can be any integer, we need $q$ to be prime for decoding.}.
%
Let $\vA$ be the uniform distribution over  $\Fq^{\gamma \times n}$, $X$ be the uniform distribution over $\Fq^n$ and $\chi$ be an arbitrary distribution on $\Fq^\gamma$.
 The decisional version of the $\LWE$ problem, denoted \class{dist}-$\LWE_{n, \gamma, q, \chi}$, is to distinguish the distribution
$(\vA, \vA X+\chi)$ from
 the uniform distribution over $(\Fq^{\gamma\times n}, \Fq^\gamma)$.

We say that $\distLWE_{n, \gamma, q, \chi}$ is $(\epsilon, s_{sec})$-secure if no (probabilistic) distinguisher of size $s_{sec}$ can distinguish the $\lwe$ instances from uniform except with probability $\epsilon$.  If for any $s_{sec} = \poly(n)$, there exists   $\epsilon  = \ngl(n)$ such that  $\distLWE_{n, \gamma, q, \chi}$ is $(\epsilon, s_{sec})$-secure, then we say  it is \emph{secure}.
\end{definition}

 Regev \cite{regev2005LWE} and Peikert \cite{peikert2009latticereduction} show that $\class{dist}$-$\lwe_{n, \gamma, q, \chi}$ is secure when the distribution $\chi$ of errors is Gaussian, as follows.
Let $\bar{\Psi}_\rho$ be the discretized Gaussian distribution with variance $(\rho q)^2/2\pi$, where $\rho \in (0,1)$ with $\rho q > 2\sqrt{n}$.  If GAPSVP and SIVP are hard to approximate~(on lattices of dimension $n$) within polynomial factors for quantum algorithms, then $\distLWE_{n, \gamma, q, \bar{\Psi}_\rho^\gamma}$ is secure.  (A recent result of Brakerski et al.~\cite{brakerski2013classical} shows security of $\LWE$ based on hardness of approximating lattice problems for classical algorithms.  We have not considered how this result can be integrated into our analysis.)

The above formulation of $\LWE$ requires the error term to come from the discretized Gaussian distribution, which makes it difficult to use for constructing fuzzy extractors (because using $w$ and $w'$ to sample Gaussian distributions will increase the distance between the error terms and/or reduce their entropy).
Recent work of D\"{o}ttling and M\"{u}ller-Quade~\cite{dottling2012} shows the security of $\LWE$, under the same assumptions, when errors come from the uniform distribution over a small interval\footnote{Micciancio and Peikert provide a similar formulation in~\cite{micciancio2013hardness}.  The result of D\"{o}ttling and M\"{u}ller-Quade provides better parameters for our setting.}.  This allows us to directly encode $w$ as the error term in an $\LWE$ problem by splitting it  into $\gamma$ blocks.  The size of these blocks is dictated by the following result of D\"{o}ttling and M\"{u}ller-Quade:
\begin{lemma}\protect{\cite[Corollary 1]{dottling2012}}
\label{lem:uniform LWE decision}
Let $n$ be a security parameter.  Let $q = q(n) = \poly(n)$ be a prime and $\gamma = \gamma(n) = \poly(n)$ be an integer with $\gamma \ge 3n$. Let $\sigma \in (0, 1)$ be an arbitrarily small  constant and let $\rho=\rho(n)\in (0,1/10)$ be such that $\rho q \geq 2n^{1/2+\sigma}\gamma$. If the approximate decision-version of the shortest vector problem (GAPSVP) and the shortest independent vectors problem (SIVP) are hard within a factor of $\tilde{O}(n^{1+\sigma}\gamma /\rho)$ for quantum algorithms in the worst case, then, for $\chi$ the uniform distribution over $[-\rho q, \rho q]^\gamma$,  $\distLWE_{n, \gamma, q, \chi}$ is secure.
\end{lemma}

To extract pseudorandom bits, we use a result of Akavia, Goldwasser, and Vaikuntanathan~\cite{akavia2009} to show that $X$ has simultaneously many hardcore bits.  The result says that if $\distLWE_{(n-k, \gamma, q, \chi)}$ is secure then any $k$ variables of $X$ in a $\distLWE_{(n, \gamma, q, \chi)}$ instance are hardcore.  We state their result for a general error distribution~(noting that their proof does not depend on the error distribution):
\begin{lemma}\protect{\cite[Lemma 2]{akavia2009}}
\label{lem:many hardcore bits}
 If $\distLWE_{(n-k, \gamma, q, \chi)}$ is $(\epsilon, s_{sec})$ secure, then
\[\delta^ {\mathcal{D}_{s_{sec'}}} ((X_{1,\dots, k}, \vA, \vA X+\chi) , (U_{k\log q}, \vA, \vA X+\chi)) \le \epsilon\,,\]
where  $\vA$ denotes the uniform distribution over $\Fq^{\gamma\times n}$, $X$ denotes the uniform distribution over $\Fq^n$, $X_{1,\dots, k}$ denotes the first $k$ coordinates of $X$, and $s_{sec}' \approx s_{sec} - n^3$.
\end{lemma}
The security of  \consref{cons:informal construction} follows from Lemmas \ref{lem:uniform LWE decision} and~\ref{lem:many hardcore bits} when parameters are set appropriately (see \thref{thm:lossless secure extractor log}),  because we use the hardcore bits of $X$ as our key.  

\subsection{Efficiency of \consref{cons:informal construction}}
\label{sec:time main construction}
\consref{cons:informal construction} is useful only if $\decode_t$ can be efficiently implemented.  We need a decoding algorithm for a random linear code with $t$ errors that runs in polynomial time.  We present a simple $\decode_t$ that runs in polynomial time and can correct $\Theta(\log n)$ errors (note that this corresponds to a super-polynomial number of possible error patterns).
This algorithm is a proof of concept, and neither the algorithm nor its analysis have been optimized for constants. An improved decoding algorithm can replace our algorithm, which will increase our correcting capability and improve \consref{cons:informal construction}.

\begin{construction}
\label{cons:decoding algorithm} We consider a setting of $(n, \gamma, q, \chi)$ where $\gamma\geq 3n$.  We describe $\decode_t$:
\begin{enumerate}
\item Input $\vA , \vb = \vA \vx + w - w'$
\item Randomly select rows without replacement $i_1,..., i_{2n}\leftarrow [1,\gamma]$.  
\item Restrict $\vA, \vb$ to rows $i_1,...,i_{2n}$; denote these $\vA_{i_1,...,i_{2n}}, \vb_{i_1,...,i_{2n}}$.
\item Find $n$ rows of $\vA_{i_1,..., i_{2n}}$ that are linearly independent.  
\\If no such rows exist, output $\perp$ and stop.
\item Denote by $\vA', \vb'$ the restriction of $\vA_{i_1,..., i_{2n}}, \vb_{i_1,..., i_{2n}}$ (respectively) to these rows. Compute $\vx' = (\vA')^{-1}\vb'$.  
\item If $\vb- \vA \vx'$ has more than $t$ nonzero coordinates, go to step (2).
\item Output $\vx'$.
\end{enumerate}
\end{construction}

Each step is computable in time $O(n^3)$. 
For $\decode_t$ to be efficient, we need $t$ to be small enough so that  with probability at least $\frac{1}{\poly(n)}$, none of the $2n$ rows  selected  in step 2 have errors (i.e., so that $w$ and $w'$ agree on those rows).  If this happens, and $\vA_{i_1,...,i_{2n}}$ has rank  $n$ (which is highly likely), then $\vx'=\vx$, and the algorithm terminates.  However, we also need to ensure correctness: we need to make sure that if $\vx'\neq \vx$, we detect it in step 6.  This detection will happen if $\vb-\vA \vx' = \vA (\vx-\vx')+(w-w')$ has more than $t$ nonzero coordinates.  It suffices to ensure that $\vA (\vx-\vx')$ has at least $2t+1$ nonzero coordinates (because at most $t$ of those can be zeroed out by $w-w'$), which happens whenever the code generated by $\vA$ has distance $2t+1$.

\textbf{Remark:} Fuzzy extractor definitions make no guarantee about \rep behavior when the distance between $w$ and $w'$ is larger than $t$.  Our $\decode$ algorithm will never output an incorrect key~(with high probability over the coins of \gen) but may not terminate.  It may be preferable to output the wrong key or $\perp$ when $\dis(w, w')>t$.

Setting $t = \Theta(\frac{\gamma}{n}\log n)$ is sufficient to ensure efficiency when $\dis(w, w')\le t$.    Random linear codes have distance at least $\Theta(\frac{\gamma}{n}\log n)$ with probability $1-e^{-\Omega(n)}$ (the exact statement is in \corref{cor:code high distance}), so this also ensures correctness.
The formal statement is below:
\begin{lemma}[Efficiency of $\decode_t$ when $t\leq d (\gamma/n-2)\log n$]
\label{lem:i t poly time}
Let $d$ be a positive constant and assume that $\dis(W, W')\leq t$ where $t\leq d(\frac{\gamma}{n}-2)\log n$.  Then $\decode_t$ runs in expected time $O(n^{4d+3})$ operations in $\Fq$~(this expectation is over the choice of random coins of $\decode_t$, regardless of the input, as long as $\dis(w, w')\le t$).  It outputs $X$ with probability $1-e^{-\Omega(n)}$ (this probability is over the choice of the random matrix  $\vA$ and random choices made by $\decode_t$).
\end{lemma}
\begin{proof}
We first show that our code has high distance with overwhelming probability.  In our construction $\gamma = \poly(n)\geq 2n$ and $\delta = O (\log n /n)$.  This setting of parameters satisfies \thref{thm:random code good distance}:
\begin{lemma}
\label{cor:code high distance}
Let $n$ be a parameter and let $\gamma = \poly(n)\geq 2n$.  
Let $q$ be a prime and $\tau = \Theta(\frac{\gamma}{n}\log n )$.  For large enough values of $n$, when $\vA\in \Fq^{\gamma\times n}$ is drawn uniformly, the code generated by $\vA$ has distance at least $\tau$ with probability at least $1-e^{-\Omega(\gamma)}\geq 1-e^{-\Omega(n)}$.
\end{lemma}
\begin{proof}
Let $c$ be some constant.  Let $\delta = \tau/\gamma = \frac{c\log n}{n}$.  We show the corollary for the case when $\gamma = 2n$~(increasing the size of $\gamma$ only increases the relative distance).  It suffices to show that for sufficiently large $n$, there exists $\epsilon>0$ where $1- H_q(\frac{c\log n}{n}) - \epsilon = 1/2$ or equivalently that $H_q(\frac{c\log n}{n})< 1/2$ as then setting $\epsilon = 1/2-H_q(\frac{c\log n}{n})$ satisfies  \thref{thm:random code good distance}.  For sufficiently large $n$:
\begin{itemize}
\item $\frac{c\log n}{n}< 1/2$, so we can work with the binary entropy function $H_2$.  
\item $\frac{c\log n}{n}< .1 < 1/2$ and thus $H_q(\frac{c\log n}{n})< H_q(.1)$. 
\end{itemize}  Putting these statements together, for large enough $n$, $H_q(\frac{c\log n}{n})< H_q(.1) < H_2(.1)< 1/2$ as desired.  This completes the proof.
\end{proof}

Note that $\decode_t$ will stop if $w$ and $w'$ agree on all the rows selected in Step 2 (it may also stop for other reasons---namely, in step 4; but we do not use this fact to bound the expected running time).
The probability of each selected row having an error is at most $\frac{t}{\gamma - i}$ where $i$ is the number of rows already selected.  That is,
\begin{align*}
\Pr[i_1,..., i_{2n}\text{ have no errors}]&\geq \prod_{i=0}^{2n-1}\left(1 - \frac{t}{\gamma-i}\right)\geq \prod_{i=0}^{2n-1}\left( 1-\frac{d\left(\frac{\gamma}{n}-2\right)\log n}{\gamma-i}\right)\\
&\geq  \prod_{i=0}^{2n-1}\left( 1-\frac{d\log n}{n}\left(\frac{\gamma-2n}{\gamma-i}\right)\right)\geq \prod_{i=0}^{2n-1}\left( 1-\frac{d\log n}{n}\right) \\
&= \left(1-\frac{d\log n}{n}\right)^{2n}  = \left(\left(1-\frac{d\log n}{n}\right)^{\frac{n}{d\log n}}\right)^{2d\log n}\\&\geq \frac{1}{4^{2d\log n}} = \frac{1}{n^{4d}}\,.
\end{align*}
(The second-to-last step holds as long as $n\ge 2d\log n$.) Because at each iteration, we select $2n$ rows independently at random, the expected number of iterations is at most $n^{4d}$; each iteration takes $O(n^3)$ operations in $\Fq$, which gives us the expected running time bound.

The probability that $\decode_t$ outputs $\perp$ is bounded by 
\begin{eqnarray*}
\Pr[\decode_t\rightarrow \perp]& \le & \sum_{j=1}^\infty \Pr[\decode_t\rightarrow \perp \text{ in $j$-th iteration of step 4}]\\
&= &\sum_{j=1}^\infty  \Pr[\decode_t \text{ reaches $j$ iterations} \wedge \rank(\vA_{i_1,\dots, i_{2n}})<n]\\
&\le &\sum_{j=1}^\infty  \Pr[i_1,..., i_{2n}\text{ had errors $j-1$ times} \wedge \rank(\vA_{i_1,\dots, i_{2n}})<n]\\
&= &\sum_{j=1}^\infty  \Pr[i_1,..., i_{2n}\text{ had errors $j-1$ times}]\cdot \Pr[\rank(\vA_{i_1,\dots, i_{2n}})<n]\\
&\le &\sum_{j=1}^\infty  \left(1-\frac{1}{n^{4d}}\right)^{j-1} \cdot q^{-n}\\
& = & n^{4d} e^{-\Omega(n)} = e^{-\Omega(n)}\,.
\end{eqnarray*}
The third line from the bottom follows from the fact that the locations of the errors are assumed to be independent of the sketch, and therefore independent of the matrix $\vA$.
The second line from the bottom follows from \clref{cl:full rank matrix} when $\beta = n$; note that, because we use the union bound and evaluate the probability separately for each value of $j$,  we can treat $\vA_{i_1,\dots, i_{2n}}$ as a randomly chosen $2n\times n$ matrix, ignoring the fact that these matrices are correlated.

We claim that if the code generated by $\vA$ 
has distance at least $2t+1$, then $\decode_t$ will output $\perp$ or the correct $\vx'=\vx$.
Indeed, suppose $\vx'\neq \vx$. Since $\vA (\vx-\vx')$ has at least $2t+1$ nonzero coordinates by the minimum distance of the code generated by $\vA$, and at most  $t$ of those can be zeroed out by the addition of  $w-w'$, such an $\vx'$ will not pass Step 6. 

The probability that the code generated by $\vA$ has distance lower than $2t+1$ is at most $e^{-\Omega(n)}$ (see \corref{cor:code high distance}), and the probability of outputting $\perp$ is also $e^{-\Omega(n)}$~(computed above).  This gives the correctness bound for $\decode_t$.
\end{proof}


\subsection{Lossless Computational Fuzzy Extractor}
\label{sec:lossless extractor}
We now state a setting of parameters that yields a lossless construction.  The intuition is as follows.  We are splitting our source into $\gamma$ blocks each of size $\log \rho q$~(from \lemref{lem:uniform LWE decision}) for a total input entropy of $\gamma\log \rho q$.  $\Key$ is derived from the hardcore bits $X_{1,\dots, k}$ of $X$, and is of size $k \log q$~(from \lemref{lem:many hardcore bits}). Thus, to achieve a lossless construction we need $k \log q = \gamma\log \rho q$.
In other words, in order to decode a meaningful number of errors, the vector $w$ is of higher dimension than the vector $X$, but each coordinate of $w$ is sampled using fewer bits than each coordinate of $X$.    Thus, by increasing the size of $q$~(while keeping $\rho q$ fixed) we can set $k\log q = \gamma\log \rho q$, yielding $|\key| = |W|$.  The formal statement is below. 

\begin{theorem}
\label{thm:lossless secure extractor log}
Let $n$ be a security parameter and let the number of errors $t = c\log n$ for some positive constant $c$.    Let $d$ be a positive constant (giving us a tradeoff between running time of $\rep$ and $|w|$). Consider the Hamming metric over the alphabet $\mathcal{Z}=[-2^{b-1},2^{b-1}]$, where  $b = \log 2(c/d+2) n^2 =O(\log n)$.  Let $W$ be uniform over $\mathcal{M}=\mathcal{Z}^\gamma$, where $\gamma={(c/d+2)n}=O(n)$.  If GAPSVP and SIVP are hard to approximate within polynomial factors using quantum algorithms, then there is a setting of $q = \poly(n)$ such that for any polynomial $s_{sec}=\poly(n)$ there exists $\epsilon=\ngl(n)$ such that the following holds: \consref{cons:informal construction} is a $(\M, W, \gamma\log |\mathcal{Z}|, t)$-computational fuzzy extractor that is $(\epsilon, s_{sec})$-hard with error $\delta = e^{-\Omega(n)}$.
The generate procedure $\gen$ takes $O(n^2)$ operations over $\Fq$, and the reproduce procedure $\rep$ takes expected time $O(n^{4d+3})$ operations over $\Fq$.
\end{theorem}
\begin{proof}
Security follows by combining Lemmas \ref{lem:uniform LWE decision} and \ref{lem:many hardcore bits}; efficiency follows by Lemma \ref{lem:i t poly time}. For a more detailed explanation of the various parameters and constraints see \secref{sec:parameter settings}.  
\end{proof}

\subsection{Comparison with computational extractor-based constructions}
\label{sec:prg based comparison}
An alternative approach to building a computational fuzzy extractor is to use  a computational extractor (e.g.,~\cite{krawczyk2010cryptographic,barak2011leftover,dachman2012computational}) in place of the information-theoretic extractor in the sketch-and-extract construction.  We will call this approach \emph{sketch-and-comp-extract}.  (A simple example of a computational extractor is a pseudorandom generator applied to the output of an information-theoretic extractor; note that LWE-based pseudorandom generators exist~\cite{applebaum2006pseudorandom}.)

This approach (specifically, its analysis via \lemref{lem:fuzzy ext construction}) works as long as the amount of entropy $\tilde{m}$ of $w$ conditioned on the sketch $s$ remains high enough to run a computational extractor.  However, as discussed in the introduction, secure sketches are subject to strong lower bounds.  For many practical sources, there are no known constructions of secure sketches.

In contrast, our approach does not require the entropy of $w$ conditioned on $p=(\vA, \vA X+w)$ to  be high enough for a computational extractor. Instead, we require that $w$ is not computationally recoverable  given $p$.  This requirement is weaker---in particular, in our construction, $w$ may have no information-theoretic entropy conditioned on $p$.  

%The main advantage of our analysis (instead of sketch-and-comp-extract) is that security need not depend on the error-tolerance $t$.  In our construction, the error-tolerance depends only on the best available decoding algorithm for random linear codes, because decoding algorithms will not reach the information-theoretic decoding radius.

Unfortunately, the above construction comes with strong limitations on the error tolerance and supported sources. Herder et al.~\cite{herder2014trapdoor} subsequently improved the error-tolerance for some sources using confidence information (discussion in the introduction).  In \chapref{chap:more errors than ent}, we show practical constructions based on point obfuscation.  These constructions do not use \emph{sketch-and-comp-extract}.
In the next section, we show that \consref{cons:informal construction} is secure for more sources than just uniform $W$.
%To summarize, the advantage of \consref{cons:informal construction} is that the security of our construction does not depend on the decoding radius $t$.  
%The disadvantages of \consref{cons:informal construction} are that it supports a limited number of errors and only a uniformly distributed source.  We begin to address this second problem in the next section.

\section{Moving to Nonuniform Sources}
\label{sec:LWE block fixing sources}
In this section, we show that~\consref{cons:informal construction} is secure for a particular class of distributions called symbol-fixing.   First we define a symbol fixing source~(from~\cite[Definition 2.3]{KZ07}): 
\begin{definition}
Let $W = (W_1,..., W_{\gamma+\alpha})$ be a distribution where each $W_i$ takes values over an alphabet $\mathcal{Z}$.  We say that it is a $(\gamma+ \alpha, \gamma, |\mathcal{Z}|) $ \emph{symbol fixing source} if for $\alpha$ indices $i_1, \dots, i_\alpha$, the symbols $W_{i_1}, \dots, W_{i_\alpha}$ are fixed, and the remaining $\gamma$  symbols are chosen uniformly at random.  Note that $H_\infty(W)=\gamma \log |\mathcal{Z}|$.
\end{definition}

Symbol-fixing sources are a very structured class of distributions.  However, extending \consref{cons:informal construction} to such a class is not obvious.  Although symbol-fixing sources are deterministically extractable~\cite{KZ07}, we cannot first run a deterministic extractor before using \consref{cons:informal construction}.  This is because we need to preserve distance between $w$ and $w'$, and an extractor cannot preserve distance between input points.  We present an alternative approach, showing security of $\LWE$ directly with symbol-fixing sources.

The following theorem states the main technical result of this section, which is of potential interest outside our specific setting. The result is that $\distLWE$ with symbol-fixing sources is implied by standard $\distLWE$ (but for $n$ and $\gamma$ reduced by the number of fixed symbols).  
\begin{theorem}
\label{thm:blockLWE}
Let $n$ be a security parameter, let $\gamma, \alpha$ be polynomial in $n$, let $q=\poly(n)$ be a prime, and let $\beta\in\mathbb{Z}^+$ be such that $q^{-\beta} = \ngl(n)$. 
Let $U$ denote the uniform distribution over $\mathcal{Z}^\gamma$ for an alphabet $\mathcal{Z}\subset \Fq$, and let $W$ denote a $(\gamma+\alpha, \gamma, |\mathcal{Z}|)$ symbol fixing source over $\mathcal{Z}^{\gamma+\alpha}$.
If $\distLWE_{n, \gamma,q, U}$ is secure, then $\distLWE_{n+\alpha+\beta, \gamma+\alpha, q, W}$ is also secure.
\end{theorem}

\thref{thm:blockLWE} also holds for an arbitrary error distribution~(not just uniform error) in the following sense.  Let $\chi'$ be an arbitrary error distribution.  Define $\chi$ as the distribution where $\gamma$ dimensions are sampled according to $\chi'$ and the remaining dimensions have some fixed error.  Then, security of $\distLWE_{n, \gamma, q, \chi'}$ implies security of $\distLWE_{n+\alpha+ \beta, \gamma+\alpha, q, \chi}$.  We show this stronger version of the theorem in \secref{sec:proof of block theorem}.

The intuition for this result is as follows.  Providing a single sample with no error ``fixes'' at most a single variable.  Thus, if there are significantly more variables than samples with no error,  search $\LWE$ should still be hard.  We are able to show a stronger result that $\distLWE$ is still hard.  The nontrivial part of the reduction is using the additional $\alpha+ \beta$ variables  to ``explain'' a random value for the last $\alpha$ samples, without knowing the other variables.  The $\beta$ parameter is the slack needed to ensure that the ``free'' variables have influence on the last $\alpha$ samples.  A similar theorem for the case of a single fixed dimension was shown in concurrent work by Brakerski et al.~\cite[Lemma 4.3]{brakerski2013classical}.  The proof techniques of Brakerski et al.\ can be extended to our setting with multiple fixed dimensions, improving the parameters of \thref{thm:blockLWE}~(specifically, removing the need for $\beta$).

\thref{thm:blockLWE} allows us to construct a lossless computational fuzzy extractor from block-fixing sources: 

\begin{theorem}
\label{thm:lossless block sketch log}
Let $n$ be a security parameter and let $t = c\log n$ for some positive constant $c$.  Let $d\le c$ be a positive constant and consider the Hamming metric over the alphabet $\mathcal{Z}=[-2^{b-1},2^{b-1}]$, where $b \approx \log 2(c/d+2)n^2 = O(\log n)$.  Let $\mathcal{M} = \mathcal{Z}^{\gamma+\alpha}$ where $\gamma= (c/d+2)n=O(n)$ and $\alpha \leq n/3$. 
Let $\mathcal{W}$ be the class of all $(\gamma+\alpha, \gamma, |\mathcal{Z}|)$-symbol fixing sources.  If GAPSVP and SIVP are hard to approximate within polynomial factors 
using quantum algorithms, then  there is a setting of $q = \poly(n)$ such that for any polynomial $s_{sec} = \poly(n)$
 there exists $\epsilon = \ngl(n)$ 
such that the following holds: \consref{cons:informal construction} is a $(\M, \mathcal{W}, \gamma\log |\mathcal{Z}|, t)$-computational fuzzy extractor that is $(\epsilon, s_{sec})$-hard with error $\delta = e^{-\Omega(n)}$.
 The generate procedure $\gen$ takes $O(n^2)$ operations over $\Fq$, and the reproduce procedure $\rep$ takes expected time $O(n^{4d+3} \log n)$ operations over $\Fq$.
\end{theorem}

\begin{proof} Security follows by Lemmas~\ref{lem:uniform LWE decision} and~\ref{lem:many hardcore bits} and \thref{thm:blockLWE}.  
Efficiency follows by \lemref{lem:i t poly time}.  For a more detailed explanation of parameters see \secref{ssec:block params}. 
\end{proof}



\section{Proof of Theorem \ref{thm:blockLWE}}
\label{sec:proof of block theorem}

\begin{proof}
We assume that all of the fixed blocks are located at the end and their fixed value is $0$.  If the blocks are fixed to some other value, the reduction is essentially the same.
 In the reduction, the distinguisher is allowed to depend on the source and can know the positions of the fixed blocks and their values.  For a matrix $\vA$ we will denote the $i$-th row by $\va_i$.  For a set $T$ of column indices, we denote by $\vA_T$ the restriction of the matrix $\vA$ to the columns contained in $T$.  Similarly, for a vector $\vx$ we denote by $\vx_T$ the restriction of $\vx$ to the variables contained in $T$.  We use similar notation for the complement of $T$, denoted $T^c$.  For a matrix or vector we use $\mathsf{T}$ to denote the transpose.  We use $i$ as an index into matrix rows and the error vector and $j$ as an  index into columns and the solution vector.

Let $n$ be a security parameter, $\gamma, q, \alpha = \poly(n)$.  Let $\beta$ be such that $q^{-\beta} = \ngl(n)$.  All operations are computed modulo $q$, and we omit the ``$\bmod q$'' notation.  Let $\chi'$ be some error distribution over $\Fq^\gamma$ and let $\chi$ over $\Fq^{\gamma+\alpha}$ be defined by sampling $\chi'$ to obtain values on dimensions $1,..., \gamma$ and then appending $\alpha$ 0s.  

Let $D$ be a distinguisher that breaks $\distLWE_{n+\alpha+\beta, \gamma+\alpha, q, \chi}$ with advantage $\epsilon>1/\poly(n)$.
Let  $\vA$ denote the uniform distribution over $\Fq^{(\gamma+\alpha)\times(n+\alpha+\beta)}$, $X$ denote the uniform distribution over $\Fq^{(n+\alpha+\beta)}$, and $U$ denote the uniform distribution over $\Fq^{\gamma+\alpha}$. Then
\[
|\Pr[D(\vA, \vA X+\chi) = 1] - \Pr[D(\vA, U )=1]|> \epsilon.
\]

We build a distinguisher that breaks $\distLWE_{n, \gamma, q, \chi'}$.  Let $\vA'$ denote the uniform distribution over $\Fq^{\gamma\times n}$, $X'$ denote the uniform distribution over $\Fq^n$, and $U'$ denote the uniform distribution over $\Fq^{\gamma}$.  We will build a distinguisher $D'$ of polynomial size for which
\begin{align}
\label{eq:block LWE dist}
|\Pr[D'(\vA', \vA'X'+\chi') = 1] - \Pr[D'(\vA', U') =1]|> (\epsilon - \ngl(n))(1-\ngl(n)) \approx \epsilon.
\end{align}
$D'$  will make a single call to $D$, so we focus on how to prepare a random block-fixing instance for $D$ from the random instance that $D'$ is given.  The code for $D'$ is given in \figref{fig:perfectLWEreduction}.

\begin{figure}[p]
\begin{framed}
\begin{enumerate}
\item Input $\vA', \vb'$, where $\vA' \overset{\$} \leftarrow \Fq^{\gamma\times n}$ and $\vb'$ is either uniform over $\Fq^\gamma$ or $\vb' = \vA'\vx' +\ve'$ for $\ve'\overset{\$} \leftarrow \chi'$ and uniform $\vx'\  \overset{\$} \leftarrow \Fq^n$.
\item Choose $\vect{R} \overset{\$}\leftarrow \Fq^{\alpha \times n}$ uniformly at random. Initialize $\vQ \in \Fq^{\gamma\times (\alpha+\beta)}$  to be the zero matrix.
\item Let $\vb^* = (\vb', b^*_{\gamma+1}, \ldots,b^*_{\gamma+\alpha})$, for uniformly chosen $(b^*_{\gamma+1}, \ldots, b^*_{\gamma+\alpha} )\overset{\$} \leftarrow \Fq^\alpha$.\label{step:b generation}
\item Choose $\vect{S} \overset{\$}\leftarrow \Fq^{\alpha \times (\alpha+\beta)}$ uniformly at random.
		\subitem If $\rank(\vect{S})<\alpha$, stop and output a random bit.
\item Find a set of $\alpha$ linearly independent columns in $\vS$.  Let $T$ be the set of indices of these columns.\label{step:find columns}
\item For all $1\le j \le \alpha+\beta$, $j\notin T$:
\label{step:fill in matrix}
\subitem Choose $x_{n+j}\overset{\$}\leftarrow \Fq$ uniformly at random.  
\subitem For $i=1,..., \gamma$:
\subsubitem Choose $\vQ_{i,j}\overset{\$}\leftarrow \Fq$ uniformly at random.
\subsubitem Set $b_i^* = b_i^* + \vQ_{i,j} x_{n+j}$.
\item Initialize $\vA^*  = \left(\begin{array}{c | c}\vA' & \vQ\\\hline \vR & \vS\end{array}\right)$.
\item \label{step:randomization}
For {$i=1,..., \gamma$}:
\subitem Choose a row vector $\pi_i \leftarrow \Fq^{1 \times \alpha}$ uniformly at random.
\subitem Set $\va_{i} \leftarrow \va^*_{i}+\pi_i (\vR||\vS)$
\subitem Set $b_i \leftarrow b^*_i + \pi_i (b^*_{\gamma+1},..., b^*_{\gamma+\alpha})^{\mathsf{T}}$
\item For $i=\gamma+1,\dots, \gamma+\alpha$:
\subitem Set $\va_i \leftarrow \va^*_i$
\subitem Set $b_i = b_i^*$.
\item Output $D(\vA, \vb)$. 
\end{enumerate}
\end{framed}
\caption{A PPT $D'$ that distinguishes LWE using distinguisher for LWE w/ block fixing source}
\label{fig:perfectLWEreduction}
\end{figure}

The distinguisher $D'$ has an advantage when $\vS$ is of rank $\alpha$.  This occurs with overwhelming probability:
\begin{claim}
\label{cl:full rank matrix 5.2}
Let $\vS \overset{\$}\leftarrow \Fq^{\alpha \times (\alpha+\beta)}$ be randomly generated.  Then $\Pr[\rank(\vS)=\alpha]\geq  1- \ngl(n)$.
\end{claim}
\begin{proof}
Direct result of \clref{cl:full rank matrix} because $q^{-\beta} = \ngl(n)$.
\end{proof}
The probability that a random $\vS$ is not full rank is $\ngl(n)$ so the distinguisher $D$ must still have an advantage when the matrix $\vS$ is full rank.  That is,
\[
|\Pr[D(\vA, \vA X+\chi) = 1  | \rank(\vS) = \alpha] - \Pr[D(\vA, U) =1 | \rank(\vS) = \alpha]|> \epsilon - \ngl(n).
\]

It suffices to show that $D'$ prepares a good instance for $D$ conditioned on $\vS$ being full rank. We show this in the following three claims: 
\begin{enumerate}
\item If $\vA'$ is a random matrix then $\vA$ is a random matrix subject to the condition that $\rank(\vS) = \alpha$.
\item If $\vb' = \vA'\vx'+\ve'$ for uniform $\vA'$ and $\vx'$, then $\exists \vx$~(uniformly distributed and independent of $\vA$ and $\ve'$) such that $\vb = \vA \vx + \ve$, where $\ve_i = \ve_i'$ for $1\leq i\leq \gamma$ and $\ve_i = 0$ otherwise.
\item If the conditional distribution $\vb'\,|\,\vA'$ is uniform, then the conditional distribution $\vb\,|\,\vA$ is also uniform.
\end{enumerate}

\begin{claim}
\label{cl:randomMatrixDist}
The matrix $\vA$ is distributed as a uniformly random choice from the set of all matrices whose bottom-right $\alpha\times (\alpha+\beta)$ submatrix $\vS$ satisfies $\rank(\vS) = \alpha$.
\end{claim}
\begin{proof}
The bottom $\alpha$ rows of $\vA$ (namely, $\vR||\vS$) are randomly generated~(conditioned on $\rank(\vS) =\alpha$).  The top left $\gamma\times n$ quadrant of $\vA$ is also random, because it is produced as a sum of a uniformly random $\vA'$ with some values that are uncorrelated with $\vA'$.
The submatrix of the top-right $\gamma\times (\alpha+\beta)$ quadrant corresponding to $\vQ_{T^c}$~(recall this is the restriction of $\vQ$ to the columns not in $T$) is also random, because it is initialized with random values to which some uncorrelated values are then added. It is important to note that all these values are independent of $\pi_i$ values.

Thus, we restrict attention to the $\gamma\times \alpha$ submatrix of $\vA$ that corresponds to $\vQ_T$ in $\vA^*$ (note that these values are $0$ in $\vA^*$).  Consider a particular row $i$. That row is computed as $\pi_i \vS_{T}$.  Since $\vS_T$ is a full rank square matrix and $\pi_i$ is uniformly and independently generated, that row is also uniform and independent of other entries in $\vA$.
\end{proof}
\begin{claim}
\label{cl:random ax+e}
If $D'$ is provided with input distributed as $\vA', \vb' = \vA'\vx'+\ve'$ then $\vb = \vA \vx+\ve$, where
\begin{itemize}
\item $e_i = e_i'$ for $1\leq i\leq \gamma$,
\item $e_i = 0$ for $\gamma<i\leq \gamma+\alpha$,
\item $x_j = x_j'$ for $1\leq j \leq n$,
\item and $x_j$ is uniform and independent of $\vA$ and $\ve'$ for $n<j\le n+\alpha+\beta$.
\end{itemize}
\end{claim}
\begin{proof}
Partially define $\vx$ as $x_j = x_j'$ if $1\leq j \leq n$ and $x_j$ as the value generated in step~\ref{step:fill in matrix} for $j>n $ and $j\not\in T$.  Define the remaining variables $\vx_T$ as the solution to the following system of equations.
\begin{eqnarray}
\vS_T  \vx_T = \begin{pmatrix} b_{\gamma+1}^*  \\ \vdots \\b_{\gamma+\alpha}^* \end{pmatrix}  - \vR \vx' - \vS_{T^c} \vx_{T^c}  \label{eq:x t solution}
\end{eqnarray}
A solution $\vx_T$ exists as $\vS_T$ is full rank. Moreover, it is uniform and independent of $\vA$ and $\ve$, because $b^*_{\gamma+1}, \dots, b^*_{\gamma+\alpha}$ are uniform and independent of $\vA$ and $\ve$. 

We now show that $\vb^* = \vA^* \vx+\ve$.  All entries in matrix $\vQ$ corresponding to variables in $T$ are set to zero.  Thus, the values of $\vx_T$ do not affect $b_i^*$ for $1\le i \le \gamma$.  The values of $\vx_{T^c}$ are manually set, and $\vQ_{i, j} \vx_{j}$ is added to the corresponding $b_i^*$.  Thus, for $1 \leq i \le \gamma$, we have $\vb^* = \vA^*\vx+\ve$.   For $\gamma< i$, this constraint is also satisfied by the values of $\vx_T$ set in Equation~\ref{eq:x t solution}.  

Thus, it remains to show that step~\ref{step:randomization} preserves this solution.
We now show that for all rows $1\leq i\leq \gamma$, if $b_i^* = \va^*_i \vx+e_i$  then $b_i = \va_i \vx + e_i$.
Recall the other rows are not modified.  We have the following for $1\leq i\leq \gamma$:
\begin{align*}
\va_i \vx + e_i &= \left(\va_{i}^*+ \pi_i(\vR || \vS)\right) \vx + e_i\\
&=\va_i^* \vx + e_i +\pi_i(\vR||\vS) \vx\\&= b_i^* + \pi_i (\vR||\vS)\vx
\end{align*}
Recall that $b_i =b_i^* + \pi_i(b_{\gamma+1}^*,..., b_{\gamma+\alpha}^*)^{\mathsf{T}}$.  We consider the product $(\vR|| \vS) \vx$.  It suffices to show that $(\vR|| \vS) \vx = (b_{\gamma+1}^*,..., b_{\gamma+\alpha}^*)^{\mathsf{T}}$:
\begin{align*}
(\vR|| \vS) \vx &= \vR \begin{pmatrix} \vx_1  \\ \vdots \\\vx_n \end{pmatrix}+  \vS_{T^c} \vx_{T^c}  + \vS_T \vx_T \\
&=\vR \begin{pmatrix} \vx_1  \\ \vdots \\\vx_n \end{pmatrix}+  \vS_{T^c} \vx_{T^c}   + \begin{pmatrix} b_{\gamma+1}^*  \\ \vdots \\b_{\gamma+\alpha}^* \end{pmatrix}  - \vR \begin{pmatrix} \vx_1  \\ \vdots \\\vx_n \end{pmatrix}- \vS_{T^c} \vx_{T^c}  
\\&=  \begin{pmatrix} b_{\gamma+1}^*  \\ \vdots \\b_{\gamma+\alpha}^* \end{pmatrix}
\end{align*}
This completes the proof of the claim.
\end{proof}
\begin{claim}\label{clm:random b}
If the conditional distribution $\vb'\,|\,\vA'$ is uniform, then $\vb\,|\,\vA$ is also uniform.
\end{claim}
\begin{proof}
Since $\vR, \vS$, and $\vQ$ are chosen independently of $\vb'$, the distribution $\vb'\,|\,\vA^*$ is uniform.
Let $\vb^*$ be the vector generated after step~\ref{step:fill in matrix}. Its first $\gamma$ coordinates are  computed by adding the uniform vector $\vb'$ to values that are independent of $\vb^*$, and its remaining $\alpha$ coordinates $b^*_{\gamma+1},\dots,b^*_{\gamma+\alpha}$ are   chosen uniformly.  Thus $\vb^*\,|\,\vA^*$ is uniform. 

Let $\vgamma$ represent the matrix formed by $\pi_{i}$.  It is independent of $\vb^*$ and $\vA^*$, so $\vb^*\,|\,(\vA^*, \vgamma)$ is uniform.    Let $\vgamma'=\left(\begin{array}{c | c}\vect{I_\gamma} & \vgamma \\\hline \vect{0} & \vect{I_\alpha}\end{array}\right)$.
Note that $\vb=\vgamma' \vb^*$.  Since $\vb^*\,|\,(\vA^*, \vgamma)$ is uniform, and $\vgamma'$ is invertible, $\vb\,|\,(\vA^*, \vgamma)$ must also be uniform.
Since $\vA$ is a deterministic function of $\vA^*$ and $\vgamma$ (assuming Step~\ref{step:find columns} is deterministic---if not, we can fix the coins used), the distribution $\vb\,|\,\vA$ is the same  as $\vb\,|\,(\vA^*, \vgamma)$ and is thus also uniform.
\end{proof}

Finally, the reduction runs in polynomial time, and together Claims~\ref{cl:randomMatrixDist},~\ref{cl:random ax+e}, and~\ref{clm:random b} show that when $\rank(\vS) = \alpha$ the distinguisher $D'$ properly prepares the instance; thus, 
\begin{align*}
&\left|\Pr[D'(\vA, \vA X+\chi) = 1] - \Pr[D'(\vA, U) =1] \right|\\
&\, = (| \Pr\left[D'(\vA', \vu') = 1 | \rank(\vS) = \alpha \right]\\&\,\,\,\,- \Pr\left[D'(\vA', \vb'=\vA'\vx + \ve)=1 | \rank(\vS) = \alpha\right]|) \Pr[\rank(\vS) = \alpha] \\
&\, =(|\Pr[D(\vA, \vA X+\chi) = 1  | \rank(\vS) = \alpha] \\&\,\,\,\,- \Pr[D(\vA, U) =1 | \rank(\vS) = \alpha]  |) \Pr[\rank(\vS) = \alpha] \\
&\, \geq (\epsilon - \ngl(n))(1-\ngl(n)) \approx \epsilon
\end{align*}
where the second line follows because we can detect when $\rank(\vS)<\alpha$ and output a random bit in this case.
Thus, Equation~(\ref{eq:block LWE dist}) is satisfied, and this completes the proof.
\end{proof}

\section{Parameter Settings for \consref{cons:informal construction}}
\label{sec:parameter settings}
In this section, we explain the different parameters that go into our construction.  In \thref{thm:lossless secure extractor log} we give a lossless fuzzy extractor from a security parameter $n$ and an error $t$.  In this section, we discuss constraints imposed by 1) efficient decoding, 2) maintaining security of the LWE instance, and 3) ensuring no entropy loss in the construction.  We begin by reviewing the parameters that make up our construction:

\begin{itemize}
\item $|W|$: The length of the source.  
\item $t$: Number of errors that can be supported.  
\item $n$: LWE security parameter (i.e., number of field elements in $X$), which must be greater than some minimum value $n_0$ for security.
\item $q$: The size of the field.  
\item $\rho$: The fraction of the field needed for error sampling.  
\item $\gamma$: The length of the string $w$ in symbols.  
\item $k$: The number of hardcore bits in $X$~(from \lemref{lem:many hardcore bits}).
\end{itemize}
We will split the source $W$ into $\gamma$ blocks, each over an alphabet of size $2\rho q+1$~(that is, $|W| = \gamma\log (2\rho q+1)$).  We will ignore the parameter $|W|$ and focus on $t, n, q, \rho,$ and $\gamma$.  As stated above we have three constraints:
\begin{itemize}
\item Maintain security of LWE.  If we assume GAPSVP and SIVP are hard to approximate within polynomial factors then \lemref{lem:uniform LWE decision} says that we get security for all $n$ greater than some minimum $n_0$ and $q = \poly(n)$ and $\rho q \geq 2 n^{1/2 + \sigma} \gamma = \poly(n)$.  The only reason to increase $\rho q$ over this minimum amount (other than security) is if the number of errors in $W$ decreases with a slightly larger block size.  We ignore this effect and assume that $\rho q = 2n^{1/2+\sigma}\gamma$.
\item Maintain efficient decoding of Construction~\ref{cons:decoding algorithm}.  Using \lemref{lem:i t poly time}, this means that $t\leq d\log n(\gamma/n-2)$.
\item Minimize entropy loss of the construction.  We will output $X_{1,...,k}$ so the entropy loss of the construction is $|W|-|X_{1,..., k}|$.  We want the entropy loss to be zero, that is, $|W| = |X_{1,..., k}|$.  Substituting, one has $\gamma\log (2\rho q+1) = k \log q$.
\end{itemize}
Collecting constraints, we can support any setting where $t, n, q, \rho, \gamma, k$ satisfy the following constraints~(for constants $d, f$):
\begin{align*}
n_0&< n -k \\
t&\leq d \log n\left(\frac{\gamma}{n}-2\right)\\
q &= n^f\\
\rho q  &= 2n^{1/2+\sigma}\gamma\\
\gamma\log (2\rho q +1)&= k \log q
\end{align*}
Substituting $q = n^f$ and $\rho q = 2n^{1/2+\sigma}\gamma$ yields the following system of equations:
\begin{align*}
n_0&< n - k\\
t&\leq d\log n\left(\frac{\gamma}{n}-2\right)\\
\gamma \log (4n^{1/2+\sigma}\gamma +1)&= k \log n^f
\end{align*}
This is the most general form of our construction; we can support any $n, t, \gamma$ that satisfy these equations for constants $d, f$.  However, the last equation may have no solution for $f$ constant.  Putting the last equation in terms of $f$ one has:
\begin{align*}
n_0&< n -k \\
t&\leq d\log n\left(\frac{ \gamma }{n} -2\right)\\
f &= \frac{\gamma}{k}\frac{\log (4n^{1/2+\sigma} \gamma+1)}{\log n}
\end{align*}
To ensure $f$ is a constant, we set $t = c \log n$ for some constant $c$ and $k = n/g$ for some constant $g> 1$.  Finally we assume that $\gamma$ is the minimum value such that $t \leq d \log n(\gamma/n-2)$~(that is, there are only as many dimensions as necessary for decoding using \lemref{lem:i t poly time}):
\begin{align*}
n_0&< n -k \\
\gamma &= \frac{(c/d+2)n \log n}{\log n} = (\frac{c}{d}+2)n\\
f &= \frac{\gamma}{k}\frac{\log (4n^{1/2+\sigma}\gamma+1)}{\log n} = \frac{g(c+2d)}{d}\frac{\log (\frac{4(c+2d)}{d} n^{3/2+\sigma}+1)}{\log n}
\end{align*}
Note that $f$ is a constant in $n$.
Assuming $n-k = n(1-1/g) > n_0$ and letting $t= c\log n$ we get the following setting:
\begin{align*}
\gamma &= (\frac{c}{d}+2)n\\
q & = n^f = n^{\frac{\gamma}{k}\frac{\log (4n^{1/2+\sigma}\gamma+1)}{\log n}} = \poly(n)\\
\rho q &= 2n^{1/2+\sigma}\gamma = 2(\frac{c}{d}+2)n^{3/2+\sigma}
\end{align*}

Note that $f> \frac{\gamma}{k}\geq \frac{\gamma}{n} \geq \frac{(c/d+2)n}{n} \geq 3$ as long as $d<c$~(this also ensures that $\gamma\geq 3n$, as required for \lemref{lem:i t poly time} to hold).  Since $\rho q = 2n^{1/2+\sigma}\gamma = O(n^{5/2})$ in our setting, $\rho = O(n^{-1/2})$.  Thus, for large enough settings of parameters $\rho$ is less than $1/10$ as required by \lemref{lem:uniform LWE decision}.

Furthermore, we get decoding using $O(n^{4d+3})$ $\Fq$ operations.  We can output $k$ symbols of $X$ and these symbols will be pseudorandom~(conditioned on $\vA, \vA X+W$).  The parameter $g$ provides a tradeoff between the number of dimensions needed for security and the size of the field $q$.  In \thref{thm:lossless secure extractor log}, we set $g=2$ and output the first half of $X$.  Setting $1<g<2$ achieves an increase in output length~(over the input length of $W$).   We also (arbitrarily) set $\sigma=1/2$ to simplify the statement of \thref{thm:lossless secure extractor log}, making $\rho q = 2(c/d+2) n^2$.

\subsection{Parameter Settings for \thref{thm:lossless block sketch log}}
\label{ssec:block params}
We repeat parameter settings for block fixing sources.  We now have $\gamma+\alpha$ as the number of samples, while $n + \alpha+\omega(1)$ is the number of variables.  We can support any setting where $t, n, q, \rho, \gamma, k, \alpha$ satisfy the following constraints~(for $\beta = \omega(1)$ and constants $d, f$):
\begin{align*}
n_0&< n -k  -\alpha -\beta\\
t&\leq d \log n\left(\frac{\gamma}{n}-2\right)\\
q &= n^f\\
\rho q  &= 2n^{1/2+\sigma}\gamma\\
\gamma\log (2\rho q+1)&= k \log q
\end{align*}
Substituting $q = n^f$ and $\rho q = 2n^{1/2+\sigma}\gamma$ yields the following system of equations:
\begin{align*}
n_0&< n - k - \alpha -\beta\\
t&\leq d\log n\left(\frac{\gamma}{n}-2\right)\\
\gamma \log (4n^{1/2+\sigma}\gamma +1)&= k \log n^f
\end{align*}
As before, we can support any setting of $n, t, \gamma, \alpha$ that satisfies these equations for $\beta = \omega(1)$ and constants $d, f$.  However, the last equation may have no solution for $f$ constant.  Putting the last equation in terms of $f$ one has:
\begin{align*}
n_0&< n -k  - \alpha - \beta \\
t&\leq d\log n\left(\frac{ \gamma }{n} -2\right)\\
%f \log n &= \frac{m}{n}\log 2n^2m\\
f &= \frac{\gamma}{k}\frac{\log (4n^{1/2+\sigma} \gamma+1)}{\log n}
\end{align*}
To ensure $f$ is a constant, we set $t = c \log n$ for some constant $c$, $k = \alpha = n/3$, and $\beta = \log n$.  Finally we assume that $\gamma$ is the minimum value such that $t \leq  d \log n(\gamma/n-2)$~(that is, there are only as many dimensions as necessary for decoding using \lemref{lem:i t poly time}):
\begin{align*}
n_0&< n/3 -  \log n\\
\gamma &= \frac{(c/d+2)n \log n}{ \log n} = (\frac{c}{d}+2)n\\
f &= \frac{\gamma}{k}\frac{\log (4n^{1/2+\sigma}\gamma+1)}{\log n} = \left(3(\frac{c}{d}+2)\right)\frac{\log (4(\frac{c}{d}+2) n^{3/2+\sigma}+1)}{\log n} = O(1)
\end{align*}

Assuming $n/3-\log(n)> n_0$ and letting $t= c\log n$ we get the following setting:
\begin{align*}
\gamma &= (\frac{c}{d}+2)n\\
q & = n^f = n^{\frac{\gamma}{k}\frac{\log (4n^{1/2+\sigma}\gamma+1)}{\log n}} = \poly(n)\\
\rho q &= 2n^{1/2+\sigma}\gamma = 2(\frac{c}{d}+2)n^{3/2+\sigma}
\end{align*}

As before we arbitrarily set $\sigma = 1/2$, giving $\rho q = 2(\frac{c}{d}+2)n^2$.  Also, if $c<d$ then we get efficient decoding and $\rho = o(1)$ satisfying the condition of \lemref{lem:uniform LWE decision}.
