%% Thesis prospectus for Graduate school
%%
%% In the line below, 
%%
%% set version to 0 for short (ACNS 05 submission)
%% set version to 1 for long (eprint)
\def\version{1}
\def\shownotes{1}


%% Document class and packages.
%% hyperref is loaded last so it can patch the other packages'
%% cross-referencing internals (standard hyperref load-order rule).
\documentclass[runningheads,orivec,11pt]{llncs}
\usepackage{fullpage}

\usepackage{makeidx}  % allows for indexgeneration
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
%\usepackage{amsthm}
\usepackage{verbatim}
\usepackage{subfigure} % NOTE(review): deprecated package; migrate to subcaption if \subfigure is unused in the body
\usepackage{graphicx}  % (was loaded twice; duplicate removed)
\usepackage{enumerate}
\usepackage{xspace}
\usepackage{latexsym}
\usepackage{color}
\usepackage{framed}
\usepackage{algpseudocode}
\usepackage{hyperref}


% Cross-reference helpers: the \mbox keeps e.g. "Theorem~3" from being
% split across a line break.
\newcommand{\secref}[1]{\mbox{Section~\ref{#1}}}
\newcommand{\subsecref}[1]{\mbox{Subsection~\ref{#1}}}
\newcommand{\apref}[1]{\mbox{Appendix~\ref{#1}}}
\newcommand{\thref}[1]{\mbox{Theorem~\ref{#1}}}
\newcommand{\exref}[1]{\mbox{Example~\ref{#1}}}
\newcommand{\defref}[1]{\mbox{Definition~\ref{#1}}}
\newcommand{\corref}[1]{\mbox{Corollary~\ref{#1}}}
\newcommand{\lemref}[1]{\mbox{Lemma~\ref{#1}}}
\newcommand{\assref}[1]{\mbox{Assumption~\ref{#1}}}
\newcommand{\probref}[1]{\mbox{Problem~\ref{#1}}}
\newcommand{\clref}[1]{\mbox{Claim~\ref{#1}}}
\newcommand{\propref}[1]{\mbox{Proposition~\ref{#1}}}
\newcommand{\remref}[1]{\mbox{Remark~\ref{#1}}}
\newcommand{\consref}[1]{\mbox{Construction~\ref{#1}}}
\newcommand{\figref}[1]{\mbox{Figure~\ref{#1}}}
% Expectation and variance operators.  The starred form sets subscripts
% beneath the operator in display style (like \lim), e.g. \expe_{x \sim X}.
\DeclareMathOperator*{\expe}{\mathbb{E}}
% The operator name is already typeset upright by \DeclareMathOperator;
% the original's \text{Var} wrapper was redundant.
\DeclareMathOperator*{\var}{Var}


% Names of algorithms and objects, typeset in sans-serif.
\newcommand{\class}[1]{{\ensuremath{\mathsf{#1}}}}
\newcommand{\gen}{\ensuremath{\class{Gen}}\xspace}   % fuzzy-extractor Generate (Sec. 1)
\newcommand{\rep}{\ensuremath{\class{Rep}}\xspace}   % fuzzy-extractor Reproduce (Sec. 1)
\newcommand{\sketch}{\ensuremath{\class{SS}}\xspace} % presumably "secure sketch" -- confirm
\newcommand{\rec}{\ensuremath{\class{Rec}}\xspace}
\newcommand{\enc}{\ensuremath{\class{Enc}}\xspace}
\newcommand{\dec}{\ensuremath{\class{Dec}}\xspace}
\newcommand{\prg}{\ensuremath{\class{prg}}\xspace}
\newcommand{\zo}{\ensuremath{\{0, 1\}}}
\newcommand{\vect}[1]{\ensuremath{\mathbf{#1}}}      % bold vector; shorthands defined further down
\newcommand{\zq}{\ensuremath{\mathbb{Z}_q}}
\newcommand{\Fq}{\ensuremath{\mathbb{F}_q}}
\newcommand{\sample}{\ensuremath{\class{Sample}}\xspace}
\newcommand{\neigh}{\ensuremath{\class{Neigh}}\xspace}
\newcommand{\dis}{\ensuremath{\mathsf{dis}}}
\newcommand{\decode}{\ensuremath{\mathsf{Decode}}}
\newcommand{\guess}{\mathsf{guess}}


% Adversary.
\newcommand{\A}{\mathcal{A}}


% Computational-entropy notion tags (typewriter font distinguishes them
% from math variables) and entropy operators.
% \mhyphen was used below but never defined anywhere in the file; define it
% here (\providecommand: no-op if some package already supplies it).
\providecommand{\mhyphen}{\mbox{-}}
\newcommand{\metric}{\ensuremath{\mathtt{Metric}}\xspace}
\newcommand{\hill}{\ensuremath{\mathtt{HILL}}\xspace}
\newcommand{\hillrlx}{\ensuremath{\mathtt{HILL\mhyphen rlx}}\xspace}
\newcommand{\yao}{\ensuremath{\mathtt{Yao}}\xspace}
\newcommand{\unp}{\ensuremath{\mathtt{unp}}\xspace}
\newcommand{\unprlx}{\ensuremath{\mathtt{unp\mhyphen rlx}}\xspace}
\newcommand{\metricstar}{\ensuremath{\mathtt{Metric}^*}\xspace}
\newcommand{\metricd}{\ensuremath{\mathtt{Metric}^*\mathtt{-d}}\xspace}
\newcommand{\hillstar}{\ensuremath{\mathtt{HILL}^*}\xspace}
\newcommand{\hillprime}{\ensuremath{\mathtt{HILL'}}\xspace}
\newcommand{\metricprime}{\ensuremath{\mathtt{Metric'}}\xspace}
\newcommand{\metricprimestar}{\ensuremath{\mathtt{Metric'}^*}\xspace}
\newcommand{\hillprimestar}{\ensuremath{\mathtt{HILL'}^*}\xspace}
\newcommand{\poly}{\ensuremath{\mathtt{poly}}\xspace}
\newcommand{\rank}{\ensuremath{\mathtt{rank}}\xspace}
\newcommand{\ngl}{\ensuremath{\mathtt{ngl}}\xspace}  % negligible -- presumably; confirm
\newcommand{\Hoo}{\mathrm{H}_\infty}                 % min-entropy
\newcommand{\Hav}{\tilde{\mathrm{H}}_\infty}         % average (conditional) min-entropy
\newcommand{\Hfuzz}{\mathrm{H}^{\mathtt{fuzz}}_{t,\infty}}
\newcommand{\Huse}{\mathrm{H}_{\mathtt{usable}}}     % minimum usable entropy (Sec. on key derivation)
% The following three originally used \mathsl, which is not defined in
% LaTeX2e and produced "Undefined control sequence"; \mathit is the
% standard italic math alphabet.
\newcommand{\Dom}{\mathit{Dom}}
\newcommand{\Range}{\mathit{Rng}}
\newcommand{\Keys}{\mathit{Keys}}
% \newcommand (errors on clash) instead of the original unchecked \def.
\newcommand{\col}{\mathrm{Col}}

% Distinguisher families: deterministic/randomized, with output in
% {0,1} (boolean) or [0,1] (real-valued), per the superscript naming.
\newcommand{\ddetbin}{\ensuremath{\mathcal{D}^{det,\{0,1\}}}}
\newcommand{\drandbin}{\ensuremath{\mathcal{D}^{rand,\{0,1\}}}}
\newcommand{\ddetrange}{\ensuremath{\mathcal{D}^{det,[0,1]}}}
\newcommand{\drandrange}{\ensuremath{\mathcal{D}^{rand,[0,1]}}}

\newcommand{\expinfo}{\ensuremath{\mathcal{E}}}
\newcommand{\ext}{\ensuremath{\mathtt{ext}}}   % extractor -- presumably; confirm
\newcommand{\cext}{\ensuremath{\mathtt{cext}}}
\newcommand{\rext}{\ensuremath{\mathtt{rext}}}
\newcommand{\cons}{\ensuremath{\mathtt{cons}}}
\newcommand{\decons}{\ensuremath{\mathtt{decons}}}


% Learning-with-errors assumption names.
\newcommand{\lwe}{\class{LWE}}
\newcommand{\LWE}{\class{LWE}}
\newcommand{\distLWE}{\ensuremath{\class{dist\mbox{-}LWE}}}

%\newtheorem{theorem}{Theorem}[section]
%\newtheorem{lemma}[theorem]{Lemma}
%\newtheorem{proposition}[theorem]{Proposition}
%\newtheorem{corollary}[theorem]{Corollary}
%\newtheorem{definition}[theorem]{Definition}
%\newtheorem{assumption}[theorem]{Assumption}
%\newtheorem{claim}[theorem]{Claim}
%\newtheorem{problem}[theorem]{Problem}
%\newtheorem{construction}[theorem]{Construction}

% Counters for the custom numbered-list environments.
\newcounter{ctr}
\newcounter{savectr}
\newcounter{ectr}

% \saveparindent / \saveparskip are referenced by every custom list
% environment below but were never defined in this file (undefined
% control sequence at first use).  Snapshot the document's paragraph
% parameters; the \ifdefined guard is a no-op if some style file
% already defines them.
\ifdefined\saveparindent\else\newlength{\saveparindent}\fi
\setlength{\saveparindent}{\parindent}
\ifdefined\saveparskip\else\newlength{\saveparskip}\fi
\setlength{\saveparskip}{\parskip}

% Bulleted list with tightened vertical spacing.
\newenvironment{newitemize}{%
\begin{list}{\mbox{}\hspace{5pt}$\bullet$\hfill}{\labelwidth=15pt%
\labelsep=5pt \leftmargin=20pt \topsep=3pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{3pt} }}{\end{list}}


% Numbered list (1), (2), ... with tightened spacing.
\newenvironment{newenum}{%
\begin{list}{{\rm (\arabic{ctr})}\hfill}{\usecounter{ctr} \labelwidth=17pt%
\labelsep=5pt \leftmargin=22pt \topsep=3pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{2pt} }}{\end{list}}

% Dash-labelled ("tiret") list; the label is a short horizontal rule.
\newenvironment{tiret}{%
\begin{list}{\hspace{2pt}\rule[0.5ex]{6pt}{1pt}\hfill}{\labelwidth=15pt%
\labelsep=3pt \leftmargin=22pt \topsep=3pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{2pt}}}{\end{list}}


% Unlabelled, unindented list with generous space between items.
\newenvironment{blocklist}{\begin{list}{}{\labelwidth=0pt%
\labelsep=0pt \leftmargin=0pt \topsep=10pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{20pt}}}{\end{list}}

% As blocklist, but indented 30pt and with tighter spacing.
\newenvironment{blocklistindented}{\begin{list}{}{\labelwidth=0pt%
\labelsep=30pt \leftmargin=30pt\topsep=5pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{10pt}}}{\end{list}}

% Single-level numbered list (1), (2), ...
\newenvironment{onelist}{%
\begin{list}{{\rm (\arabic{ctr})}\hfill}{\usecounter{ctr} \labelwidth=18pt%
\labelsep=7pt \leftmargin=25pt \topsep=2pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{2pt} }}{\end{list}}

% Second-level numbered list labelled (ctr.ectr); steps the ectr counter
% and inherits ctr from the enclosing numbered list.
\newenvironment{twolist}{%
\begin{list}{{\rm (\arabic{ctr}.\arabic{ectr})}%
\hfill}{\usecounter{ectr} \labelwidth=26pt%
\labelsep=7pt \leftmargin=33pt \topsep=2pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{2pt} }}{\end{list}}

% Vertically spaced, label-less list used as a container for centered
% display material (see newcenter/codecenter below).
\newenvironment{centerlist}{%
\begin{list}{\mbox{}}{\labelwidth=0pt%
\labelsep=0pt \leftmargin=0pt \topsep=10pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{10pt} }}{\end{list}}

% Centered display block; the argument is the displayed content.
\newenvironment{newcenter}[1]{\begin{centerlist}\centering%
\item #1}{\end{centerlist}}

% Centered display block in small type, intended for code.
\newenvironment{codecenter}[1]{\begin{small}\begin{centerlist}\centering%
\item #1}{\end{centerlist}\end{small}}

% Inline author notes with a "!!!!!" margin flag; rendered only when
% \shownotes=1 (toggle at the top of the file).
\ifnum\shownotes=1
\newcommand{\authnote}[2]{{\textcolor{red}{\textsf{#1 notes: }\textcolor{blue}{ #2}}\marginpar{\textcolor{red}{\textbf{!!!!!}}}}}
\else
\newcommand{\authnote}[2]{}
\fi
% Per-author shorthands.
\newcommand{\bnote}[1]{{\authnote{Ben}{#1}}}
\newcommand{\lnote}[1]{{\authnote{Leo}{#1}}}
\newcommand{\rnote}[1]{{\authnote{Ran}{#1}}}
\newcommand{\onote}[1]{{\authnote{Omer}{#1}}}

% One-letter bold vector/matrix shorthands (\vect is \mathbf, defined above).
\newcommand{\ve}{\vect{e}}
\newcommand{\vm}{\vect{m}}
\newcommand{\vy}{\vect{y}}
\newcommand{\vE}{\vect{E}}
\newcommand{\vS}{\vect{S}}
\newcommand{\vA}{\vect{A}}
\newcommand{\vc}{\vect{c}}
\newcommand{\vW}{\vect{W}}
\newcommand{\vQ}{\vect{Q}}
\newcommand{\vR}{\vect{R}}
\newcommand{\vU}{\vect{U}}
\newcommand{\vT}{\vect{T}}
\newcommand{\vX}{\vect{X}}
\newcommand{\vB}{\vect{B}}
\newcommand{\vz}{\vect{z}}
\newcommand{\vd}{\vect{d}}
\newcommand{\vs}{\vect{s}}
\newcommand{\vx}{\vect{x}}
\newcommand{\va}{\vect{a}}
\newcommand{\vb}{\vect{b}}
% NOTE(review): uses \mathbf directly rather than \vect -- same output,
% but inconsistent with the shorthands above.
\newcommand{\vgamma}{\mathbf{\Gamma}}
\newcommand{\vt}{\vect{t}}
\newcommand{\vu}{\vect{u}}
\newcommand{\vF}{\vect{F}}
\newcommand{\recout}{x}   % symbol for the output of \rec -- presumably; confirm
\newcommand{\ignore}[1]{} % swallows its argument (block-comment helper)
\newcommand{\M}{\mathcal{M}}
\newcommand{\Vol}{\mathsf{Vol}}

%Figure spacing
%\renewcommand{\topfraction}{0.85}
%\renewcommand{\textfraction}{0.1}
%\renewcommand{\floatpagefraction}{.75}

%General purpose macros
\newcommand{\addbigskip}{\addvspace{\bigskipamount}}
\newcommand{\addmedskip}{\addvspace{\medskipamount}}


% Loosen float-placement limits so large figures/tables are not pushed
% to the end of the document.  (The \textfraction line appeared twice in
% the original; the duplicate is removed.)
\renewcommand{\topfraction}{1}
\renewcommand{\textfraction}{0}
\setcounter{totalnumber}{4}
\setcounter{topnumber}{4}

\title{Strong Key Derivation From Noisy Sources}
% NOTE(review): the contents of \subtitle and \author appear transposed
% (author name in \subtitle, document type in \author).  This may be a
% deliberate llncs title-page layout hack for a prospectus -- confirm
% before swapping.
\subtitle{Benjamin W. Fuller}
\author{Ph.D. Dissertation Prospectus}
\institute{Department of Computer Science}

\begin{document}

\maketitle

\section{Introduction}
In today's online world, people's personal information is distributed among many services.  Private details such as health records, bank accounts, and relationship status are stored online.  A service storing sensitive details should authenticate a user's identity before granting access to resources.  The standard mechanism for authenticating identity is a password  shared between a user and the service.

Passwords are easy to deploy, update, and revoke.  However, passwords have a significant weakness.  Ideally, passwords would consist of random characters to make password guessing infeasible, however, there is a strong tradeoff between password strength and memorability~\cite{yan2004password,weir2010testing}.  
Large-scale system compromises have revealed large files of hashed passwords, allowing attackers to perform brute-force guessing attacks against passwords~\cite{passwordProject}.  There is strong evidence that the average user's password can be guessed by a determined attacker~\cite{weir2010testing}.  The \emph{entropy}~(uncertainty) of authentication information is critical.

There are two natural alternatives to passwords, something the user \emph{has} or something the user \emph{is}~\cite{kim2011method}.
We collectively refer to either of these alternatives as a source.  While many sources have higher entropy than passwords, they present a new problem.  Sources instantiated from physical phenomena are often \emph{noisy}~\cite{daugman2004,monrose2002password,pappu2002physical,tuyls2006puf}.
  That is, repeated readings from the same physical source are close (according to some distance metric) but not identical.  Deriving strong (and consistent) keys from noisy sources is an important problem.

Dodis, Ostrovsky, Reyzin, and Smith~\cite{DBLP:journals/siamcomp/DodisORS08} designed fuzzy extractors to derive keys from noisy sources.  Let $w$ represent an initial reading of the source and $w'$ a nearby reading.  A fuzzy extractor consists of two algorithms. Generate~(\gen) takes $w$ as an input, and produces $key$ and some helper information $p$.  The second algorithm Reproduce~(\rep) takes a nearby reading $w'$ and the helper information, $p$.  \rep and \gen should produce the same $key$ if $w$ and $w'$ are close enough.  History has shown that stored authentication information is often compromised, so the $key$ should be cryptographically strong even if an attacker knows the helper data $p$.

Fuzzy extractors must reveal some information about the initial reading $w$ in order to accept nearby $w'$.  This limits the length of the derived key~(see~\cite[Appendix C]{DBLP:journals/siamcomp/DodisORS08}).  Fuzzy extractors are closely tied to error-correcting codes.  Error-correcting codes are well-studied and we have tight bounds on the best codes.  These bounds translate to upper bounds on the length of keys derived from fuzzy extractors.

Unfortunately, these bounds have kept fuzzy extractors from being useful on sources of practical importance.  As an example, the human iris is thought to be the strongest biometric~\cite{daugman2004}, and yet fuzzy extractors provide no guarantee about the strength of a key derived from the human iris.  The goal of this thesis is to provide meaningful relaxations of fuzzy extractors, enabling strong key derivation for a larger set of noisy sources. 

\subsection{Overview of Contributions}
Traditionally, fuzzy extractors were defined information-theoretically.  However, there is no compelling reason that the key must be secure against unbounded adversaries.  In this thesis, we will explore a computationally-secure version of fuzzy extractors and ask whether they can improve key strength.  This exploration will be primarily drawn from three works.  

The first work explores entropy in the computational setting and how it is affected by public information~\cite{FR11,fuller2012unified,fuller2013unified}.  The definitions and results on computational entropy are crucial in our subsequent discussion.  This work includes an important~(albeit independent) application called deterministic public-key encryption~(defined in~\cite{DBLP:conf/crypto/BellareBO07}).  We cover this work in more detail in \secref{sec:unified approach}.

The second work asks whether computational versions of fuzzy extractors are subject to the same bounds on key length as information-theoretic fuzzy extractors~\cite{fuller2013computational}.  We show both positive and negative results.  First, we show that the traditional method of constructing fuzzy extractors is unlikely to improve in the computational setting.  However, we show that using other construction paradigms it is possible to overcome information-theoretic bounds.  In particular, we describe a computational fuzzy extractor with a key as long as the input entropy~(impossible in the information-theoretic setting).  We describe this work in \secref{sec:comp fuzz ext}.

In the third work~\cite{canetti2014key}, we observe that current approaches for constructing fuzzy extractors consider two parameters: the entropy and error rate of the source.  Using only these two properties may be unnecessarily limiting.  As an example, sources often are ``well-spread'' in the metric space.  This may allow stronger key derivation than for the ``worst'' source with a given entropy and error rate.  We provide the first constructions of computational fuzzy extractors secure for a large class of distributions with more errors than starting entropy.  We discuss this work in \secref{sec:key derivation}.

\section{Computational Entropy and Information Leakage~\cite{FR11,fuller2012unified,fuller2013unified}}
\label{sec:unified approach}
\subsection{Problem Motivation and Description}
To define computational fuzzy extractors, we must understand what it means for $w$ to have entropy~(uncertainty) conditioned on the helper data $p$.  In information-theoretic cryptography, min-entropy is usually the right notion.  Min-entropy bounds the most likely outcome of a probability distribution.  Min-entropy has natural conditional properties.  If a distribution has min-entropy, conditioning on a value $p$ reduces the (average)~min-entropy by at most $|p|$ bits~\cite[Lemma 2.2]{DBLP:journals/siamcomp/DodisORS08}.  

In the computational setting, there are two commonly used notions of entropy.  We present the conditional versions of these notions here~(in the subsequent discussion, we measure remaining entropy conditioned on the helper value, $p$).  The first is based on an indistinguishability game.  A distribution $W$ has \emph{HILL entropy} if it cannot be distinguished, given $p$, from a distribution $X$ with actual min-entropy~\cite{DBLP:journals/siamcomp/HastadILL99}.  The second notion is based on prediction probability.  A distribution $W$ has \emph{unpredictability entropy} if given $p$, all polynomial time machines predict $w$ with small probability~\cite{DBLP:conf/eurocrypt/HsiaoLR07}.

\subsection{Contributions}
In this work, we provide connections between several notions of computational entropy.  Additionally, we show that~(under the right conditions) conditional HILL entropy degrades similarly to min-entropy.  That is, if $W$ has HILL entropy, conditioning on $p$ reduces HILL entropy by at most $|p|$ bits.  This is one of a number of ``chain-rules'' for computational entropy~\cite{reingold2008dense,DBLP:conf/focs/DziembowskiP08,chung2011memory,gentry2011separating}.

This result was applied to deterministic public-key encryption.  A deterministic public-key encryption scheme is a public-key encryption scheme in which the encryption algorithm is deterministic~\cite{DBLP:conf/crypto/BellareBO07}.  In deterministic public-key encryption, security of the encryption derives from the entropy of the messages.  In this work, we provide a generalization of several previous schemes, unifying and simplifying previous constructions.  We also provide the first scheme that encrypts a bounded number of arbitrarily correlated messages.
\section{Computational Fuzzy Extractors~\cite{fuller2013computational}}
\label{sec:comp fuzz ext}
\subsection{Problem Motivation and Description}
Fuzzy extractors share a deep connection to error-correcting codes.  Dodis et al. defined fuzzy extractors information-theoretically~\cite{DBLP:journals/siamcomp/DodisORS08}.  This was natural as their primary tools were error-correcting codes and randomness extractors~\cite{nisan1993randomness}, both information-theoretic objects.  Intuitively, these two objects perform two separate functions.  The error-correcting code performs information-reconciliation: mapping the new reading $w'$ back to the original reading $w$ without revealing $w$~\cite{bennett1988privacy}.  The randomness extractor performs privacy amplification: mapping $w$ to a uniform key~\cite{bennett1988privacy}.  However, there is no compelling reason that fuzzy extractors should be secure against unbounded adversaries.  Producing a key that is indistinguishable from a truly random key~(pseudorandom) is ``as good'' when considering computationally-limited adversaries.

\subsection{Contributions}
This work builds computational fuzzy extractors that outperform the best information-theoretic fuzzy extractors.  
We provide three contributions:
\begin{itemize}
\item Show that computationally-secure information-reconciliation components cannot improve over standard information-reconciliation components.  Information-reconciliation components represent the primary limitation on key length from fuzzy extractors.  Thus, the standard paradigm of information-reconciliation followed by privacy amplification is unlikely to improve in the computational setting.  We show negative results for both HILL and unpredictability entropy.
\item Suggest alternative construction paradigms for computational fuzzy extractors: 
\subitem Combine the information-reconciliation and privacy amplification components or
\subitem Use a fuzzy conductor~\cite{KanukurthiR09} and an extractor.
\item Construct a computational fuzzy extractor where the key is as long as the input entropy.  This is impossible in the information-theoretic setting.
\end{itemize}

\section{Key Derivation from Noisy Sources with More Errors Than Entropy~\cite{canetti2014key}}
\label{sec:key derivation}
\subsection{Problem Motivation and Description} 
The previous work provided evidence that computational fuzzy extractors can outperform information-theoretic fuzzy extractors.  While we constructed a lossless computational fuzzy extractor, it is secure for a limited class of distributions.  In fact, our construction is secure for distributions that already have information-theoretic constructions of fuzzy extractors.  Although we improve parameters using computational techniques, our goal is to expand the class of distributions where strong key derivation is possible.  In the computational setting a ``short'' key can be expanded using standard techniques~\cite{krawczyk2010cryptographic,dachman2012computational}.  Key length is not as important as the class of distributions where some key is possible.

Constructions of fuzzy extractors are limited by the tension between security and correctness guarantees: if we allow for higher error tolerance $t$, then we also need higher starting entropy $m$. The reason for this tension is simple: if an adversary who knows $p$ can guess any $w'$ within distance $t$ of $w$, it can easily obtain $key$ by running $\rep$.  In fact, if $t$ is high enough that there are $2^m$ points in a ball of radius $t$, then there exists a distribution of $w$ of min-entropy $m$ \emph{contained entirely in a single ball}.  For this distribution, an adversary can run $\rep$ on the center of this ball and learn $key$.

More generally, let $B_t$ denote the number of points in a ball of radius $t$.  We call $m-\log B_t$ the \emph{minimum usable} entropy, denoted by $\Huse$. The previous paragraph shows that  no fuzzy extractor can handle all distributions of a given min-entropy $m$ if  $\Huse\le 0$.

Candidate sources for authentication have $\Huse\le 0$.  The iris is believed to be the best biometric for high security applications~\cite{prabhakar2003biometric}.  Daugman uses specialized wavelets to derive a $2048$-bit string called an iris code~\cite{daugman2004}.  Let the outcome of this transform~(on different irises) define a distribution $W$.  Daugman estimates this distribution contains $249$ bits of entropy.  The precise number of errors that must be tolerated depends on the desired false reject rate (how often the correct key fails to be reproduced).  For a false reject rate of $\le 80\%$, a $t$ of approximately $205$ is required.  We have the following calculation for $\Huse$:
\[
\Huse = \Hoo(W) - \log |B_t|
= 249 - \log \sum_{i=0}^{205} {2048 \choose i} \approx -707.
\]
Thus, distributions with $\Huse\le 0$ are important in practice.  In order to secure fuzzy extractors for distributions with $\Huse\le 0$, one must use some additional property of the distribution.
\subsection{Contributions}
We provide the first constructions of computational fuzzy extractors secure for a large class of distributions with $\Huse\le 0$ over $\mathcal{Z}^\ell$ for a large alphabet $\mathcal{Z}$.  Our constructions are in the Hamming metric~(the number of symbols of $w$ and $w'$ that are different).  As explained above, such constructions cannot work without some restriction on the distribution.
Our first construction is secure when symbols in $w$
each have individual super-logarithmic min-entropy, even if they are arbitrarily correlated. Moreover,
a constant fraction of symbols in $w$ may have little entropy, as long as knowledge of their values does not reduce the entropy of the high-entropy symbols too much.

We improve the entropy requirement in the second construction, which requires only a constant fraction of the symbols of $w$ to have constant min-entropy conditioned on the previous symbols.
However, this improvement comes at a price to error-tolerance: whereas the first construction tolerates a constant fraction of errors, the second construction tolerates $\ell/\omega(\log\ell)$ errors.


\subsubsection{Possible additional work for thesis} 
\begin{itemize}
\item Both of our constructions are based on a definition of point obfuscation~\cite{canetti1997towards} achievable under strong number-theoretic assumptions~\cite{bitansky2010strong}.  Using randomness extraction, we should be able to weaken this assumption to more standard number theoretic assumptions.
\item Our constructions work for a large alphabet $\mathcal{Z}$.  However, most sources in the Hamming metric use a small alphabet.  Reducing our alphabet size will help make our result practical.
\item We construct computational fuzzy extractors for a limited class of distributions.  Recall this is necessary when $\Huse\le 0$.  It may be possible to construct information-theoretic fuzzy extractors for distributions in this class.
\end{itemize}



\bibliographystyle{alpha}
%\bibliography{./my}
\bibliography{crypto}

\end{document}

