\documentclass[11pt,a4paper,notitlepage]{article}

\usepackage{fullpage}

\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage[noend]{algorithmic}
\usepackage{url}
\usepackage{hyperref}

\newcommand{\xor}{\oplus}
\newcommand{\bigxor}{\bigoplus}
\newcommand{\bitset}{\{0,1\}}
\newcommand{\ip}[1]{\langle #1 \rangle}

\newcommand{\from}{\leftarrow}
\newcommand{\dist}{\sim}
\newcommand{\ul}{\underline}
\newcommand{\ol}{\overline}

\newcommand{\RR}{\mathbb{R}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\F}{\mathbb{F}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\PP}{{\cal P}}
\newcommand{\NP}{{\cal NP}}
\newcommand{\coNP}{co-{\cal NP}}
\newcommand{\BPP}{{\cal BPP}}
\newcommand{\PH}{{\cal PH}}
\newcommand{\Ppoly}{\PP / poly}
\newcommand{\even}{{\rm{even}}}
\newcommand{\odd}{{\rm{odd}}}
\renewcommand{\neg}{{\rm neg}}
\newcommand{\cond}{\Big\vert}
\newcommand{\etal}{\textit{et~al.}}

\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\vectornorm}[1]{\left|\left|#1\right|\right|}
\newcommand{\eqdef}{\stackrel{\rm def}{=}}
\newcommand{\poly}{{\rm poly}}
\newtheorem{thm}{Theorem}
\newtheorem{claim}[thm]{Claim}
\newtheorem{construction}[thm]{Construction}
\newtheorem{conjecture}[thm]{Conjecture}
\newtheorem{assumption}[thm]{Assumption}
\newtheorem{corr}[thm]{Corollary}
\newtheorem{dfn}[thm]{Definition}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{rem}[thm]{Remark}
\newtheorem{lemma}[thm]{Lemma}

\newcommand{\rnote}[1]{{(\sf Ron's Note:} {\sl{#1}} {\sf )}}
\newcommand{\gnote}[1]{{(\sf Guy's Note:} {\sl{#1}} {\sf )}}

\DeclareMathOperator*{\prob}{{\rm Pr}}
\DeclareMathOperator*{\E}{{\mathbf E}}


\DeclareMathOperator*{\Var}{{\mathbf Var}}


\newcommand{\round}[1]{\lfloor #1 \rceil}
\newcommand{\rounddown}[1]{\lfloor #1 \rfloor}
\newcommand{\roundup}[1]{\lceil #1 \rceil}

\begin{document}
\title{Pseudorandomness - Exercise 3}
\author{Guy Katz \and Ron Rothblum}

\maketitle

\section*{Question 1}
\paragraph{a.}
We construct an extractor by interpreting the input $x \in \bitset^n$
of the weak source as a vertex in the graph and interpreting the seed
$d \in_R [D]^t$ as a random walk of length $t$ starting at $x$.

To prove that the construction is a good extractor, we need to bound
the statistical distance of the output of the extractor from the
uniform distribution. Actually, it will be convenient for us to bound
the $\ell_2$ (rather than the $\ell_1$) distance from uniform and then
to use the former in order to bound the $\ell_1$ distance.

We denote by $G=(V,E)$ the expander graph and by $A$ its normalized
adjacency matrix. Let $\mu$ be some probability distribution over the
vertices of the graph with min-entropy $k$. Note that the output
distribution of the expander after $t$ steps is exactly $A^t \mu$. We
proceed to bound the $\ell_2$ distance from the uniform distribution,
which we denote by $u$.

Our first observation is that $A^tu = u$ and therefore,
$\vectornorm{A^t\mu-u}_2 = \vectornorm{A^t(\mu-u)}_2$. However,

\begin{align*}
  \frac{\vectornorm{A^t(\mu-u)}_2}{\vectornorm{\mu-u}_2} \leq
  \max_{\substack{v \neq 0\\v \bot \vec{1}}}
  \frac{\vectornorm{A^tv}_2}{\vectornorm{v}_2} = \lambda_2^t = 2^{-t}
\end{align*}
We proceed to bound $\vectornorm{\mu-u}_2$:
\begin{align*}
  \vectornorm{\mu-u}^2_2 = \sum_{v \in V}(\mu(v)-u(v))^2 = \sum_{v \in
    V}\mu(v)^2 - 2^{-n} \leq \sum_{v \in V} \mu(v) \left( \max_{v' \in
      V} \mu(v') \right) - 2^{-n} = 2^{-k} - 2^{-n} \leq 2^{-k}
\end{align*}
Hence,
\begin{align*}
  \vectornorm{A^t\mu-u}^2_2 \leq 2^{-2t} 2^{-k}
\end{align*}
which by the Cauchy-Schwarz inequality implies that
\begin{align*}
  \vectornorm{A^t\mu-u}_1^2 &\leq 2^n 2^{-2t-k}
\end{align*}
\noindent and if we set $t=\frac1{2}(n-k+\log(\frac1{\epsilon^2}))$ then
we obtain:
\begin{align*}
  \vectornorm{A^t\mu-u}_1^2 &\leq \epsilon^2
\end{align*}
or in other words, the output of the extractor is $\epsilon$-close to
uniform when using a seed of length $|d|=(\log D) t =
O(n-k+\log \frac1{\epsilon})$.

\paragraph{b.}
Consider the joint distribution $\nu$ of the seed of the extractor
with the extracted value. Notice that $\nu$ is distributed over
strings of length $n+d$ bits but has a support size of at most
$2^{k+d}$. The uniform distribution on the other hand has a support of
size $2^{n+d}$ and therefore the statistical test which checks if the
string is outside the support of $\nu$ distinguishes between $\nu$ and
the uniform distribution with a gap of $1-2^{k-n}$ and therefore the
extractor cannot be strong.

\section*{Question 2}
Let $f$ be a function that is computable by a width 2 branching
program. We will first show that the $\ell_1$-norm of $\hat{f}$ is
bounded and then show that this implies that $f$ is fooled by small
bias sets. It will be convenient for us to think of $f$ as a function
mapping $\bitset^n$ to $\{ \pm 1\}$ and each state of the branching
program as either $+1$ or $-1$.

For each $i \in \{1,\dots,n\}$ let $h_i : \{ \pm 1\}^2 \to \{ \pm 1\}$
denote the transition function of the branching program, that is, if
at step $i$ the branching program is at position $z \in \{ \pm 1\}$
and it gets as input $\tau \in \bitset$ then it moves to position
$h_i(z,(-1)^\tau)$. Also, denote by $f_0$ the function that is
identically $1$ and for $i \in [n]$ by $f_{i} : \bitset^i \to \{ \pm
1\}$ the function defined as $f_i(x \sigma) \eqdef h_i \left(
  f_{i-1}(x),(-1)^\sigma \right)$ where $x \in \bitset^{i-1}$ and
$\sigma \in \bitset$. Clearly, $f=f_n$.

Before proceeding to the main proof, we will prove a useful claim that
shows that each $h_i$ can be expressed as a bilinear form with small coefficients:
\begin{claim}
  For every $i \in [n]$, the function $h_i$ can be expressed as a
  bilinear form $h_i(x,y)=\sum_{j,k \in \bitset} \alpha^{(i)}_{j,k}
  x^j y^k$ over the reals such that $|\alpha^{(i)}_{0,0}| +
  |\alpha^{(i)}_{0,1}| \leq 1$ and
  $|\alpha^{(i)}_{1,0}|+|\alpha^{(i)}_{1,1}| \leq 1$.
\end{claim}
\begin{proof}
  Fix $i$ and a function $h_i$. We are looking for values
  $\alpha_{j,k}$ such that:
  \[
  \begin{bmatrix}
    &h_i(1,1) \\
    &h_i(1,-1) \\
    &h_i(-1,1) \\
    &h_i(-1,-1)
  \end{bmatrix}=
  \begin{bmatrix}
    &1 &1 &1 &1 \\
    &1 &-1 &1 &-1 \\
    &1 &1 &-1 &-1 \\
    &1 &-1 &-1 &1
  \end{bmatrix}
  \begin{bmatrix}
    &\alpha^{(i)}_{0,0} \\
    &\alpha^{(i)}_{0,1} \\
    &\alpha^{(i)}_{1,0} \\
    &\alpha^{(i)}_{1,1}
  \end{bmatrix}
  \]
  which has the unique solution
  \[
  \begin{bmatrix}
    &\alpha^{(i)}_{0,0} \\
    &\alpha^{(i)}_{0,1} \\
    &\alpha^{(i)}_{1,0} \\
    &\alpha^{(i)}_{1,1}
  \end{bmatrix}
  =
  \frac1{4}
    \begin{bmatrix}
    &1 &1 &1 &1 \\
    &1 &-1 &1 &-1 \\
    &1 &1 &-1 &-1 \\
    &1 &-1 &-1 &1
  \end{bmatrix}
  \begin{bmatrix}
    &h_i(1,1) \\
    &h_i(1,-1) \\
    &h_i(-1,1) \\
    &h_i(-1,-1)
  \end{bmatrix}
  \]
  with the desired properties.
\end{proof}

We proceed to analyze the Fourier coefficients of $f$. Fix $i \in [n]$
and $x \in \bitset^i$. We denote by $x'$ the $i-1$ bit prefix of $x$
and by $\sigma$ the single bit suffix of $x$ (i.e.,
$x=x'\sigma$). Then we have:

\begin{align*}
  \hat{f}_i(x) &= \E_{y \in
    \bitset^i}[ f_i(y)\chi_{x}(y) ] \\
  &= \E_{\substack{y' \in \bitset^{i-1}\\ \tau \in
      \bitset}}\left[ f_i(y' \tau)\chi_{x'}(y') \chi_{\tau}(\sigma) \right] \\
  &= \E_{\substack{y' \in \bitset^{i-1}\\ \tau \in \bitset}}\left[ h_i
    \left( f_{i-1}(y'),(-1)^\tau \right)\chi_{x'}(y')
    \chi_{\tau}(\sigma) \right]
  \\
  &= \E_{\substack{y' \in \bitset^{i-1}\\ \tau \in \bitset}}\left[
    \sum_{j,k \in \bitset} \alpha^{(i)}_{j,k}f_{i-1}^j(y')(-1)^{\tau
      k}\chi_{x'}(y') \chi_{\tau}(\sigma) \right].
\end{align*}
Using linearity of expectation and the fact that $y'$ and $\tau$ are
independent we obtain:
\begin{align*}
  \hat{f}_i(x) &= \sum_{j,k \in \bitset} \alpha^{(i)}_{j,k} \E_{y' \in
    \bitset^{i-1}}\left[f_{i-1}^j(y')\chi_{x'}(y')\right] \E_{\tau \in
    \bitset}\left[ (-1)^{\tau k}(-1)^{\tau \sigma}\right].
\end{align*}
Note that the second expectation equals 0 if $k \neq \sigma$ and 1
otherwise and so:
\begin{align*}
  \hat{f}_i(x) = \sum_{j \in \bitset} \alpha^{(i)}_{j,\sigma} \E_{y' \in
    \bitset^{i-1}}\left[f_{i-1}^j(y')\chi_{x'}(y')\right] =
  \alpha^{(i)}_{0,\sigma} \E_{y' \in
    \bitset^{i-1}}\left[\chi_{x'}(y')\right] + \alpha^{(i)}_{1,\sigma}
  \hat{f}_{i-1}(x')
\end{align*}
and thus:
\begin{align*}
  \hat{f}_i(x'\sigma) =
  \begin{cases} \alpha^{(i)}_{0,\sigma} + \alpha^{(i)}_{1,\sigma}
    \hat{f}_{i-1}(0^{i-1}) & x' = 0^{i-1} \\
    \alpha^{(i)}_{1,\sigma} \hat{f}_{i-1}(x') & \text{otherwise}
  \end{cases}.
\end{align*}
Using an inductive argument we are ready to prove the following claim:
\begin{claim}
  For every $i \in \{0,\dots,n\}$:
  \[
  \sum_{x \in \bitset^i} |\hat{f}_i(x)| \leq i
  \]
\end{claim}
\begin{proof}
  The base case is trivial. For the inductive step, assume that the
  claim holds for some $i$. Then:
  \begin{align*}
    \sum_{x \in \bitset^{i+1}} |\hat{f}_{i+1}(x)| &= \sum_{x' \in
      \bitset^i} \left( |\hat{f}_{i+1}(x'0)| + |\hat{f}_{i+1}(x'1)|
    \right) \\
    &= |\hat{f}_{i+1}(0^{i+1})| + |\hat{f}_{i+1}(0^i1)| +
    \sum_{\substack{x' \in \bitset^i\\x' \neq 0^i}} \left(
      |\hat{f}_{i+1}(x'0)| + |\hat{f}_{i+1}(x'1)| \right)
  \end{align*}
  and by our analysis above and inductive assumption we obtain:
  \begin{align*}
    \sum_{x \in \bitset^{i+1}} |\hat{f}_{i+1}(x)| &= \left(
      |\alpha^{(i+1)}_{0,0}| + |\alpha^{(i+1)}_{0,1}| \right) +
    \left(|\alpha^{(i+1)}_{1,0}|+|\alpha^{(i+1)}_{1,1}| \right) \cdot \sum_{x' \in
      \bitset^i}
    |\hat{f}_{i}(x')| \leq 1 + 1 \cdot i
  \end{align*}
\end{proof}

Thus, we have that the $\ell_1$ norm of $\hat{f}=\hat{f}_n$ is at most
$n$. The following claim shows that this implies that $f$ is
$\frac{\epsilon \cdot n}{2}$-fooled by $\epsilon$-biased sets.

\begin{claim}
  Let $S \subseteq \bitset^n$ be an $\epsilon$-biased set and let $f :
  \bitset^n \to \{ \pm 1\}$ be a function such that
  $\vectornorm{\hat{f}}_{1} \leq t$ then $f$ is $\frac{\epsilon \cdot
    t}{2}$-fooled by $S$.
\end{claim}
\begin{proof}
  Observe that:
  \begin{align*}
    \left| \E_{x \in S} \left[ f(x) \right] - \E_{x \in \bitset^n}
      \left[ f(x) \right] \right| &= \left| \E_{x \in S} \left[ \sum_y
        \hat{f}(y) \chi_y(x) \right] - \E_{x \in \bitset^n} \left[
        \sum_y \hat{f}(y) \chi_y(x) \right]
    \right|\\
    &\leq \sum_y \left| \hat{f}(y) \left( \E_{x \in S} \left[
          \chi_y(x) \right] -
        \E_{x \in \bitset^n} \left[ \chi_y(x) \right] \right)
    \right|.
  \end{align*}
  For $y=0^n$ we have that $\E_{x \in S} \left[ \chi_y(x) \right] =
  \E_{x \in \bitset^n} \left[ \chi_y(x) \right] = 1$ and for $y \neq
  0^n$ we have that $\E_{x \in \bitset^n} \left[ \chi_y(x) \right]=0$
  and $\left| \E_{x \in S} \left[ \chi_y(x) \right] \right| \leq
  \epsilon$ and therefore we obtain:
  \begin{align*}
    \left| \E_{x \in S} \left[ f(x) \right] - \E_{x \in \bitset^n}
      \left[ f(x) \right] \right| \leq \sum_{y \neq 0^n} \left|
      \hat{f}(y) \left( \E_{x \in S} \left[ \chi_y(x) \right] \right)
    \right| \leq \epsilon \sum_{y \neq 0^n} \left| \hat{f}(y) \right|
    \leq \epsilon \cdot t.
  \end{align*}
  Hence,
  \[
  \left| \prob_{x \in S}[ f(x) = 1 ] - \prob_{x \in \bitset^n}[ f(x) =
    1 ] \right| = \frac1{2} \left| \E_{x \in S} \left[ f(x) \right] -
    \E_{x \in \bitset^n} \left[ f(x) \right]\right| \leq
  \frac{\epsilon \cdot t}{2}
  \]
\end{proof}

\noindent Therefore, using a construction of an
$\epsilon'=\frac{2\epsilon}{n}$ biased set, the function $f$ is
$\epsilon$ fooled. As we saw in class, such a set of size
$O(\frac{n}{(\epsilon')^3}) = O(\frac{n^4}{\epsilon^3})$ can be
constructed which implies a seed of length
$O(\log\frac{n}{\epsilon})$.

\end{document}
