\documentclass[11pt,a4paper,notitlepage]{article}

\usepackage{setspace}
\usepackage{fullpage}

\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage[noend]{algorithmic}
\usepackage{url}
\usepackage{hyperref}

\newcommand{\xor}{\oplus}
\newcommand{\bigxor}{\bigoplus}
\newcommand{\bitset}{\{0,1\}}
\newcommand{\ip}[1]{\langle #1 \rangle}

\newcommand{\from}{\leftarrow}
\newcommand{\dist}{\sim}
\newcommand{\ul}{\underline}
\newcommand{\ol}{\overline}

\newcommand{\RR}{\mathbb{R}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\F}{\mathbb{F}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\PP}{{\cal P}}
\newcommand{\NP}{{\cal NP}}
\newcommand{\coNP}{\mathrm{co}\text{-}\mathcal{NP}}
\newcommand{\BPP}{{\cal BPP}}
\newcommand{\PH}{{\cal PH}}
\newcommand{\Ppoly}{\PP/\poly}
\newcommand{\even}{{\rm{even}}}
\newcommand{\odd}{{\rm{odd}}}
\renewcommand{\neg}{{\rm neg}}
\newcommand{\cond}{\Big\vert}
\newcommand{\etal}{\textit{et~al.}}

\newcommand{\abs}[1]{\left| #1 \right|}
\newcommand{\vectornorm}[1]{\left|\left|#1\right|\right|}
\newcommand{\eqdef}{\stackrel{\rm def}{=}}
\newcommand{\poly}{{\rm poly}}
\newtheorem{thm}{Theorem}
\newtheorem{claim}[thm]{Claim}
\newtheorem{construction}[thm]{Construction}
\newtheorem{conjecture}[thm]{Conjecture}
\newtheorem{assumption}[thm]{Assumption}
\newtheorem{corr}[thm]{Corollary}
\newtheorem{dfn}[thm]{Definition}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{rem}[thm]{Remark}
\newtheorem{lemma}[thm]{Lemma}

\newcommand{\rnote}[1]{{(\sf Ron's Note:} {\sl{#1}} {\sf )}}
\newcommand{\gnote}[1]{{(\sf Guy's Note:} {\sl{#1}} {\sf )}}

\DeclareMathOperator*{\prob}{{\rm Pr}}
\DeclareMathOperator*{\E}{{\mathbf E}}


\DeclareMathOperator*{\Var}{{\mathbf Var}}


\newcommand{\round}[1]{\lfloor #1 \rceil}
\newcommand{\rounddown}[1]{\lfloor #1 \rfloor}
\newcommand{\roundup}[1]{\lceil #1 \rceil}

\begin{document}
\title{Pseudorandomness - Exercise 2}
\author{Guy Katz \and Ron Rothblum}

\maketitle

\section*{Question 1}

\paragraph{(a)}
If $\PP=\NP$ then $\NP=\coNP$ (since $\PP$ is closed under complement) and
therefore, by (2a) $\PH=\Sigma_1=\NP=\PP$.

\paragraph{(b)}
We know that $\PP\subseteq \BPP \subseteq \PH$. Therefore, by (a), if
$\PP=\NP$ then $\PP=\PH$ and thus $\PP=\BPP$.

\section*{Question 2}
In the following whenever we write $\exists x$ (resp. $\forall x$) we
actually mean that there exists (resp. forall) $x$ of
\emph{polynomial} length.

\paragraph{(a)}
Suppose that $\Sigma_i=\Pi_i$ for some $i\geq 1$. We show that
$\Sigma_{i+1}=\Sigma_i$ and analogously that $\Pi_{i+1}=\Pi_i$. Thus,
by induction, $\PH=\Sigma_i$.

Let $L \in \Sigma_{i+1}$. Then there exists a polynomial-time TM $M$
such that:
\[
x \in L \Longleftrightarrow \exists y_1 \forall y_2, \dots, y_{i+1}
\text{ such that } M(x,y_1,\dots,y_{i+1})=1.
\]
Consider the language $L'=\left\{ (x,y_1) : \forall y_2, \exists
  y_3,\dots, y_{i+1} \text{ such that }M(x,y_1,y_2,\dots,y_{i+1})=1
\right\}$. Clearly $L' \in \Pi_i$ and therefore by our assumption it
can also be computed in $\Sigma_i$. Thus, there exists a
polynomial-time TM $M'$ such that:

\[
(x,y_1) \in L' \Longleftrightarrow \exists z_2, \forall z_3, \dots, z_i
\text{ it holds that } M'(x,y_1,z_2,\dots,z_i)=1
\]

\noindent Observe that this implies that $L \in \Sigma_i$ since:
\begin{align*}
  x \in L
  &\Longleftrightarrow
  \exists y_1 \forall y_2,\dots, y_{i+1} \text{ it holds that }
  M(x,y_1,\dots,y_{i+1})=1 \\
  &\Longleftrightarrow
  \exists y_1 \text{ such that } (x,y_1) \in L'\\
  &\Longleftrightarrow
  \exists y_1,z_2,\forall z_3,\dots,z_i \text{ it holds that }
  M'(x,y_1,z_2,\dots,z_i)=1
\end{align*}

And so we have $\Sigma_{i+1} \subseteq \Sigma_i$ and therefore
$\Sigma_{i+1}=\Sigma_i$. Analogously we have $\Pi_{i+1}=\Pi_i$ and
hence $\Pi_{i+1}=\Pi_i=\Sigma_i=\Sigma_{i+1}$. By induction we have
that $\Sigma_j=\Pi_j$ for all $j\geq i$ and therefore,
$\PH=\Sigma_i$.

\paragraph{(b)}
Let $L \in \Pi_2$, we first show that $L \in \Sigma_2$ and so $\Pi_2
\subseteq \Sigma_2$. By definition of $\Pi_2$, there exists a
polynomial-time TM $M$ such that:
\[
x \in L \Longleftrightarrow \forall y \exists z \text{ such that } M(x,y,z)=1.
\]
Let $L'=\{(x,y) : \exists z \ |\ M(x,y,z)=1\}$. Clearly $L' \in \NP$
and therefore, by the Cook-Levin theorem, there exists a polynomial
size formula $\phi$ such that $(x,y) \in L'$ if and only if there
exists $z$ such that $\phi(x,y,z)=1$. By our assumption, there exists
a family of polynomial-size circuits $\{C_n\}_{n \in \N}$ that decides
SAT. Furthermore, by a straightforward NP self-reduction there exists
a circuit family $\{C'_n\}_{n \in \N}$ that given a SAT formula $\phi$
outputs a satisfying assignment if one exists and $\bot$
otherwise.\footnote{On input $\phi$, the circuit $C'_{|\phi|}$ first
  uses $C_{|\phi|}(\phi)$ to check if the formula is satisfiable. If
  not then it outputs $\bot$ and otherwise it sets the first variable
  to 0 and checks whether the resulting formula is still
  satisfiable. If not then it sets the first variable to 1 and
  proceeds to set the second variable to 0 and so forth.} We use
$\{C'_n\}_{n \in \N}$ to show that $L \in \Sigma_2$:
\begin{claim}
\[
x \in L \Longleftrightarrow \exists C''_n \forall y \text{ it
  holds that } M(x,y,C''_n(x,y))=1 \text{ (where $n=\poly(|x|)$).}
\]
\end{claim}
\begin{proof}
  If $x \in L$ then the circuit family $\{C'_n\}_{n \in \N}$
  satisfies the requirement. On the other hand, if there exists
  $C''_n$ such that $\forall y M(x,y,C''_n(x,y))=1$ then for each $y$,
  the circuit $C''_n(x,y)$ outputs $z$ such that $M(x,y,z)=1$ and in
  particular for every $y$ there exists a $z$ such that $M(x,y,z)=1$
  and therefore $x \in L$.
\end{proof}

Thus, we have that $\Pi_2 \subseteq \Sigma_2$. We argue that this
implies that $\Pi_2 = \Sigma_2$. To see this, let $L \in
\Sigma_2$. Then $L^c \in \Pi_2$ and therefore (as shown above) $L^c
\in \Sigma_2$ and therefore $L \in \Pi_2$. We conclude that
$\Sigma_2=\Pi_2$ and therefore, by (a), $\PH=\Sigma_2$.

\paragraph{(c)}
Let $L \in \BPP$. As we saw in class, we can reduce the error to be
exponentially vanishing and so there exists a polynomial-time TM $A$
such that for every $x \in \bitset^*$ it holds that
\[\prob_{r \in \bitset^{\poly(|x|)}} [A(x,r) \neq L(x)] <
2^{-|x|}.
\]
Let $n \in \N$. By the union bound:
\[
\prob_{r \in \bitset^{\poly(n)}} \left[ \exists x \in \bitset^n \text{
    s.t. }A(x,r) \neq L(x) \right] < 2^n \cdot 2^{-n} = 1
\]
and therefore there exists (at least) one random string $r_n$ that
will make $A(x,r)=L(x)$ for all $x \in \bitset^n$. The language $L$
can therefore be decided by a polynomial-time non-uniform TM that gets
the string ensemble $\{r_n\}_{n \in \N}$ as its non-uniform advice
string.

\paragraph{(d)}
If $\NP \subseteq \BPP$ then, by (c), $\NP \subseteq \Ppoly$ and
therefore, by (b), $\PH=\Sigma_2$.


\section*{Question 3}
\paragraph{(a)}
Let $\Gamma_u(S)$ denote the unique neighbors of $S$, and let
$\Gamma_c(S) = \Gamma(S) - \Gamma_u(S)$ denote the ``common''
(non-unique) neighbors. Then $|\Gamma(S)| = |\Gamma_u(S)| +
|\Gamma_c(S)|$. Each vertex in $\Gamma_u(S)$ has exactly one edge going into
$S$, and every vertex in $\Gamma_c(S)$ has at least two edges going
into $S$. Since the total number of edges going into $S$ is $d|S|$, we
have that:
\begin{align}\label{eq:numero1}
d|S| \geq |\Gamma_u(S)| + 2|\Gamma_c(S)|
\end{align}
However, by the expansion property, we also know that
\begin{align}\label{eq:numero2}
(1-\epsilon)d|S| \leq |\Gamma_u(S)| + |\Gamma_c(S)|
\end{align}
Subtracting \eqref{eq:numero2} from \eqref{eq:numero1} we obtain:
\[
|\Gamma_c(S)| \leq \epsilon d|S|
\]
% Combining, we get:
% \[
% d|S| \geq |\Gamma_u(S)| + 2|\Gamma_c(S)| \geq  |\Gamma_c(S)| +  (1-\epsilon)d|S|
% \]
% Or:
% \[
% |\Gamma_c(S)| \leq d|S| - (1-\epsilon)d|S|  = d|S|(1-1+\epsilon) =
% \epsilon d|S|
% \]
\noindent and therefore,
\[
|\Gamma_u(S)| \geq
% (1-\epsilon)d|S| - |\Gamma_c(S)| \geq
% (1-\epsilon)d|S| - \epsilon d|S| =
(1-2\epsilon)d|S|
\]

\paragraph{(b)}
We prove the claim for $\delta = 6\epsilon$. Suppose toward a
contradiction that there exists a set $S\subseteq L$ of size at most
$\frac{k}{2}$ for which the claim does not hold - i.e., there exists a
set $T$ of size at least $|S|/2$ such that $S\cap T=\emptyset$, and that
for each $t\in T$ we have $|\Gamma(t)\cap \Gamma(S)| \geq \delta d$.

Let $T'$ denote a set of $|S|/2$ vertices arbitrarily picked from $T$,
and let $S' = T' \cup S$. We will show that $S'$ doesn't have enough
unique neighbors, contradicting the previous section.

Each of the vertices of $T'$ has $d$ edges; since at least $\delta d$ of them
go to vertices that are already neighbors of $S$, it contributes at
most $(1-\delta)d$ unique neighbors. There are $\frac{|S|}{2}$ such
vertices, so $T'$ contributes at most $\frac{|S|}{2}(1-\delta)d$ unique
neighbors to $S'$.

By the previous section, we know that:
\[
|\Gamma_u(S')| \geq (1-2\epsilon)d|S'| = (1-2\epsilon)d \cdot \frac{3}{2}|S|
\]
Therefore, the number of unique neighbors that $S$ contributes to $S'$
must be at least
\[
\left( 1-2\epsilon \right) d \cdot \frac{3}{2}|S| -
\frac{|S|}{2}\left( 1-\delta \right)d =
d|S| \left( 1-3\epsilon + \frac{\delta}{2} \right) =
d|S| \left( 1-3\epsilon + \frac{6\epsilon}{2} \right) =
d|S|
\]
This means that every single edge going out of $S$ contributes a
unique neighbor to $S'$, but this contradicts the fact that $S$ and
$T'$ have common neighbors. Therefore, there exists no such $S$,
proving the claim.

\paragraph{(c)}
To check if a given element $x \in [n]$ is in $S$, we choose a random
neighbor of $x$ and read its value. Property $\Pi$ guarantees that all
but a $\delta$ fraction of the neighbors are assigned $\chi_S(x)$ and
therefore the error probability is at most $\delta$.

\paragraph{(d)}
We start by assigning $1$'s to all of $S$'s neighbors, and $0$ to all
other vertices. Clearly, property $\Pi$ holds for all vertices of
$S$. By $(b)$, there are at most $\frac{|S|}{2}$ vertices outside $S$
for which the property does not hold. Denote this set of vertices by
$B_1$; then $|B_1|\leq \frac{|S|}{2}$.

We now proceed as follows: we set all vertices in $\Gamma(B_1)$ to
$0$. The property now holds for all vertices in $B_1$; by $(b)$, there
are at most $\frac{|B_1|}{2}=\frac{|S|}{4}$ vertices in $S$ for which
the property doesn't hold now. We denote those vertices as $B_2$.

The process continues; each time we define $B_{i+1}$ to be the set of
vertices for which the property doesn't hold due to the flipping of
$B_i$'s neighbors. Each set is at most half the size of its
predecessor; and the process ends after a logarithmic number of steps
and property $\Pi$ holds for all vertices, as needed.

\section*{Question 4}
\paragraph{(a)}
Let $S\subseteq \{0,1\}^n$ be an $\epsilon$-biased set of size
$m$. Let $G$ be an $m \times n$ matrix whose rows are the elements
of $S$. We define a code $C$ as follows:
\[
C = \{G x^T |\ x\in \{0,1\}^n\}
\]
By definition, $C$ is a linear code. Since $C$ maps vectors of length
$n$ to vectors of length $m$, its rate is $\frac{n}{m}$. To find its
distance, we need to find the minimal weight of a non-zero
codeword. Let $w \in \bitset^m$ be such a word in the code and let $0
\neq x \in \bitset^n$ such that $w=Gx^T$. Observe that
\[
\mathrm{weight}(w) = \left| \{ g \in S : \ip{x,g}=1\} \right| = m \cdot
\prob_{g \in S}[ \ip{x,g}=1]
\]
and since $S$ is $\epsilon$-biased and $x \neq 0$ we have that
$\mathrm{weight}(w) \geq m \cdot \left( \frac1{2} - \epsilon \right)$ and
therefore the code's relative distance is at least
$\frac{1}{2}-\epsilon$, as needed.
\paragraph{(b)}
We are given that $m \leq c \cdot \frac{n}{\epsilon^3}$ for some
constant $c$. We can set $\epsilon = \frac{1}{10}$, and use the
construction from $(a)$ to build a code $C$ with rate $\frac{n}{m}
\geq \frac{n\cdot \epsilon^3}{c \cdot n} = \frac{\epsilon^3}{c} =
\frac{1}{1000c}$ and relative distance at least $\frac{1}{2} -
\frac{1}{10} = \frac{2}{5}$, both of which are constants, as needed.

\paragraph{(c)}
As usual, for a given vector $v \in \bitset^n$, we denote by $\chi_v :
\bitset^n \to \{ \pm 1 \}$ the linear function defined as $\chi_v(x) =
(-1)^{\ip{x,v}}$. We also use $\chi_v$ to denote the truth table of
the function (a vector in $\bitset^{2^n}$).

\begin{claim}
  For every $v \in \bitset^n$, the vector $\chi_v \in \bitset^{2^n}$
  is an eigenvector of $A$ with corresponding eigenvalue $\lambda_v =
  \hat{f}(v)$.
\end{claim}
\begin{proof}
  For every $i \in \bitset^n$ it holds that:
  \[
  (A \cdot \chi_v)_i = \sum_{j \in \bitset^n} A_{i,j} \cdot \chi_v(j) =
  \sum_{j : i+j \in S} \frac1{m} \cdot \chi_v(j) = \frac1{m} \sum_{k \in S}
  \chi_v(i+k)
  \]
  and since $v$ is linear we have:
  \[
  (A \cdot \chi_v)_i = \frac1{m} \sum_{k \in S} \chi_v(i)\chi_v(k) =
  \chi_v(i) \cdot \E_{k \in S} \left[ \chi_v(k) \right] = \chi_v(i)
  \cdot \E_{x \in \bitset^n} \left[ \chi_v(x) f(x) \right] = \chi_v(i)
  \cdot \hat{f}(v)
  \]
  that is, $\hat{f}(v)$ is an eigenvalue of $A$ with corresponding
  eigenvector $\chi_v$.
\end{proof}

We have found a set of \emph{at most} $2^n$ eigenvalues. We still need
to argue that there cannot be any additional eigenvalues.

\begin{claim}
  The set $\{ \chi_v : v \in \bitset^n \}$ forms an orthogonal
  basis.
\end{claim}
\begin{proof}
  It suffices to show that the set is orthogonal. Indeed, for every $v
  \neq w \in \bitset^n$ it holds that:
  \[
  \ip{\chi_v,\chi_w} = \E_i \left[ \chi_v(i) \chi_w(i) \right] = \E_i
  \left[ \chi_{v+w}(i) \right]
  \]
  which equals 0 because $v+w \neq 0$, and every nontrivial character
  sums to zero over $\bitset^n$.
\end{proof}

We have found $2^n$ orthogonal eigenvectors corresponding to the $k$
distinct eigenvalues in $\{\hat{f}(v) : v \in \bitset^n\}$, where
$1\leq k\leq 2^n$. Therefore, the sum of the geometric multiplicities
of these eigenvalues is $2^n$. Since the sum of their algebraic
multiplicities is bounded between the sum of the geometric multiplicities
and $2^n$, it is also $2^n$. Therefore, since the characteristic
polynomial $\det(A-\lambda I)$ is of degree $2^n$, we have found all
of its roots. It follows that the set $\{\hat{f}(v)\ |\
v\in\{0,1\}^n\}$ is precisely the set of all eigenvalues of $A$.

\paragraph{(d)}
By (c), every eigenvalue $\lambda$ of $A$ corresponds to the bias of a
linear test on $S$ including the ``empty test'' (i.e.,
$\chi_\emptyset$). From the empty test we obtain an eigenvalue $1$ and
from every other linear test we obtain an eigenvalue of magnitude at
most $\epsilon$. Thus the spectral gap is at least $1-\epsilon$ and by
Cheeger's inequality the graph is a good expander. Note that the
resulting graph has size $2^n$ and degree only $m=O \left(
  \frac{n}{\epsilon^3} \right)$.

\end{document}
