\documentclass[paper=a4, fontsize=12pt]{scrartcl}
\newcommand{\bfvec}[1]{\mbox{\boldmath$#1$}}
\usepackage{graphicx}
\usepackage{listings}
\usepackage{amssymb}
\usepackage{charter}
\usepackage[top=0.75in, bottom=1in, left=0.75in, right=0.5in]{geometry} 
\usepackage{amsmath}
\title{	
\normalfont \normalsize 
\textsc{Shanghai Jiaotong University, Zhiyuan College} \\ [25pt]
\huge Exercises for Chapter 3\\
}
\author{Feng Shi}
\date{\normalsize\today}
\begin{document}
\maketitle

\begin{enumerate}

\item[Exercise 3.1]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.2]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.3]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.4]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.5]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.6]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.7]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.8]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.9]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.10]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.11]
	{\Large Question:}
	
	What is the expected number of squares (4-cycles) in $G(n,\frac{d}{n})$?
	What is the expected number of 4-cliques in $G(n,\frac{d}{n})$?
	
	{\Large Solution:}

	\begin{enumerate}
	\item
	There are $\binom{n}{4}$ ways of choosing a quadruple of vertices in $G(n,\frac{d}{n})$.
	And there are 3 possible squares (4-cycles) on one quadruple.
	So the expected number of squares (4-cycles) in $G(n,\frac{d}{n})$ is
	$$E(\text{4-cycles})=3\binom{n}{4}\bigg(\frac{d}{n}\bigg)^4\approx\frac{d^{4}}{8}$$
	\item
	$\binom{n}{4}$ for choosing a quadruple and $\bigg(\frac{d}{n}\bigg)^6$ for the
	quadruple to be a 4-clique. 
	So the expected number of 4-cliques in $G(n,\frac{d}{n})$ is
	$$E(\text{4-cliques})=\binom{n}{4}\bigg(\frac{d}{n}\bigg)^6\approx\frac{d^6}{24n^2}$$
	\end{enumerate}
\vspace{20pt}

\item[Exercise 3.12]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.13]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.14]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.15]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.16]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.17]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.18]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}

\item[Exercise 3.19]
{\Large Solution}

Since all $x_{i}$ are statistically independent, the expected value of \bfvec{x} is
$$E(\bfvec{x})=E(\sum_{i=1}^{n}x_{i})=\sum_{i=1}^{n}E(x_{i})=\frac{n}{2}$$
and the variance of \bfvec{x}
\begin{align*}
\sigma^{2}(\bfvec{x})&=E((\bfvec{x}-\frac{n}{2})^{2}) \\
&=E(\bfvec{x}^2)-E^{2}(\bfvec{x}) \\
&=E(\sum_{i=1}^{n}x_{i}^2+\sum_{i=1}^{n}\sum_{j=1,j\neq i}^{n}x_{i}x_{j})-\frac{n^2}{4}
\end{align*}
Because $x_{i}$ is indicator variable and is identically distributed, $E(x_{i}^2)=E(x_{i})$.

And $E(x_{i}x_{j})=\frac{1}{4}$, so
$$\sigma^{2}(\bfvec{x})=\frac{n}{2}+\frac{1}{4}\cdot 2\cdot \frac{n(n-1)}{2}-\frac{n^2}{4}=\frac{n}{4}$$
By Chebyshev's inequality,
\begin{align*}
Prob\big( \bfvec{x}=0 \big)&\leq Prob\big( |\bfvec{x}-E(\bfvec{x})|\geq E(\bfvec{x}) \big) \\
&\leq \frac{\sigma^{2}(\bfvec{x})}{E^2(\bfvec{x})} \\
&=\frac{\frac{n}{4}}{(\frac{n}{2})^2} \\
&=\frac{1}{n}\to 0
\end{align*}
\vspace{20pt}

\vspace{20pt}
\item[Exercise 3.20]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.21]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.22]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.23]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.24]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.25]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.26]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.27]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.28]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.29]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}

\item[Exercise 3.30]
{\Large Solution:}

\begin{enumerate}
\item[-]
{\large Define increasing property}

\bfvec{Q} is an increasing property of $N(n,p)$ if and only if it satisfies:

if $N_{1}$ has property \bfvec{Q}, then any $N_{2}$ obtained from $N_{1}$ by including
additional integers from $\lbrace 1,2,...,n \rbrace$ also has property \bfvec{Q}.
\vspace{20pt}

\item[-]
{\large Prove every increasing property of $N(n,p)$ has a threshold}

\textbf{Lemma 1.} \textsl{If \bfvec{Q} is an increasing property and $0\leq p\leq q\leq 1$,
then the probability that $N(n,p)$ has property \bfvec{Q} is less than or equal to 
the probability that $N(n,q)$ has property \bfvec{Q}.}

\textbf{Proof:} (of lemma 1.)

Similar to the proof of \emph{Lemma 3.18} in the textbook. 
First generate $N(n,p)$. Then independently generate $N(n,\frac{q-p}{1-p})$ and take
the union by putting in an integer from $\lbrace 1,2,...,n \rbrace$ if either of the
two subsets has the integer. Call the resulting subset $H$. 
$H$ has the same distribution as $N(n,q)$ because the probability that an integer is
in $H$ is $p+(1-p)\frac{q-p}{1-p}=q$ and the components of $H$ are independent. 
The lemma follows since whenever $N(n,p)$ has the property \bfvec{Q}, H also has
the property \bfvec{Q}.
\vspace{10pt}

\textbf{Lemma 2.} \textsl{If $N(n,q)$ is an $m$-fold replication of $N(n,p)$, 
then there are inequalities below}
$$Prob\big( N(n,q) \ does \ not \ have \ \bfvec{Q} \big)\leq
	\bigg(Prob\big( N(n,p) \ does \ not \ have \ \bfvec{Q} \big)\bigg)^{m}$$
$$Prob\big( N(n,q) \ has \ \bfvec{Q} \big)\leq
	Prob\big( N(n,mp) \ has \ \bfvec{Q} \big)$$
$$Prob\big( N(n,mp) \ does \ not \ have \ \bfvec{Q} \big)\leq
	Prob\big( N(n,q) \ does \ not \ have \ \bfvec{Q} \big)$$
$$Prob\big( N(n,mp) \ does \ not \ have \ \bfvec{Q} \big)\leq
	\bigg(Prob\big( N(n,p) \ does \ not \ have \ \bfvec{Q} \big)\bigg)^{m}$$

\textbf{Proof:} (of lemma 2.) 

$q=1-(1-p)^{m}\leq 1-(1-mp)=mp$ by the Bernoulli inequality.

The rest of the proof is the same as in the textbook.
\vspace{10pt}

\textbf{Lemma 3.} $\forall\varepsilon \in (0,\frac{1}{2})$, 
take $m$ such that $(1-\varepsilon)^m \leq \varepsilon$.
Then
$$P_{1-\varepsilon}\leq mP_{\varepsilon}$$
where $P_{1-\varepsilon}$ is the probability $P$ satisfying
$$Prob\big( N(n,P) \ has \ \bfvec{Q}\big)=1-\varepsilon$$
and $P_{\varepsilon}$ defined similarly.

\textbf{Proof:} (of lemma 3.)
Let $N(n,q)$ be the $m$-fold replication of $N(n,P_{\varepsilon})$.
Then by \textsl{lemma 2} we have 
$$Prob\big( N(n,mP_{\varepsilon}) \ has \ \bfvec{Q} \big)
	\geq Prob\big( N(n,q) \ has \ \bfvec{Q}\big)
	\geq 1-\varepsilon$$
And by the increasing property of \bfvec{Q} we have
	$$mP_{\varepsilon}\geq P_{1-\varepsilon}$$

\vspace{10pt}

\textbf{Theorem} Every increasing property \bfvec{Q} of $N(n,p)$ has a threshold.
$P_{1/2}$ is a threshold where $P_{1/2}$ stands for the probability satisfying
$$Prob\big( N(n,P_{1/2}) \ has \ \bfvec{Q} \big)=\frac{1}{2}$$

\textbf{Proof:} (of the theorem)

\begin{enumerate}
\item[i.]
$\forall$ $\bar{p}(n)=o(P_{1/2})$, that is,
$\lim_{n \to \infty} \frac{\bar{p}(n)}{P_{1/2}}=0$, we prove that
$$Prob\big( N(n,\bar{p}(n)) \ has \ \bfvec{Q}\big)\to 0 \ as \ n\to\infty$$

Proof by contradiction:

Suppose $Prob\big( N(n,\bar{p}(n)) \ has \ \bfvec{Q}\big)\to\epsilon$ 
where $\epsilon\in(0,\frac{1}{2})$. Then by the $\varepsilon$--$N$ definition of
the limit, we have
	$$\exists N_0\in \mathbb{N},\varepsilon\in (0,\frac{1}{2}) \ s.t. \ 
	\forall n>N_0, \ Prob\big( N(n,\bar{p}(n)) \ has \ \bfvec{Q}\big)>\varepsilon$$

Take $m \ s.t. \ (1-\varepsilon)^m\leq\varepsilon$. Then by \textsl{lemma 3} we have
$$P_{1-\varepsilon}\leq mP_\varepsilon.$$

Because \bfvec{Q} is an increasing property,
$$P_{1/2}<P_{1-\varepsilon} \ , \ P_\varepsilon<\bar{p}(n)$$

So
$$P_{1/2}<P_{1-\varepsilon}\leq mP_{\varepsilon}\leq m\bar{p}(n)$$

Because $m$ is only related to $\varepsilon$ but not $n$, this yields 
$$\lim_{n\to\infty} \frac{\bar{p}(n)}{P_{1/2}}>0$$
which contradicts $\bar{p}(n)=o(P_{1/2})$, so
$$Prob\big( N(n,\bar{p}(n)) \ has \ \bfvec{Q}\big)\to 0 \ as \ n\to\infty$$

\item[ii.]
$\forall$ $\hat{p}(n)=\omega(P_{1/2})$, that is,
$\lim_{n \to \infty} \frac{\hat{p}(n)}{P_{1/2}}=\infty$, we prove that
$$Prob\big( N(n,\hat{p}(n)) \ has \ \bfvec{Q}\big)\to 1 \ as \ n\to\infty$$

Proof by contradiction:

Suppose	$Prob\big( N(n,\hat{p}(n)) \ has \ \bfvec{Q}\big)\to 1-\epsilon$  
where $\epsilon\in(0,\frac{1}{2})$. Then by the $\varepsilon$--$N$ definition of
the limit, we have
	$$\exists N_0\in \mathbb{N},\varepsilon\in (0,\frac{1}{2}) \ s.t. \ 
	\forall n>N_0, \ Prob\big( N(n,\hat{p}(n)) \ has \ \bfvec{Q}\big)<1-\varepsilon$$
Then similarly by \textsl{lemma 3} and increasing property of \bfvec{Q}, we have
$$\hat{p}(n)<P_{1-\varepsilon}\leq mP_\varepsilon<mP_{1/2}$$
which contradicts $\hat{p}(n)=\omega(P_{1/2})$, so
$$Prob\big( N(n,\hat{p}(n)) \ has \ \bfvec{Q}\big)\to 1 \ as \ n\to\infty$$
\vspace{10pt}
By \textsl{i.} and \textsl{ii.} we can conclude that $P_{1/2}$ is a threshold for
increasing property \bfvec{Q} of $N(n,p)$.
\end{enumerate} %end of 2 conditions
\vspace{20pt}
{\large P.S.} The proof of \textsl{theorem 3.19} did not include \textsl{lemma 3} 
above. Although it says ``A symmetric argument...'' after proving the case
$p_{0}(n)=o(P_{1/2})$, simply modifying and ``reversing'' that proof does not work well.
So I think \textsl{lemma 3} is necessary.
\end{enumerate} %end of ex 3.30

\vspace{20pt}
\item[Exercise 3.31]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.32]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.33]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.34]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.35]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.36]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.37]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.38]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.39]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.40]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.41]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.42]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.43]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.44]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.45]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.46]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.47]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.48]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.49]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.50]
	{\Large Question:}

	In $G(n,p)$, let $x_k$ be the number of connected components of size $k$.
	Using $x_k$, write down the probability that a randomly chosen vertex is in a 
	connected component of size $k$. 
	Also write down the expected size of the connected component a randomly chosen
	vertex is in.

	{\Large Solution:}
	
	The number of vertices in the graph is:
		$$n=\sum_{k=1}^{\infty}kx_k$$
	So the probability that a randomly chosen vertex is in a connected component 
	of size $k$ is:
		$$p_1=\frac{kx_k}{\sum_{k=1}^{\infty}kx_k}$$
	And the expected size of the connected component a randomly chosen vertex is in is:
		$$E(x)=\sum_{k=1}^{\infty}\frac{k^2x_k}{\sum_{k=1}^{\infty}kx_k}$$
\vspace{20pt}
\item[Exercise 3.51]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.52]
	{\Large Question:}
	
	For $d>1$ let $\theta$ be the unique root in $(0,1)$ of $1-e^{-dx}-x$.
	Prove that the expected value of the size of the frontier increases linearly
	with $i$ for $i$ in the neighborhood of $\theta$.

	{\Large Solution:}

\vspace{20pt}
\item[Exercise 3.53]
	{\Large Question:}
	
	Consider the binomial distribution $binomial[n,1-(1-\frac{d}{n})^i]$ for $d>1$.
	Prove that as $n\to \infty$, the distribution goes to zero for all $i$ 
	except for $i$ in the two ranges
	$[0, c_1\ln n]$ and $[\theta n-c_2\sqrt{n},\theta n+c_2\sqrt{n}]$.
	
	{\Large Solution:}
	
	Let random variable $z_i=binomial[n,1-(1-\frac{d}{n})^i]$.

	The expected value of the binomial distribution is (for small $i$):
		$$E(z_i)=n(1-(1-\frac{d}{n})^i)\approx n(1-e^{-\frac{id}{n}})
		\approx n(1-(1-\frac{id}{n}+...))\approx id$$
		$$Prob(z_i=k)=\binom{n}{k}\bigg(\frac{id}{n}\bigg)^{k}
		\bigg(1-\frac{id}{n}\bigg)^{n-k}\approx \frac{n^k}{k!}\bigg(\frac{id}{n}\bigg)^{k}
		e^{-id}= \frac{(id)^k}{k!}e^{-id}$$
	This probability distribution can be approximated by Poisson distribution
	with $\lambda=id$. When $i=c_1\ln n$, the probability goes to zero as $n\to \infty$.
		
	When $i=O(n)$, the binomial distribution $binomial(n,1-(1-\frac{d}{n})^i)$
	can be approximated by a Gaussian distribution $e^{-\frac{l^2}{2\sigma^2}}$
	where $\sigma^2=np(1-p)$ and $l=|\theta n-i|$. The Gaussian distribution falls off
	exponentially fast with the squared distance from its mean. Thus, in order to
	have non-vanishing probability, $i$ must be within $O(\sqrt{n})$ of $\theta n$.

	So we can conclude: the distribution goes to zero for all $i$ except for $i$ in
	the two ranges $[0,c_1\ln n]$ and $[\theta n-c_2\sqrt{n},\theta n+c_2\sqrt{n}]$
\vspace{20pt}
\item[Exercise 3.54]
	{\Large Question:}

	For $f(x)=1-e^{-dx}-x$, what is the value of $x_{\max}=\arg\max_x f(x)$?
	What is the value of $f(x_{\max})$?
	Where does the expected value of the frontier of a BFS in $G(n,\frac{d}{n})$
	occur as a function of $n$?

	{\Large Solution:}

\vspace{20pt}
\item[Exercise 3.55]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.56]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.57]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.58]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.59]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.60]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.61]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.62]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.63]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.64]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.65]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.66]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.67]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.68]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.69]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.70]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.71]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.72]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.73]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}
\item[Exercise 3.74]
	{\Large Question:}

	{\Large Solution:}
\vspace{20pt}

\end{enumerate}
\end{document}

