\documentclass[11pt]{article}
%\documentclass{sig-alternate}
\usepackage{algorithm}
\usepackage{algorithmic}

\usepackage{subfigure}
\usepackage{epsfig,amsthm,amsmath,color, amsfonts}
\usepackage{epsfig,color}
\newcommand{\xxx}[1]{\textcolor{red}{#1}}
%\usepackage{fullpage}
\usepackage{framed}
%\usepackage{epsf}
%\usepackage{hyperref}

%\setlength{\textheight}{9.4in} \setlength{\textwidth}{6.55in}
\setlength{\textheight}{9.2in} \setlength{\textwidth}{6.55in}
%\setlength{\topmargin}{0in}

\voffset=-0.9in
\hoffset=-0.8in

\newtheorem{theorem}{Theorem}[section]
%\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
%\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
\theoremstyle{remark}\newtheorem{observation}[theorem]{Observation} % 'observation' is not a valid amsthm style; 'remark' is the closest standard style

\newcommand{\comment}[1]{}
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
\newcommand{\theoremref}[1]{(\ref{#1})}
\newenvironment{proof1}{\noindent \mbox{}{\bf Proof:}}{\QED}
%\newenvironment{observation}{\mbox{}\\[-10pt]{\sc Observation.} }%
%{\mbox{}\\[5pt]}

\def\m{{\rm min}}
%\def\m{\bar{m}}
\def\eps{{\epsilon}}
\def\half{{1\over 2}}
\def\third{{1\over 3}}
\def\quarter{{1\over 4}}
\def\polylog{\operatorname{polylog}}
\newcommand{\ignore}[1]{}
\newcommand{\eat}[1]{}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}

\newcommand{\algorithmsize}[0]{}

%---------------------
%  SPACE SAVERS
%---------------------

\usepackage{times}
\usepackage[small,compact]{titlesec}
\usepackage[small,it]{caption}

\newcommand{\squishlist}{
 \begin{list}{$\bullet$}
  { \setlength{\itemsep}{0pt}
     \setlength{\parsep}{3pt}
     \setlength{\topsep}{3pt}
     \setlength{\partopsep}{0pt}
     \setlength{\leftmargin}{1.5em}
     \setlength{\labelwidth}{1em}
     \setlength{\labelsep}{0.5em} } }
\newcommand{\squishend}{
  \end{list}  }

%---------------------------------
% FOR MOVING PROOFS TO APPENDIX
%\usepackage{answers}
%%\usepackage[nosolutionfiles]{answers}
%\Newassociation{movedProof}{MovedProof}{movedProofs}
%\renewenvironment{MovedProof}[1]{\begin{proof}}{\end{proof}}

\def\e{{\rm E}}
\def\var{{\rm Var}}
\def\ent{{\rm Ent}}
\def\eps{{\epsilon}}
\def\lam{{\lambda}}
\def\bone{{\bf 1}}



\begin{document}


\title{Theorem Part}

%\begin{titlepage}


\date{}

\maketitle \thispagestyle{empty}

\vspace*{.4in}


%\maketitle % duplicate: \maketitle is already issued above


\section{Theorem}
\begin{lemma}\label{lem:uniformity}
For any expander graph $G$, it only takes $t = O(\log n)$ steps for a random walk to converge to the uniform distribution.
\end{lemma}
\begin{proof}
Let $M$ be the transition matrix of $G$, $1 = \lambda_1 \geq \lambda_2 \geq ... \geq \lambda_n $ be the eigenvalues of $M$, and $x_1, ... , x_n$ be a system of orthonormal eigenvectors. Let $p \in \mathbb{R}^V$ be a row vector that represents a probability distribution over vertices $V$. Then for every $t \geq 1$, $p M^t$ is a probability distribution, and it represents the distribution of the final vertex in a $t$-step random walk in $G$ that starts
at a vertex selected according to $p$. Since $p$ is a probability distribution, it can be written as $p = \alpha_1 x_1 + \dots + \alpha_n x_n$. Then 
$$pM^t = \alpha_1 x_1 + \alpha_2 \lambda_2^t x_2 + ... + \alpha_n \lambda_n^t x_n$$
where $x_1 = \frac{1}{\sqrt{n}}(1, \dots ,1)$ and $\alpha_1 = p \cdot x_1^T = \frac{1}{\sqrt{n}}\sum_v p(v) = \frac{1}{\sqrt{n}}$, so that $\alpha_1 x_1 = (\frac{1}{n}, \dots, \frac{1}{n})$ is the uniform distribution, which we denote by $pU$. Now,
\begin{align*}
\| pM^t - pU \| & = \| \alpha_2 \lambda_2^t x_2 + \dots + \alpha_n \lambda_n^t x_n \| \\
& = \sqrt{\alpha_2^2 \lambda_2^{2t} + \dots + \alpha_n^2 \lambda_n^{2t}} \\
& \leq \max_{2 \leq i \leq n} |\lambda_i|^t \cdot \sqrt{\alpha_2^2 + \dots + \alpha_n^2} \\
& \leq \max_{2 \leq i \leq n} |\lambda_i|^t \cdot \| p \| \\
& \leq \max_{2 \leq i \leq n} |\lambda_i|^t
\end{align*}
where we use the fact that if $p$ is a probability distribution then 
$$\| p \| = \sqrt{\sum_{v} p^2(v)} \leq \sqrt{\sum_{v} p(v)} = 1 $$
Let $\bar{\lambda}_2 = \max_{2 \leq i \leq n} |\lambda_i| = \max \{\lambda_2, -\lambda_n\} $ be the second largest eigenvalue in absolute
value. Therefore, we get $$ \| pM^t - pU \| \leq \bar{\lambda}_2^t $$
So if we choose $ t = O(\frac{2}{1-\bar{\lambda}_2}\log n)$, then we obtain, say,
$$ \| pM^t - pU \| \leq \frac{1}{100 n^{2}} $$
Since $\bar{\lambda}_2$ is a constant (bounded away from $1$) for an expander graph, $O(\log n)$ steps are sufficient for a random walk to converge to the uniform distribution. 
\end{proof}
\begin{lemma}\label{lem:expander-fraction}
 If the number of short walks from each vertex $v$ is $\Theta(d(v)\log n)$ in a random $G(n,p)$ graph, where $p = \frac{\log n}{n}$, then a $1 - O(\frac{1}{\sqrt{\log n}})$ fraction of the short walks
get utilized. Case 1: the length of the short walk is $\geq \log n$; Case 2: the length of the short walk is $< \log n$. 
%This result also holds for the random geometric graph $G(n,r)$ with $r = \sqrt{\frac{\log n}{n}}$. ??
\end{lemma}
\begin{proof}
\textbf{Case 1:} Suppose $\sqrt{\ell}$ is larger than $ \log n$, the mixing time of an expander graph (cf. Lemma~\ref{lem:uniformity}). \\
There are $\Theta(\log^2 n)$ short walks from each node $v$, since $d(v)$ is $\Theta(\log n)$ w.h.p.\ for this graph. So in total there are $\Theta(n \log^2 n) = T$ (say) short walks. Now we consider the $n$ vertices as $n$ bins and the $T$ short walks as $T$ balls. Viewed in reverse: $T$ balls are thrown independently into $n$ bins, where the capacity of each bin is $\Theta(\log^2 n)$. We want to calculate the imbalance of each bin from its capacity when the first bin reaches its capacity.\\

Let $X_1^i, X_2^i, \dots, X_T^i$ be the 0-1 random variables such that 
$$ X_j^i = \left\{
  \begin{array}{l l}
    1 & \quad \text{if the $j$-th ball is in the $i$-th bin}\\
    0 & \quad \text{otherwise}\\
  \end{array} \right. $$
Let $X^i = \sum_{j=1}^{T} X_j^i $. Then $E[X^i] = \frac{T}{n} = \Theta( \log^2 n) $. Since the variables $X_1^i, X_2^i, \dots, X_T^i$ are i.i.d., by Chernoff's bound (for $0<\delta<1$)
$$ \Pr\left( | X^i - E[X^i] | \geq E[X^i] \delta \right) \leq 2 e^{-E[X^i] \delta^2/3} $$
Choosing $\delta = \frac{c}{\sqrt{\log n}}$ we get,
$$ \Pr\left( | X^i - E[X^i] | \geq (\log n)^{3/2} \right) < \frac{1}{n^2}$$ for a suitable constant $c$. \\

Thus we see that there is a wastage of $O((\log n)^{3/2})$ short walks for each vertex w.h.p. Hence the fraction of wastage over all vertices is $\frac{n (\log n)^{3/2}}{n \log^2 n} = \frac{1}{\sqrt{\log n}}$.  \\
Therefore the fraction of short walks that get utilized is $1 - O(\frac{1}{\sqrt{\log n}})$ with high probability. 

\textbf{Case 2:} Suppose $\sqrt{\ell}$ is small, say $< \log n$ (the mixing time); then the number $K$ of $\ell$-length walks is large, in fact $K = O(m)$, as the available number of connectors is $\eta d(v) \log n$. So in this case we can use Chernoff's bound, as each $\ell$-length walk is longer than the mixing time and $K$ is large enough that we need not worry about short-length walks (which could be shorter than the mixing time). We will show that there is a $c \log n$ imbalance in the utilization of short walks for each vertex.\\
Let us assume that the source nodes are chosen uniformly for each walk. Let $X_i^v$ be the number of times the $i$-th walk visits the vertex $v$ as a connector, for $i = 1,2,\dots,K$. Then $X_i^v \in [0, \sqrt{\ell}]$. Let $X^v = \sum_{i = 1}^{K}X_i^v$. Then $E[X^v] = \frac{K d(v) \sqrt{\ell}}{2m}$, since for one $\ell$-length walk, in expectation a vertex $v$ can be a connector $ \frac{d(v)}{2m} \sqrt{\ell}$ times. Note that here each $X_i^v$ is not a 0-1 random variable but is bounded in the interval $[0, \sqrt{\ell}]$. Using the Chernoff--Hoeffding bound~\cite{?} % NOTE(review): the bound below is Hoeffding's inequality for bounded variables — fill in the citation
$$\Pr\left( | X^v - E[X^v] | \geq t \right) \leq 2 \exp\left(\frac{-2t^2}{\sum_i(\sqrt{\ell} - 0)^2}\right)$$
Therefore, 
\begin{align*}
\Pr\left( | X^v - E[X^v] | \geq \alpha (\log n)^{3/2} \right) & \leq 2 \exp\left(\frac{-2 \alpha^2 \log^3 n}{K \ell}\right) \\
& < \frac{2}{n^2} & \text{[since $\ell = \log^2 n$ and $\alpha$ is suitably chosen]}
\end{align*}
Thus we see that there is a wastage of $O((\log n)^{3/2})$ short walks for each vertex w.h.p. Hence the fraction of wastage over all vertices is $\frac{n (\log n)^{3/2}}{n \log^2 n} = \frac{1}{\sqrt{\log n}}$.  \\
Therefore the fraction of short walks that get utilized is $1 - O(\frac{1}{\sqrt{\log n}})$ with high probability.

\end{proof}


\end{document}
