\documentclass[12pt]{amsart}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{macros-david}
\usepackage{fullpage}
\usepackage[all]{xy}
\usepackage{color}

\renewcommand{\familydefault}{\sfdefault}

\title{Chapter 5}
\date{}
\begin{document}
\maketitle

\section{walks}

\begin{definition}
A \emph{\( (k,n) \)-walk} is a pair \( (w;\vec {t}\,) \) where \( w \) is called the \emph{base walk} and \( \vec t \) the \emph{trajectory}. The base walk \( w=\sigma_1\ldots\sigma_k \) is an oriented walk of length \( k \) in the base graph \( B \), where the \( \sigma_i \) denote oriented edges of the base graph. The trajectory \( \vec t = \left( (v_0,t_0), (v_1,t_1), \ldots, (v_k,t_k) \right) \) is an ordered \( (k+1) \)-tuple of pairs \( (v_i,t_i) \) where \( v_i \) is a vertex of the base graph and \( t_i \) an integer between 1 and \( n \). Additionally, \( (k,n) \)-walks are required to satisfy the following feasibility conditions:
\begin{itemize}
\item For all \( i=1,\ldots,k \) we have \( \textrm{tail}(\sigma_i) = v_{i-1} \) and \( \textrm{head}(\sigma_i)=v_i \) ,
\item if \( \sigma_i=\sigma_j \) then \( t_{i-1}=t_{j-1} \) if and only if \( t_i=t_j \) ,
\item if \( \sigma_i=\sigma_j^{-1} \) then \( t_{i-1}=t_j \) if and only if \( t_i=t_{j-1} \) .
\end{itemize}
We call \( k \) the  \emph{length} of the walk and \( n \) its  \emph{size}. The length of a walk is uniquely determined, but not its size. We say that the walk is  \emph{closed} if the base walk \( w \) is and if \( t_0=t_k \) . We say that the walk is  \emph{irreducible} if the base walk \( w \) is an irreducible (or non-backtracking) walk.
\end{definition}

The idea of that definition is that any walk in a random element \( G \) of \( \mathcal{G}_{n,B} \) is the lift of some walk in the base graph---encoded by the base walk \( w \)---and the trajectory \( \vec t \) describes the vertices of that graph \( G \) visited by the walk. The conditions added in the definition ensure that the walk will exist in at least one of the random elements of the \( \mathcal{G}_{n,B} \) model and we denote by \( P(w;\vec{t}\,) \) the probability of that event.

\begin{note}
\textit{Recall that a random element of the \( \mathcal{G}_{n,B} \) model is entirely described by the choice of an \( n \)-permutation \( \pi_j \) for each edge of the base graph. As a consequence the probability that a \( (k,n) \)-walk \( (w;\vec t\,) \) is realizable in a random element simply corresponds to the probability that the permutations \( \pi_j \) are chosen such that \( \sigma_j(t_{j-1})=t_j \).}
\end{note}

\begin{remark}
We can view the trace of the \( k \)-th power of a random element of the \( \mathcal{G}_{n,B} \) model as a random variable, whose expectation satisfies
\[
\text{E}\left[ \textrm{Tr}(G^k) \right] = \sum_{(w;\vec t\,) \in \mathcal{W}} P(w;\vec t\,)
\]
where \( \mathcal{W} \) is the collection of all closed \( (k,n) \) -walks based on \( B \) .
\end{remark}

\begin{definition}
Given a base walk \( w \), we define an equivalence relation on all trajectories \( \vec t \) that make \( (w;\vec t\,) \) a \( (k,n) \)-walk as follows: let \( \vec t = \left( (v_0,t_0), (v_1,t_1), \ldots, (v_k,t_k) \right) \) and \( \vec s = \left( (v_0,s_0), (v_1,s_1), \ldots, (v_k,s_k) \right) \) be two such trajectories, then we say they \emph{differ by a symmetry} if there exists an \( n \)-permutation \( \sigma \) such that \( \sigma(t_i) = s_i \) for all \( i = 0,\ldots,k \). We denote by \( [\vec t\,]_n \) the associated equivalence class.
\end{definition}


\begin{definition}
A  \emph{walk collection}, \( \mathcal{W} = \{ \mathcal{W}(k,n) \}_{k,n \geq 1} \) is a collection where for any two positive integers \( k \) and \( n \) the elements of the set \( \mathcal{W}(k,n) \) are \( (k,n) \) -walks satisfying the following conditions.
\begin{itemize}
\item \textbf{Symmetry:} The walk collection contains all trajectories that differ by a symmetry; that is, if \( (w;\vec t\,) \in \mathcal{W}(k,n) \) then \( (w;\vec s\,) \in \mathcal{W}(k,n) \) for all \( \vec s \in [\vec t\,]_n \) .
\item \textbf{Size invariance:} The walk collection reflects the fact that the size of a walk isn't uniquely determined; that is, if \( (w;\vec t\,) \in \mathcal{W}(k,n) \) then \( (w;\vec t\,) \in \mathcal{W}(k,m) \) for all \( m \geq \max\{ t_i \mid \vec t = \left( (v_0,t_0), (v_1,t_1), \ldots, (v_k,t_k) \right) \} \).
\item \textbf{Irreducible walks:} All the walks in the walk collection have to be irreducible; that is, the base walk \( w \) of any \( (k,n) \) -walk in the collection has to be irreducible.
\item \textbf{Closed walks:} All the walks in the walk collection have to be closed; that is, the base walk \( w \) of any \( (k,n) \) -walk \( (w;\vec t\,) \) in the collection has to be closed and its trajectory must satisfy \( t_0 = t_k \) .
\end{itemize}
Given a walk collection \( \mathcal{W} \) its associated \emph{\( (k,n) \)-walk sum} is
\[
\textrm{WalkSum}(\mathcal{W},k,n) = \sum_{(w;\vec t\,) \in \mathcal{W}(k,n)} P(w;\vec t\,)
\]
Note that this sum is always a finite sum (unless the base graph \( B \) has infinitely many edges). Since walk collections are symmetric, we can regroup the terms of the above sum by grouping trajectories that differ by a symmetry. Let
\[
\text{E}_\text{symm}(w;\vec t\,)_n = \sum_{\vec s \in [\vec t \, ]_n} P(w;\vec s\,)
\]
then we have
\[
\textrm{WalkSum}(\mathcal{W},k,n) = \sum_{(w;[\vec t\,]_n) \in \mathcal{W}(k,n)} \text{E}_\text{symm}(w;\vec t\,)_n
\]
\end{definition}

Let us now comment about the probability of a given walk. Let \( (w;\vec t\,) \) be a closed \( (k,n) \)-walk. Denote by \( a_j \) the number of values of the corresponding permutation \( \pi_j \) that are determined by the conditions \( \sigma_i(t_{i-1})=t_i \) in order for the walk to be feasible; and denote by \( b_i \) the number of vertices in the trajectory that belong to the fibre of the \( i^\text{th} \) vertex of the base graph \( B \), then
\begin{align*}
P(w;\vec t\,) &= \prod_{j=1}^a \frac{(n-a_j)!}{n!} \\
\intertext{and}
\#[\vec t\,]_n &= \prod_{i=1}^s \frac{n!}{(n-b_i)!} \\
\intertext{and so we can conclude that}
\text{E}_\text{symm}(w;\vec t\,)_n &= \prod_{i=1}^s \frac{n!}{(n-b_i)!} \prod_{j=1}^a \frac{(n-a_j)!}{n!}
\end{align*}
We write \( e=a_1+ \ldots +a_a \) and \( v=b_1 + \ldots + b_s \); then \( e \) is the number of edges of the walk and \( v \) its number of vertices.

\begin{definition}
Given a \( (k,n) \) -walk as above, its  \emph{order} is \( e-v \) .
\end{definition}

\begin{theorem}[Old 5.5]
There exist polynomials \( p_i=p_i(a_1,\ldots,a_a,b_1,\ldots,b_s) \) for all \( i \geq 0 \) such that
\[
\text{E}_\text{symm}(w;\vec t\,)_n = \frac{1}{n^{e-v}} \sum_{i \geq 0}p_i(a_1,\ldots,a_a,b_1,\ldots,b_s) \frac{1}{n^i}
\]	
and for any positive integer \( q \) we have
\[
\text{E}_\text{symm}(w;\vec t\,)_n = \frac{1}{n^{e-v}} \left( p_0 + \frac{p_1}{n} + \ldots + \frac{p_{q-1}}{n^{q-1}} + \frac{\textnormal{error}}{n^q} \right) 
\]
where
\[
|\textnormal{error}| \leq e^{qk/(n-k)}k^{2q}
\]
\end{theorem}
{\color{blue}\begin{proof}
Using our previous remark, we have
\begin{align*}
\text{E}_\text{symm}(w;\vec t\,)_n &= \prod_{i=1}^s \frac{n!}{(n-b_i)!} \prod_{j=1}^a \frac{(n-a_j)!}{n!}\\
\intertext{The terms where \( b_i=0 \) or \( a_j=0 \) can be ignored.}
&= \prod_{i=1}^s n (n-1) \ldots (n-b_i+1) \prod_{j=1}^a \frac{1}{n (n-1) \ldots (n-a_j+1)}\\
&= \prod_{i=1}^s n^{b_i} \left(1-\frac{1}{n}\right) \ldots \left(1-\frac{b_i-1}{n}\right) \prod_{j=1}^a n^{-a_j}\left(1-\frac{1}{n}\right)^{-1} \ldots \left(1-\frac{a_j-1}{n}\right)^{-1}\\
\intertext{Since \( b_1+\ldots+b_s = v \) and \( a_1+\ldots +a_a = e \) we have}
&= \frac{1}{n^{e-v}} \prod_{i=1}^s \left(1-\frac{1}{n}\right) \ldots \left(1-\frac{b_i-1}{n}\right) \prod_{j=1}^a \left(1-\frac{1}{n}\right)^{-1} \ldots \left(1-\frac{a_j-1}{n}\right)^{-1}
\end{align*}
Notice that in the power series expansion about \( x=0 \) of
\[
(1-x)(1-2x)\ldots (1-mx) \quad\text{and}\quad (1-x)^{-1}(1-2x)^{-1} \ldots (1-mx)^{-1}
\]
the \( x^i \) coefficient is a polynomial (of degree at most \( 2i \)) in \( m \). Hence for \( x=1/n \) and for \( n \) sufficiently large, there exist polynomials \( p_0, p_1, \ldots \) in the variables \( a_1,\ldots,a_a, b_1, \ldots, b_s \) such that
\[
\text{E}_\text{symm}(w;\vec t\,)_n = \frac{1}{n^{e-v}} \sum_{i \geq 0}p_i(a_1,\ldots,a_a,b_1,\ldots,b_s) \frac{1}{n^i}
\]
We call these the  \emph{expansion polynomials}. To finish the proof of this theorem, we simply need to evaluate the size of the error obtained from Taylor's theorem if the above series is truncated after \( q \) terms. For this we use a lemma proved in [Fri91] page 339, equation (6) which states that if
\[
g(x) = (1-c_1x) \ldots (1-c_Nx)(1-d_1x)^{-1} \ldots (1-d_Mx)^{-1}
\]
where the \( c_i \) and \( d_j \) are positive constants, then \( g \) 's \( q \) -th derivative satisfies
\[
\frac{|g^{(q)}(x)|}{q!} \leq (1-x\,d_{\text{max}})^{-q} \left( \sum_{i=1}^N c_i + \sum_{j=1}^M d_j \right)^q
\]
In our case here, we have that
\begin{align*}
\left\{ c_1, \ldots, c_N \right\} &= \{ 0, \ldots, b_i-1 \mid i=1, \ldots, s \} \\
\left\{ d_1, \ldots, d_M \right\} &= \{ 0, \ldots, a_j-1 \mid j=1, \ldots, a \}
\end{align*}
By Taylor's theorem, the error term we are looking for satisfies
\[
|\textrm{error}| = \frac{|g^{(q)}(\xi)|}{q!} \leq (1-\xi\,d_{\text{max}})^{-q} \left( \sum_{i=1}^N c_i + \sum_{j=1}^M d_j \right)^q
\]
for some \( \xi \) between 0 and \( 1/n \) . Now since
\[
- \log[1-x] = x + \frac{x^2}{2} + \frac{x^3}{3} + \ldots \leq \frac{x}{1-x}
\]
and since
\[
d_{\max} = \max \{ a_j-1 \mid j=1,\ldots,a \} \leq e-1 \leq k
\]
we have that
\begin{align*}
\log[|\textrm{error}|] &\leq q \frac{\xi d_{\max}}{1-\xi d_{\max}} + \log\left[ \left( \sum_{i=1}^N c_i + \sum_{j=1}^M d_j \right)^q\right]\\
&\leq q \frac{k/n}{1-k/n} + \log\left[\left( \sum_{i=1}^{k-1} i + \sum_{j=1}^{k-1} j \right)^q\right]\\
&\leq \frac{qk}{n-k} + \log \left[\left( 2\binom{k}{2} \right)^q\right]\\
&\leq \frac{qk}{n-k} + \log \left[ k^{2q} \right]
\end{align*}
And so we can conclude that
\[
|\textrm{error}| \leq e^{qk/(n-k)} k^{2q}
\]
\end{proof}}

\begin{lemma}[Old 5.7]
Given a closed base walk \( w \) of length \( k \), we have that
\[
\sum_{\substack{\vec t \text{ such that } (w;\vec t\,)\\ \text{is of order} \geq r}} P(w;\vec t\,) \leq n \binom{k}{r+1}\left(\frac{k}{n-k}\right)^{r+1}
\]
which, for \( k \leq n/2 \) is at most \( ck^{2r+2}n^{-r} \) for some constant \( c \) depending only on \( r \) .
\end{lemma}
{\color{blue}\begin{proof}
We can evaluate \( P(w;\vec t\,) \) by considering the steps of the walk one at a time. When considering the choice of the outcome of the random permutation associated with \( \sigma_i \) there are three cases to consider:
\begin{enumerate}
\item a forced choice: if \( t_{i+1} = \sigma_i(t_i) \) has already been determined by previous information,
\item a free choice: if \( t_{i+1} \) has not already been determined, in which case we distinguish between:
\begin{enumerate}
\item[(2a)] a coincidence: if the newly determined pair \( (v_{i+1},t_{i+1}) \) coincides with a pair \( (v_m,t_m) \) already visited by the walk; note that the base vertex must agree as well, since two lifted vertices belong to the same fibre only if they lie over the same vertex of the base graph,
\item[(2b)] a generic choice: if else...
\end{enumerate}
\end{enumerate}
NOTE: this discussion should be outside of this proof, with a better description of the random variable stuff.
\end{proof}}

\begin{lemma}
Let \( (w;\vec t) \) be a \( (k,n) \)-walk, then the order of the walk is one less than the number of coincidences in that walk.
\end{lemma}
{\color{blue}\begin{proof}
We examine the walk step by step and consider each case. A forced choice doesn't affect the order of the walk since no new edge or vertex is added. A generic choice doesn't affect the order either since it adds both a new edge and a new vertex. On the other hand, a coincidence adds one to the order since a new edge is added but not a new vertex. At the beginning of the walk, we start with a simple vertex (that is a graph of order -1) and no coincidences, hence the claim.
\end{proof}}

\begin{lemma}[Old 5.8]
Let \( w \) be an irreducible base walk of length \( k \geq 2 \), then
\[
\# \left\{ [\vec t\,]_n \mid (w;\vec t\,) \text{ is of order less than }r \right\} \leq \sum_{i=0}^r \binom{k}{i}k^i \leq \frac{4}{3}k^{2r}
\]
\end{lemma}
{\color{blue}\begin{proof}
If the order of a walk is at most \( r-1 \), then it means the walk has at most \( r \) coincidences. Say we would like to build a trajectory \( \vec t \) which has \( j \) coincidences. Then we need to choose the steps at which these \( j \) coincidences will occur, that is \( \binom{k}{j} \) choices and each coincidence has to correspond to a fibre already used in the walk, that is at most \( k \) choices (per coincidence). Since we do not take into account the specific values that the trajectory will take, we are actually looking at the equivalence class of a trajectory. Hence there are at most
\[
\sum_{j=0}^r \binom{k}{j} k^j
\]
such equivalence classes. To finish this lemma, notice that
\begin{align*}
\sum_{j=0}^r \binom{k}{j} k^j &\leq \sum_{j=0}^r k^{2j}\\
&\leq k^{2r} \sum_{j \geq 0} k^{-2j}\\
\intertext{and since \( k \geq 2 \) we have}
&\leq k^{2r} \sum_{j \geq 0} 2^{-2j}\\
&\leq \frac{4}{3}k^{2r}
\end{align*}
\end{proof}}

\begin{theorem}[Old 5.9]
Let \( \mathcal{W} \) be a walk collection and let \( r \) be a positive integer, then for all \( k \leq n/2 \) we have
\[
\textrm{WalkSum}(\mathcal{W},k,n) = f_0(k) + \frac{f_1(k)}{n} + \ldots + \frac{f_{r-1}(k)}{n^{r-1}} + \frac{\textrm{error}}{n^r}
\]
where
\[
f_i(k) = \sum_{j=0}^{r-1} \sum_{(w;[\vec t\,]) \text{ of order }j, \in \mathcal{W}(k,n)} p_{i-j}(w;[\vec t\,])
\]
\end{theorem}




































\end{document}