%!TEX program = xelatex
%!TEX TS-program = xelatex
%!TEX encoding = UTF-8 Unicode

\documentclass[12pt,t,aspectratio=169,mathserif]{beamer}
%Other possible values are: 1610, 149, 54, 43 and 32. By default, it is set to 128mm by 96mm (4:3).
%run XeLaTeX to compile.

\input{wang-slides-preamble.tex}

\begin{document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title{Applied Stochastic Processes - Lecture 02}
\subtitle{(3.4 - 3.5) Markov Chains - First Step Analysis, More Examples}
%(3.4-3.5) 
%\institute{上海立信会计金融学院}
\author{MAP SK}
\date{March 16, 2021}

\maketitle

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\begin{enumerate}
%\item 第一讲：March 9 (3.1-3.3) Markov Chains - Concepts and Examples
%\item 第二讲：March 16 (3.4-3.5) Markov Chains - First Step Analysis, More Examples
%\item 第三讲：March 23 (4.1-4.4) Markov Chains - Long Run Behaviors
%\item 第四讲：April 13 (5.1-5.2) Poisson Processes - Concept and Examples 
%\item 第五讲：April 20 (5.3-5.4) Poisson Processes - Associated Distributions  
%\item 第六讲：May 11 (6.1-6.1) Markov Chains - Continuous Time, Pure Birth Processes
%\item 第七讲：May 18 (7.1-7.2) Renewal Processes - Renewal Function, Block Replacement
%\item 第八讲：May 25 (8.1-8.2) Brownian Motions -  the Reflection Principle 
%\end{enumerate}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{Contents }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}
\item  First step analysis
\item  Two-state Markov chain
\item  One-dimensional random walks
\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.1. First step analysis }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  A surprising number of functionals on a Markov chain can be evaluated by a technique that we call {\color{red}first step analysis}. 

\item  This method proceeds by analyzing, or breaking down, the possibilities that can arise at the end of the first transition, and then invoking {\color{red}the law of total probability} coupled with the {\color{red}Markov property} to establish a characterizing relationship among the unknown variables. 

\item  We first applied this technique in the proof of CK equations. In this section, we develop a series of applications of the technique.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.2.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Consider the Markov chain $\{X_n\}$ whose transition probability matrix is 
\begin{eqnarray*}
P=
\begin{blockarray}{cccc}
& 0 & 1 & 2  \\
\begin{block}{c[ccc]}
  0   & 1 & 0 & 0  \\ 
  1   & \alpha & \beta & \gamma \\ 
  2   & 0 & 0 & 1 \\
\end{block}
\end{blockarray},
\end{eqnarray*}
where $\alpha>0$, $\beta>0$, $\gamma>0$, and $\alpha+\beta+\gamma=1$. 

\item  If the Markov chain begins in state 1, it remains there for a random duration and then proceeds either to state 0 or to state 2, where it is trapped or absorbed. 

\item  That is, once in state 0, the process remains there forever after, as it also does in state 2. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.3.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  Two questions arise: In which state, 0 or 2, is the process ultimately trapped, and how long, on the average, does it take to reach one of these states? 

\item  Both questions are easily answered by instituting a first step analysis.

\item  We begin by more precisely defining the questions. Let 
\begin{eqnarray*}
T = \min\{n\ge 0; X_n =0 \text{ or } X_n =2\}
%\label{eq3-18}
\end{eqnarray*}
be the time of absorption of the process. 

\item  In terms of this random absorption time, the two questions ask us to find
\begin{eqnarray*}
u=\mathbb{P}\{ X_T =0 \mid X_0 =1\} \text{ and } v = \mathbb{E}[ T \mid X_0 = 1 ].
%\label{eq3-18}
\end{eqnarray*}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.4.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  We proceed to institute a first step analysis, considering separately the three contingencies $X_1 = 0$, $X_1 = 1$, and $X_1 = 2$, with respective probabilities $\alpha$, $\beta$, and $\gamma$. 

\item  Consider $u = \mathbb{P}\{X_T = 0\mid X_0 = 1\}$. 

\begin{itemize}
\item  If $X_1 = 0$, which occurs with probability $\alpha$, then $T = 1$ and $X_T =0$. 

\item  If $X_1 =2$, which occurs with probability $\gamma$, then again $T =1$, but $X_T =2$. 

\item  Finally, if $X_1 = 1$, which occurs with probability $\beta$, then the process returns to state 1 and the problem repeats from the same state as before. 
\end{itemize}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.5.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  In symbols, we claim that
\begin{eqnarray*}
\mathbb{P}\{X_T =0 \mid X_1 =0\} &=& 1, \\
\mathbb{P}\{X_T =0 \mid X_1 =2\} &=& 0, \\
\mathbb{P}\{X_T =0 \mid X_1 =1\} &=& u,
%\label{eq3-18}
\end{eqnarray*}
which inserted into the law of total probability gives
\begin{eqnarray*}
u &=& \mathbb{P}\{X_T =0 \mid X_0 =1\} \\ 
&=& \sum_{k=0}^{2} \mathbb{P}\{X_T =0\mid X_0 =1,X_1 =k\} \mathbb{P}\{X_1 =k\mid X_0 =1\} \\
&=& \sum_{k=0}^{2} \mathbb{P}\{X_T = 0\mid X_1 = k\} \mathbb{P}\{X_1 = k\mid X_0 = 1\} \\
&=& 1(\alpha)+u(\beta)+0(\gamma).  %(by the Markov property)
%\label{eq3-18}
\end{eqnarray*}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.6.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Thus, we obtain the equation 
\begin{eqnarray}
u=\alpha+\beta u, 
\label{eq3-19}
\end{eqnarray}
which gives 
$$u=\frac{\alpha}{1-\beta}=\frac{\alpha}{\alpha+\gamma}.$$ 

\item  Observe that this quantity is the conditional probability of a transition to 0, given that a transition to 0 or 2 occurred. 

\item  That is, the answer makes sense.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.7.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  We turn to determining the mean time to absorption, again analyzing the possibilities arising on the first step. 

\item  The absorption time $T$ is always at least 1. 

\begin{itemize}

\item  If either $X_1 = 0$ or $X_1 = 2$, then no further steps are required. 

\item  If, on the other hand, $X_1 = 1$, then the process is back at its starting point, and on the average, $v = \mathbb{E} [ T\mid X_0 = 1]$ additional steps are required for absorption. 

\end{itemize}

\item  Weighting these contingencies by their respective probabilities, we obtain 
\begin{eqnarray}
v = 1+\alpha(0)+\beta(v)+\gamma(0) = 1+\beta v,
\label{eq3-20}
\end{eqnarray}
which gives $v=1/(1-\beta)$.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.8.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  In the example just studied, the reader is invited to verify that $T$ has the geometric
distribution in which
\begin{eqnarray*}
\mathbb{P}\{T > k\mid X_0 = 1\} = \beta^k \text{ for } k = 0,1,\cdots,
%\label{eq3-20}
\end{eqnarray*}
and, therefore, 
\begin{eqnarray*}
\mathbb{E} [ T \mid X_0 =1 ] = \sum\limits_{k=0}^{\infty} \mathbb{P}\{ T>k \mid X_0=1\} = \frac{1}{1-\beta}.
%\label{eq3-20}
\end{eqnarray*}

\item  That is, a direct calculation verifies the result of the first step analysis. 

\item  Unfortunately, in more general Markov chains, a direct calculation is rarely possible, and first step analysis provides the only solution technique.

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.9. The Two-State Markov Chain }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Let $P$ be the transition matrix of a two-state Markov chain,
\begin{eqnarray}
P=
\begin{blockarray}{ccc}
& 0 & 1   \\
\begin{block}{c[cc]}
  0   & 1-a & a  \\ 
  1   & b & 1-b  \\ 
\end{block}
\end{blockarray}. 
\label{eq3-30}
\end{eqnarray}

\item  When $a = 1 - b$ so that the rows of $P$ are the same, then the states $X_1,X_2,\cdots$ are independent identically distributed random variables with 
$$\mathbb{P}\{X_n = 0\} = b \text{ and } \mathbb{P}\{X_n = 1\} = a. $$

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.10.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  When $a\neq 1 - b$, the probability distribution for $X_n$ varies depending on the outcome $X_{n-1}$ at the previous stage.

\item  For the two-state Markov chain, it is readily verified by induction that the $n$-step transition matrix is given by
\begin{eqnarray}
P^n = \frac{1}{a+b} \begin{bmatrix} b&a \\ b&a \end{bmatrix} + \frac{(1-a-b)^n}{a+b} \begin{bmatrix} a&-a \\ -b&b \end{bmatrix}.
\label{eq3-31}
\end{eqnarray}

\item  Note that $|1-a-b|<1$ when $0<a,b<1$, and thus $|1-a-b|^n \to 0$ as $n\to\infty$ and 
\begin{eqnarray}
\lim\limits_{n\to\infty} P^n = \frac{1}{a+b} \begin{bmatrix} b&a \\ b&a \end{bmatrix}. 
\label{eq3-32}
\end{eqnarray}


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.11.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  For a numerical example, suppose that the items produced by a certain worker are graded as defective or not and that due to trends in raw material quality, whether or not a particular item is defective depends in part on whether or not the previous item was defective. 
Let $X_n$ denote the quality of the $n$th item with $X_n = 0$ meaning `good' and $X_n = 1$ meaning `defective'. 

\item  Suppose that $\{X_n\}$ evolves as a Markov chain whose transition matrix is
\begin{eqnarray*}
P=
\begin{blockarray}{ccc}
& 0 & 1   \\
\begin{block}{c[cc]}
  0   & 0.99 & 0.01    \\ 
  1   & 0.12 & 0.88   \\ 
\end{block}
\end{blockarray}. 
%\label{eq3-30}
\end{eqnarray*}

\item  Defective items would tend to appear in bunches in the output of such a system.
In the long run, the probability that an item produced by this system is defective is given by $a/(a + b) = 0.01/(0.01 + 0.12) = 0.077$.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.12. One-Dimensional Random Walks }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  When we discuss random walks, it is an aid to intuition to speak about the state of the system as the position of a moving `particle'.

\item  A one-dimensional random walk is a Markov chain whose state space is a finite or infinite subset $a,a+1,\cdots,b$ of the integers, in which the particle, if it is in state $i$, can in a single transition either stay in $i$ or move to one of the neighboring states $i-1, i+1$. 


\item  The designation `random walk' seems apt, since a realization of the process describes the path of a person (suitably intoxicated) moving randomly one step forward or backward. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.13.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  If the state space is taken as the nonnegative integers, the transition matrix of a random walk has the form
\begin{eqnarray*}
P=
\begin{blockarray}{cccccc}
          & 0    & 1     & 2 & 3 & \cdots   \\
\begin{block}{c[ccccc]}
  0      & r_0 & p_0 & 0 & 0  & \cdots  \\ 
  1      & q_1 & r_1 & p_1 & 0  & \cdots  \\ 
  2      & 0 & q_2 & r_2 & p_2  & \cdots  \\ 
  3      & 0 & 0 & q_3 & r_3  & \cdots  \\ 
\vdots & \vdots & \vdots & \vdots & \vdots  &  \\ 
\end{block}
\end{blockarray}. 
%\label{eq3-38}
\end{eqnarray*}
where $p_i>0$, $q_i >0$, $r_i \ge 0$, $q_i +r_i +p_i =1$, $i\ge 1$, \\ 
and $p_0 \ge 0$, $r_0 \ge 0$, $r_0+p_0 =1$.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{2.14.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  The fortune of a player engaged in a series of contests is often depicted by a random walk process. 

\item  Specifically, suppose an individual (player A) with fortune $k$ plays a game against an infinitely rich adversary and has probability $p_k$ of winning one unit and probability $q_k = 1-p_k$ ($k \ge 1$) of losing one unit in the next contest (the choice of the contest at each stage may depend on his fortune), and $r_0 = 1$. 

\item  The process $X_n$, where $X_n$ represents his fortune after $n$ contests, is clearly a random walk. 

\item  Note that once the state 0 is reached (i.e., player A is wiped out), the process remains in that state. 
%
The event of reaching state $k = 0$ is commonly known as the `gambler's ruin'.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%









