%!TEX program = xelatex
%!TEX TS-program = xelatex
%!TEX encoding = UTF-8 Unicode

\documentclass[12pt,t,aspectratio=169,mathserif]{beamer}
%Other possible values are: 1610, 149, 54, 43 and 32. By default, it is set to 128mm by 96mm (4:3).
%run XeLaTeX to compile.

\input{wang-slides-preamble.tex}

\begin{document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title{Applied Stochastic Processes - Lecture 03}
\subtitle{(4.1 - 4.4) Markov Chains - Long Run Behaviors}
%(4.1-4.4) 
%\institute{上海立信会计金融学院}
\author{MAP SK}
\date{March 23, 2021}

\maketitle

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\begin{enumerate}
%\item 第一讲：March 9 (3.1-3.3) Markov Chains - Concepts and Examples
%\item 第二讲：March 16 (3.4-3.5) Markov Chains - First Step Analysis, More Examples
%\item 第三讲：March 23 (4.1-4.4) Markov Chains - Long Run Behaviors
%\item 第四讲：April 13 (5.1-5.2) Poisson Processes - Concept and Examples 
%\item 第五讲：April 20 (5.3-5.4) Poisson Processes - Associated Distributions  
%\item 第六讲：May 11 (6.1-6.1) Markov Chains - Continuous Time, Pure Birth Processes
%\item 第七讲：May 18 (7.1-7.2) Renewal Processes - Renewal Function, Block Replacement
%\item 第八讲：May 25 (8.1-8.2) Brownian Motions -  the Reflection Principle 
%\end{enumerate}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{Contents }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}
\item  Regular Markov chains
\item  Limiting theorem of regular Markov chains
\item  Doubly stochastic matrices
\item  Two interpretations of the limiting distribution
\item  Optimal replacement rules
\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.1. Regular Markov chains }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Suppose that a transition probability matrix $P = (P_{ij})$ on a finite number of states labeled $0,1,\cdots,N$ has the property that when raised to some power $k$, the matrix $P^k$ has all of its elements strictly positive. 

\item  Such a transition probability matrix, or the corresponding Markov chain, is called {\color{red}regular}. 

\item  The most important fact concerning a regular Markov chain is the existence of a {\color{red}limiting probability distribution} $\pi=(\pi_0,\pi_1,\cdots,\pi_N)$, where $\pi_j >0$ for $j = 0,1,\cdots,N$ and  $\sum_j\pi_j = 1$, and this distribution is independent of the initial state. 


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.2.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Formally, for a regular transition probability matrix $P = (P_{ij})$, we have the convergence
\begin{eqnarray*}
\lim\limits_{n\to\infty} P^{(n)}_{ij} = \pi_j > 0 \text{ for } j = 0,1,\cdots,N, 
%\label{eq4-1}
\end{eqnarray*}
or, in terms of the Markov chain $\{X_n\}$,
\begin{eqnarray*}
\lim\limits_{n\to\infty} \mathbb{P}\{X_n =j\mid X_0 =i\} = \pi_j >0 \text{ for }j = 0,1,\cdots,N.
%\label{eq4-1}
\end{eqnarray*}

\item  This convergence means that, in the long run ($n \to\infty$), the probability of finding the Markov chain in state $j$ is approximately $\pi_j$ no matter in which state the chain began at time 0.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.3. Examples of regular Markov chains }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  The Markov chain whose transition probability matrix is 
\begin{eqnarray}
P=
\begin{blockarray}{ccc}
& 0 & 1  \\
\begin{block}{c[cc]}
  0   & 1-a & a  \\ 
  1   & b & 1-b \\
\end{block}
\end{blockarray}
\label{eq4-1}
\end{eqnarray}
is regular when $0 < a, b < 1$, and in this case, the limiting distribution is 
\[\pi = \left(\frac{b}{a + b}, \frac{a}{a + b}\right). \]

\item  To give a numerical example, we will suppose that $a=2/3, b=3/4$, and calculate $P^n$. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
%\begin{frame}{4.4.  }
%
%\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%%每页详细内容
%
%\begin{itemize}
%
%\item  Sociologists often assume that the social classes of successive generations in a family can be regarded as a Markov chain. Thus, the occupation of a son is assumed to depend only on his father's occupation and not on his grandfather's. Suppose that such a model is appropriate and that the transition probability matrix is given by
%\begin{eqnarray*}
%P=
%\begin{blockarray}{cccc}
%& Lower & Middle & Upper  \\
%\begin{block}{c[ccc]}
%  Lower   & 0.40 & 0.50 & 0.10  \\ 
%  Middle  & 0.05 & 0.70 & 0.25  \\ 
%  Upper   & 0.05 & 0.50 & 0.45  \\ 
%\end{block}
%\end{blockarray}
%%\label{eq4-1}
%\end{eqnarray*}
%
%\item  For such a population, what fraction of people are middle class in the long run?
%
%
%\end{itemize}
%
%\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
%\begin{frame}{4.1.  }
%
%\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%%每页详细内容
%
%\begin{itemize}
%
%\item  For the time being, we will answer the question by computing sufficiently high powers of $P^n$. 
%
%\item  A better method for determining the limiting distribution will be presented later in this section.
%
%\item  Note that we have not computed $P^n$ for consecutive values of $n$ but have speeded up the calculations by evaluating the successive squares $P^2$,$P^4$,$P^8$.
%
%\item  In the long run, approximately 62.5\% of the population are middle class under the assumptions of the model.
%
%
%\end{itemize}
%
%\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.4.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Consider the transition probability matrix
\begin{eqnarray*}
P=\begin{bmatrix} 
 0.9 & 0.1 & 0 & 0 & 0 & 0 & 0  \\
 0.9 & 0 & 0.1 & 0 & 0 & 0 & 0  \\
 0.9 & 0 & 0 & 0.1 & 0 & 0 & 0  \\
 0.9 & 0 & 0 & 0 & 0.1 & 0 & 0  \\
 0.9 & 0 & 0 & 0 & 0 & 0.1 & 0  \\
 0.9 & 0 & 0 & 0 & 0 & 0 & 0.1  \\
 0.9 & 0 & 0 & 0 & 0 & 0 & 0.1  \\
\end{bmatrix}
%\label{eq4-1}
\end{eqnarray*}

\item  We recognize this as a {\color{red}success runs} Markov chain. 

\item  We see that $P^8$ has all strictly positive entries, and therefore, $P$ is regular. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.5. Limiting theorem of Regular Markov chains. }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item   Let $P$ be a regular transition probability matrix on the states $0,1,...,N$. 

Then the limiting distribution $\pi = (\pi_0, \pi_1, \cdots , \pi_N)$ is the unique nonnegative solution of the equations
\begin{eqnarray}
\pi_j &=& \sum_{k=0}^{N} \pi_k P_{kj},\quad j=0,1,\cdots, N,  \label{eq4-2} \\
\sum_{k=0}^{N} \pi_k &=& 1.\label{eq4-3}
\end{eqnarray}


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.6. Proof.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{enumerate}

\item  Because the Markov chain is regular, we have a limiting distribution, $\lim_{n\to\infty} P^{(n)}_{ij} = \pi_j$, for which  $\sum_{k=0}^{N}\pi_k = 1$. 

\item  Write $P^n$ as the matrix product $P^{n-1}P$ in the form
\begin{eqnarray}
P^{(n)}_{ij} =  \sum_{k=0}^{N} P^{(n-1)}_{ik} P_{kj},\quad j = 0,\cdots,N,
\label{eq4-4}
\end{eqnarray}
and now let $n \to\infty$. 

\item  Then, $P^{(n)}_{ij} \to \pi_j$, while $P^{(n-1)}_{ik} \to \pi_k$, and (\ref{eq4-4}) passes into the claimed $$\pi_j = \sum_{k=0}^{N} \pi_kP_{kj}.$$

\item  It remains to show that the solution is unique. 

\end{enumerate}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.7.  Doubly stochastic matrices}

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  A transition probability matrix is called {\color{red}doubly stochastic} if the columns sum to one as well as the rows. 
Formally, $P = (P_{ij})$ is doubly stochastic if
\begin{eqnarray*}
P_{ij} \ge 0 \text{ and }  \sum_{k} P_{ik} = \sum_{k}P_{kj} =1 \text{ for all } i,j.
%\label{eq4-4}
\end{eqnarray*}

\item  Consider a doubly stochastic transition probability matrix on the $N$ states $0,1,\cdots, N-1$. 
If the matrix is regular, then the unique limiting distribution is the uniform distribution $\pi = (1/N,\cdots,1/N)$. 

\item  Because there is only one solution to $\pi_j =  \sum_{k}\pi_kP_{kj}$ and  $\sum_{k}\pi_k = 1$ when $P$ is regular, we need only to check that $\pi = (1/N,\cdots,1/N)$ is a solution where $P$ is doubly stochastic in order to establish the claim. 

%\item  By using the doubly stochastic feature  $\sum_{j}P_{jk} = 1$, we verify that $\frac{1}{N} = \sum_j \frac{1}{N} P_{jk}$.  

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.8. Interpretation of the limiting distribution}

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Given a regular transition matrix $P$ for a Markov process $\{X_n\}$ on the $N+1$ states $0,1,...,N$, we solve the linear equations
\begin{eqnarray*}
\pi_i &=&  \sum_{k=0}^{N} \pi_k P_{ki} \quad \text{ for } i = 0,1,\cdots,N, \\
1 &=& \pi_0 + \pi_1 + \cdots + \pi_N.
%\label{eq4-4}
\end{eqnarray*}

\item  The primary interpretation of the solution $(\pi_0,\cdots,\pi_N)$ is as the limiting distribution
\begin{eqnarray*}
\pi_j = \lim\limits_{n\to\infty} P^{(n)}_{ij} = \lim\limits_{n\to\infty} \mathbb{P}\{X_n =j \mid X_0 =i\}.
%\label{eq4-4}
\end{eqnarray*}

\item  In words, after the process has been in operation for a long duration, the probability of finding the process in state $j$ is $\pi_j$, irrespective of the starting state.

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.9.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  There is a second interpretation of the limiting distribution $\pi = (\pi_0,\pi_1,\cdots,\pi_N)$ that plays a major role in many models. 

\item  We claim that $\pi_j$ also gives the long run mean fraction of time that the process $\{X_n\}$ is in state $j$. 

\item  Thus, if each visit to state $j$ incurs a `cost' of $c_j$, then the long run mean cost per unit time associated with this Markov chain is
$\sum\limits_{j=0}^{N} \pi_j c_j.$

\item  To verify this interpretation, recall that if a sequence $a_0,a_1,\cdots$ of real numbers converges to a limit $a$, then the averages of these numbers also converge in the manner 
$
\lim\limits_{m\to\infty} \frac{1}{m} \sum\limits_{k=0}^{m-1} a_k = a.
$

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.10. Optimal Replacement Rules }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  A common industrial activity is the periodic inspection of some system as part of a procedure for keeping it operative. 

\item  After each inspection, a decision must be made whether or not to alter the system at that time. 

\item  If the inspection procedure and the ways of modifying the system are fixed, an important problem is that of determining, according to some cost criterion, the optimal rule for making the appropriate decision. 

\item  Here, we consider the case in which the only possible act is to replace the system with a new one.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.11.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Suppose that the system is inspected at equally spaced points in time and that after each inspection it is classified into one of the $L+1$ possible states $0,1,\cdots,L$. 

\item  A system is in state 0 if and only if it is new and is in state $L$ if and only if it is inoperative. 

\item  Let the inspection times be $n=0,1,\cdots$, and let $X_n$ denote the observed state of the system at time $n$. 

\item  In the absence of replacement, we assume that $\{X_n\}$ is a Markov chain with transition probabilities $p_{ij} = \mathbb{P}\{X_{n+1} = j\mid X_n = i\}$ for all $i$, $j$, and $n$.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.12.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  It is possible to replace the system at any time before failure. 

\item  The motivation for doing so may be to avoid the possible consequences of further deterioration or of failure of the system. 

\item  A replacement rule, denoted by $R$, is a specification of those states at which the system will be replaced. 

\item  Replacement takes place at the next inspection time. 

\item  A replacement rule $R$ modifies the behavior of the system and results in a modified Markov chain $\{X_n(R);n = 0,1,\cdots\}$. 

\item  The corresponding modified transition probabilities $p_{ij}(R)$ are given by $p_{ij}(R) = p_{ij}$ if the system is not replaced at state $i$, and $p_{i0}(R) = 1, p_{ij}(R) = 0, j\neq 0$ if the system is replaced at state $i$.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.13.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  It is assumed that each time the equipment is replaced, a replacement cost of $K$ units is incurred. 

\item  Further it is assumed that each unit of time the system is in state $j$ incurs an operating cost of $a_j$. 

\item  Note that $a_L$ may be interpreted as failure (inoperative) cost. 

\item  This interpretation leads to the one period cost function $c_i(R)$ given for $i = 0,\cdots,L$ by
\begin{eqnarray*}
 c_i(R)= \left\{ \begin{array}{ll}
 a_i, & \text{ if } p_{i0}(R) = 0, \\
 K+a_i, & \text{ if } p_{i0}(R)=1.
 \end{array}\right.
%\label{eq4-2-4}
\end{eqnarray*}


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{3.14.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  We are interested in replacement rules that minimize the expected long run time average cost. 
%
This cost is given by the expected cost under the limiting distribution for the Markov chain $\{X_n(R)\}$. 

\item  Denoting this average cost by $\phi(R)$, we have
%\begin{eqnarray*}
$\phi(R) = \sum_{i=0}^{L} \pi_i(R)c_i(R),
$%\label{eq4-2-4}
%\end{eqnarray*}
 where $\pi_i(R) = \lim_{n\to\infty} \mathbb{P}\{X_n(R) = i\}$. 

\item  We define a {\color{red}control limit rule} to be a replacement rule of the form `Replace the system if and only if $X_n \ge k$',
where $k$, called the control limit, is some fixed state between 0 and $L$. 

\item  We let $R_k$ denote the control limit rule with control limit equal to $k$. Then, $R_0$ is the rule `Replace the system at every step', and $R_L$ is the rule `Replace only upon failure (State $L$)'.

\end{itemize}

\end{frame}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%









