%!TEX program = xelatex
%!TEX TS-program = xelatex
%!TEX encoding = UTF-8 Unicode

\documentclass[12pt,t,aspectratio=169,mathserif]{beamer}
%Other possible values are: 1610, 149, 54, 43 and 32. By default, it is to 128mm by 96mm(4:3).
%run XeLaTeX to compile.

\input{wang-slides-preamble.tex}

\begin{document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title{Applied Stochastic Processes - Lecture 01}
\subtitle{(3.1 - 3.3) Markov Chains - Concepts and Examples}
%(3.1-3.3) 
%\institute{上海立信会计金融学院}
\author{MAP SK}
\date{March 9, 2021}

\maketitle

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\begin{enumerate}
%\item 第一讲：March 9 (3.1-3.3) Markov Chains - Concepts and Examples
%\item 第二讲：March 16 (3.4-3.5) Markov Chains - First Step Analysis, More Examples
%\item 第三讲：March 23 (4.1-4.4) Markov Chains - Long Run Behaviors
%\item 第四讲：April 13 (5.1-5.2) Poisson Processes - Concept and Examples 
%\item 第五讲：April 20 (5.3-5.4) Poisson Processes - Associated Distributions  
%\item 第六讲：May 11 (6.1-6.1) Markov Chains - Continuous Time, Pure Birth Processes
%\item 第七讲：May 18 (7.1-7.2) Renewal Processes - Renewal Function, Block Replacement
%\item 第八讲：May 25 (8.1-8.2) Brownian Motions -  the Reflection Principle 
%\end{enumerate}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{Contents }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}
\item  Stochastic processes
\item  Discrete-time Markov chain
\item  Transition probability
\item  Transition probability matrix
\item  Chapman-Kolmogorov equations
\item  An inventory model
\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.1.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  A {\color{red}stochastic process} is a family of random variables $X_t$, where $t$ is a parameter running over a suitable index set $T$. Where convenient, we will write $X(t)$ instead of $X_t$. 

\item  In a common situation, the index $t$ corresponds to discrete units of time, and the index set is $T = \{0, 1, 2, \cdots\}$. In this case, $X_t$ might represent the outcomes at successive tosses of a coin, repeated responses of a subject in a learning experiment, or successive observations of some characteristics of a certain population. 

\item  Stochastic processes for which $T = [0, \infty)$ are particularly important in applications. Here $t$ often represents time, but different situations also frequently arise. For example, $t$ may represent distance from an arbitrary origin, and $X_t$ may indicate the number of defects in the interval $(0,t]$ along a thread, or the number of cars in the interval $(0,t]$ along a highway.

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.2.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Stochastic processes are distinguished by their {\color{red}state space}, or by the range of possible values for the random variables $X_t$, by their index set $T$, and by the dependence relations among the random variables $X_t$. 

\item  The most widely used classes of stochastic processes are systematically and thoroughly presented for study in the following chapters, along with the mathematical techniques for calculation and analysis that are most useful with these processes. 

\item  The use of these processes as models is taught by example. Sample applications from many and diverse areas of interest are an integral part of the exposition.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.3. Discrete-time Markov chain }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  A {\color{red}Markov process} $\{X_t\}$ is a stochastic process with the property that, given the value of $X_t$,the values of $X_s$ for $s>t$ are not influenced by the values of $X_u$ for $u<t$. 

\item  In words, the probability of any particular future behavior of the process, when its current state is known exactly, is not altered by additional knowledge concerning its past behavior. 

\item  A {\color{red}discrete-time Markov chain} is a Markov process whose state space is a finite or countable set, and whose (time) index set is $T = \{0, 1, 2, \cdots\}$. In formal terms, the Markov property is that
\begin{equation}
\mathbb{P}\{ X_{n+1} = j\mid X_0 = i_0,\cdots ,X_{n-1} = i_{n-1},X_n = i \}
= \mathbb{P}\{ X_{n+1} = j\mid X_n = i \}
\label{eq3-1}
\end{equation}
for all time points $n$ and all states $i_0,\cdots,i_{n-1},i,j$.

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.4. Transition probability }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  It is frequently convenient to label the state space of the Markov chain by the non-negative integers $\{0, 1, 2, \cdots\}$, which we will do unless the contrary is explicitly stated, and it is customary to speak of $X_n$ as being in state $i$ if $X_n = i$.

\item  The probability of $X_{n+1}$ being in state $j$ given that $X_n$ is in state $i$ is called the {\color{red}one-step transition probability} and is denoted by $P^{n,n+1}_{ij}$. That is,
\begin{equation}
P^{n,n+1}_{ij} = \mathbb{P}\{ X_{n+1} = j \mid X_n = i \}.
\label{eq3-2}
\end{equation}

\item  The notation emphasizes that in general the {\color{red}transition probabilities} are functions not only of the initial and final states but also of the time of transition as well. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.5.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  When the one-step transition probabilities are independent of the time variable $n$, we say that the Markov chain has {\color{red}stationary} transition probabilities. 

\item  Since the vast majority of Markov chains that we shall encounter have stationary transition probabilities, we limit our discussion to this case. 

\item  Then, $P^{n,n+1}_{ij} = P_{ij}$ is independent of $n$, and $P_{ij}$ is the conditional probability that the state value undergoes a transition from $i$ to $j$ in one trial. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.6.  Transition probability matrix }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  It is customary to arrange these numbers $P_{ij}$ in a matrix, %in the infinite square array
{\small 
\begin{equation*}
P=
\begin{blockarray}{ccccc}
& 0 & 1 & 2 & \cdots \\
\begin{block}{c[cccc]}
  0   & P_{00} & P_{01} & P_{02} & \cdots \\ 
  1   & P_{10} & P_{11} & P_{12} & \cdots \\ 
  2   & P_{20} & P_{21} & P_{22} & \cdots \\ 
  \vdots   & \vdots & \vdots & \vdots &  \\ 
\end{block}
\end{blockarray}.
\end{equation*}
}
and refer to $P = (P_{ij})$ as the {\color{red}Markov matrix} or {\color{red}transition probability matrix} of the process. 


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.7. Chapman-Kolmogorov Equations }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  A Markov chain is completely defined by its one-step transition probability matrix and the specification of a probability distribution on the state of the process at time 0. 

\item  The analysis of a Markov chain concerns mainly the calculation of the probabilities of the possible realizations of the process. 

\item  Central in these calculations are the $n$-step transition probability matrices $P^{(n)} = (P^{(n)}_{ij})$. Here, $P^{(n)}_{ij}$ denotes the probability that the process goes from state $i$ to state $j$ in $n$ transitions. Formally,
\begin{equation}
P^{(n)}_{ij} = \mathbb{P}\{ X_{m+n} = j \mid X_m = i \}. 
\label{eq3-10}
\end{equation}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.8.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  Observe that we are dealing only with {\color{blue}temporally homogeneous} processes having stationary transition probabilities, since otherwise the left side of \eqref{eq3-10} would also depend on $m$.

\item  The Markov property allows us to express \eqref{eq3-10} in terms of $(P_{ij})$ as stated in the following theorem.

\item  Theorem 3.1. The $n$-step transition probabilities of a Markov chain satisfy
\begin{equation}
P^{(n)}_{ij} = \sum_{k=0}^{\infty} P_{ik}P^{(n-1)}_{kj},
\label{eq3-11}
\end{equation}
where we define
\begin{equation*}
P^{(0)}_{ij} = \begin{cases} 1 & \text{ if } i=j, \\ 0 & \text{ if } i\neq j. \end{cases}
\end{equation*}

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.9.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  From the theory of matrices, we recognize the relation \eqref{eq3-11} as the formula for matrix multiplication so that $P^{(n)} = P \times  P^{(n-1)}$. 

\item  By iterating this formula, we obtain 
\begin{equation}
P^{(n)} = \underbrace{P\times P\times \cdots \times P}_{n \text{ factors}} = P^n; 
\label{eq3-12}% renamed: eq3-11 was already used for the Chapman-Kolmogorov recursion
\end{equation}
in other words, the $n$-step transition probabilities $P^{(n)}_{ij}$ are the entries in the matrix $P^n$, the $n$th power of $P$.

\item  Proof. The proof proceeds via a {\color{red}first step analysis}, a breaking down, or analysis, of the possible transitions on the first step, followed by an application of the Markov property. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.10.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Markov chains can be used to {\color{red}model and quantify} a large number of natural physical, biological, and economic phenomena that can be described by them. 

\item  This is enhanced by the amenability of Markov chains to quantitative manipulation. 

\item  In this section, we give several examples of Markov chain models that arise in various parts of science. 

\item  General methods for computing certain functionals on Markov chains are derived in the following section.

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.11.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  Consider a situation in which a commodity is stocked in order to satisfy a continuing demand. 

\item  We assume that the {\color{red}replenishment} of stock takes place at the end of periods labeled $n=0,1,2,\cdots$, and we assume that the total aggregate demand for the commodity during period $n$ is a random variable $\xi_n$ whose distribution function is independent of the time period,
\begin{equation}
\mathbb{P}\{\xi_n = k\} = a_k \text{ for } k = 0,1,2,\cdots,
\label{eq3-14}
\end{equation}
where $a_k \ge 0$ and  $\sum_{k=0}^{\infty} a_k = 1$. 

\item  The stock level is examined at the end of each period. 

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.12. An Inventory Model }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}


\item  A replenishment policy is prescribed by specifying two nonnegative critical numbers $s$ and $S > s$ whose interpretation is, if the end-of-period stock quantity is not greater than $s$, then an amount sufficient to increase the quantity of stock on hand up to the level $S$ is immediately procured. 

\item  If, however, the available stock is in excess of $s$, then no replenishment of stock is undertaken. 

\item  Let $X_n$ denote the quantity on hand at the end of period $n$ just prior to restocking. 

\item  The states of the process $\{X_n\}$ consist of the possible values of stock size
\begin{equation*}
S, S-1, \cdots, 1, 0, -1, -2, \cdots,
\end{equation*}
where a negative value is interpreted as an unfilled demand that will be satisfied immediately upon restocking.

\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.13.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  The process $\{X_n\}$ is depicted in the Figure.

\begin{figure}
\centering
\includegraphics[height=0.6\textheight, width=0.9\textwidth, keepaspectratio]{pic/inventory-process.png}
% \caption{ }
\end{figure}


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[fragile=singleslide]{6.1.1. }
\begin{frame}{1.14.  }

\vspace{-0.4cm}\noindent\makebox[\linewidth]{\rule{\paperwidth}{0.4pt}}
%每页详细内容

\begin{itemize}

\item  According to the rules of the inventory policy, the stock levels at two consecutive periods are connected by the relation
\begin{equation}
X_{n+1} = \begin{cases}
X_n-\xi_{n+1}, & \text{ if } s< X_n <S, \\
S-\xi_{n+1}, & \text{ if } X_n\le s,
\end{cases}
\label{eq3-15}
\end{equation}
where $\xi_n$ is the quantity demanded in the $n$th period, stipulated to follow the probability law \eqref{eq3-14}. 

\item  If we assume that the successive demands $\xi_1,\xi_2,\cdots$ are independent random variables, then the stock values $X_0,X_1, X_2,\cdots$ constitute a {\color{red}Markov chain} whose {\color{red}transition probability matrix} can be calculated in accordance with relation \eqref{eq3-15}.


\end{itemize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%









