
%!TEX program = xelatex
%!TEX TS-program = xelatex
%!TEX encoding = UTF-8 Unicode

\documentclass[10pt]{article} 

\input{wang_preamble.tex}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{titling}
\setlength{\droptitle}{-2cm}   % This is your set screw

%%文档的题目、作者与日期
\author{Class 2019 Math and Applied Math }
\title{Applied Stochastic Processes - Quiz 01 - Solution}
%\date{\vspace{-3ex}}
%\renewcommand{\today}{\number\year \,年 \number\month \,月 \number\day \,日}
%\date{2021 年 2 月 28 日}
\date{April 6, 2021}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{document}

\maketitle

\begin{enumerate}

\item
A Markov chain $\{X_n|n\ge 0\}$ on state space $S=\{0,1,2\}$ has the transition probability matrix $P$ and initial distribution $p_0,p_1,p_2$.
\begin{eqnarray*}
P=\begin{blockarray}{cccc}
  & 0 & 1 & 2 \\
\begin{block}{c[ccc]}
0& 0.1&0.1&0.8\\
1& 0.3&0.3&0.4\\
2& 0.2&0.3&0.5\\
\end{block}
\end{blockarray},
\hspace{1cm}
\left\{
\begin{array}{l}
p_0=P[X_0=0]=0.2,\\
p_1=P[X_0=1]=0.2,\\
p_2=P[X_0=2]=0.6.\\
\end{array}\right.
\end{eqnarray*}
\begin{enumerate}
\item Determine the probability $P[X_0=0,X_1=1,X_2=2]$.
\item Determine the conditional probability $P[X_2=2|X_1=2,X_0=2]$.
\end{enumerate}

{\it \color{red} \underline{Solution.}

(a) By the definition of conditional probability and the Markov property, we have 
\begin{eqnarray*}
&&P[X_0=0,X_1=1,X_2=2]\\
&=&P[X_0=0]P[X_1=1|X_0=0]P[X_2=2|X_1=1,X_0=0]\\
&=&P[X_0=0]P[X_1=1|X_0=0]P[X_2=2|X_1=1]\\
&=&p_{0}p_{01}p_{12}=(0.2)(0.1)(0.4)=0.008.
\end{eqnarray*}

(b) By the Markov property and the transition probability matrix, we have 
\begin{eqnarray*}
&&P[X_2=2 | X_1=2,X_0=2]\\
&=&P[X_2=2|X_1=2]\\
&=&p_{22}=0.5.
\end{eqnarray*}

}


%\item
%Consider a Markov chain $\{X_n|n\ge 0\}$ with state space $S=\{1,2\}$.
%Suppose that $p_{12}=1/4$, $p_{11}=3/4$, $p_{21}=1/5$, and $p_{22}=4/5$.
%\begin{enumerate}
%%\item
%%Write the transition probability matrix $P=(p_{ij})$.
%\item
%Compute the first return distribution $f^{(n)}_{11}, n\ge 1$, and the return probability $f_{11}$.
%\item
%Compute the mean recurrence time $\mu_1=\sum\limits_{n=1}^{\infty}nf^{(n)}_{11}$.
%\item
%Compute the period of the state 1, and determine if the state 1 is ergodic.
%%\item
%%Do the same as in (b,c,d) to the state 2.
%\item
%Consider CK equation. Compute $\pi_1=\lim\limits_{n\to\infty}p^{(n)}_{i1}$ and $\pi_2=\lim\limits_{n\to\infty}p^{(n)}_{i2}$.
%\item
%Compute the limiting distribution and stationary distribution.
%\end{enumerate}
%
%{\it \color{red} \underline{Solution.}
%
%(a) $f^{(1)}_{11}=(3/4)$, $f^{(2)}_{11}=(1/4)(1/5)$, $f^{(3)}_{11}=(1/4)(4/5)(1/5)$, and thus
%\begin{eqnarray*}
%f^{(n)}_{11}=(1/4)(4/5)^{n-2}(1/5) \textrm{ for } n\ge 3.
%\end{eqnarray*}
%
%The return probability is
%\begin{eqnarray*}
%f_{(11)}=f^{(1)}_{11}+f^{(2)}_{11}+\cdots+f^{(n)}_{11}+\cdots=??
%\end{eqnarray*}
%
%(b) The mean recurrence time is
%\begin{eqnarray*}
%\mu_{11}=f^{(1)}_{11}+2f^{(2)}_{11}+\cdots+nf^{(n)}_{11}+\cdots=??
%\end{eqnarray*}
%
%(c) The period of the state 1 is $d(1)=gcd\{n\ge 1| p^{(n)}_{11}>0\}=gcd\{1,2,3,\cdots\}=1$. Since $f_{11}=1$ and $\mu_{11}<\infty$, the state 1 is ergodic.
%
%(d) The 1-step and $n$-step transition probability matrices are
%\begin{eqnarray*}
%P=\begin{bmatrix} p_{11} & p_{12} \\ p_{21} & p_{22} \\ \end{bmatrix}
%=\begin{bmatrix} 3/4 & 1/4 \\ 1/5 & 4/5 \\ \end{bmatrix}, \hspace{0.5cm}
%P^{(n)}=\begin{bmatrix} p^{(n)}_{11} & p^{(n)}_{12} \\ p^{(n)}_{21} & p^{(n)}_{22} \\ \end{bmatrix}=??
%\end{eqnarray*}
%
%CK equation says that $P^{(n)}=P^n$. Thus we need to compute $P^n$. Compute the eigenvalue and eigenvector we get $P=Q\Lambda Q^{-1}$, where $\Lambda$ is the diagonal matrix with eigenvalues on the diagonal, and each column of $Q$ is the corresponding eigenvector. Thus we have
%\begin{eqnarray*}
%P^n = (Q\Lambda Q^{-1})(Q\Lambda Q^{-1})\cdots (Q\Lambda Q^{-1})=Q\Lambda^n Q^{-1}=??
%\end{eqnarray*}
%
%By taking the limit, we get the limiting distribution
%\begin{eqnarray*}
%\lim\limits_{n\to\infty} P^n = \begin{bmatrix} \pi_{1} & \pi_{2} \\ \pi_{1} & \pi_{2} \\ \end{bmatrix}.
%\end{eqnarray*}
%
%(e) For stationary distribution, we suppose it is $\pi=[\pi_1,\pi_2]$. Then by the definition, it must satisfy $\pi P=\pi$, that is,
%\begin{eqnarray*}
%\begin{bmatrix} \pi_{1} & \pi_{2} \end{bmatrix}
%\begin{bmatrix} p_{11} & p_{12} \\ p_{21} & p_{22} \\ \end{bmatrix}
%=\begin{bmatrix} \pi_{1} & \pi_{2} \end{bmatrix}.
%\end{eqnarray*}
%
%Finally we see that $\pi=[4/9,5/9]$.
%
%}

%\item
%%Problem 3.3.7.
%A component in a system is placed into service, where it operates until its failure, whereupon it is replaced at the end of the period with a new component having statistically identical properties, and the process repeats. The probability that a component lasts for $k$ periods is $a_k$, for $k= 1,2,\cdots$. Let $X_n$ be the remaining life of the component in service at the end-of-period $n$. Then, $X_n = 0$ means that $X_{n+1}$ will be the total operating life of the next component. Give the transition probabilities for the Markov chain $\{X_n|n\ge 0\}$.
%
%{\it \color{red} \underline{Solution.}
%The state space is $S=\{0,1,2,3,\cdots\}$. For $k\ge 1$, $X_n=k$ means that the remaining lifetime of the component at time $t=n$ is $k$, thus its remaining lifetime at time $t=n+1$ is $k-1$, thus $P[X_{n+1}=k-1|X_n=k]=1$ for $k\ge 1$.
%
%Now $X_n=0$ means that the remaining lifetime of the component at time $t=n$ is zero, thus a new component is applied at time $n+1$. Since each component has a lifetime $k$ with probability $a_k$, where the possible values of $k$ are $1,2,3,\cdots$, so in this condition, $P[X_{n+1}=k-1]=a_k$.
%
%Finally we found the 1-step transition probability matrix is
%\begin{eqnarray*}
%\begin{blockarray}{cccccc}
%  & 0 & 1 & 2 & 3 & \cdots \\
%\begin{block}{c[ccccc]}
%0 & a_1&a_2&a_3&a_4&\cdots \\
%1 & 1&0&0&0&\cdots \\
%2 & 0&1&0&0&\cdots \\
%3 & 0&0&1&0&\cdots\\
%\vdots & \vdots&\vdots&\vdots&\vdots&\\
%\end{block}
%\end{blockarray}
%\end{eqnarray*}
%
%\begin{comment}
%\begin{eqnarray*}
%P=\begin{bmatrix}
%a_1&a_2&a_3&a_4&\cdots \\
%1&0&0&0&\cdots \\
%0&1&0&0&\cdots \\
%0&0&1&0&\cdots\\
%\vdots&\vdots&\vdots&\vdots&\\
%\end{bmatrix}
%\end{eqnarray*}
%\end{comment}
%
%}

%\newpage

\item
%Ex.3.4.4
A coin is tossed repeatedly until two successive heads appear. Find the mean number of tosses required.

Hint: Let $X_n$ be the cumulative number of successive heads. The state space is $\{0,1,2\}$. Determine the mean time to reach state 2 starting from state 0 by invoking a first step analysis.


{\it \color{red} \underline{Solution.}
An example sequence of tosses shows the definition of $X_n$:

\begin{table}[h]
\centering
\begin{tabular}{c|cccccccccc}
$n$    &0&1&2&3&4&5&6&7&8&9\\
\hline
toss   &&H&T&H&T&H&T&T&H&H\\
\hline
$X_n$ &0&1&0&1&0&1&0&0&1&2\\
\end{tabular}
\end{table}

Thus the state space is $S=\{0,1,2\}$, and the transition probability matrix is
\begin{eqnarray*}
P=\begin{blockarray}{cccc}
  & 0 & 1 & 2 \\
\begin{block}{c[ccc]}
0 & 1/2 &1/2 &0   \\
1 & 1/2 &0   &1/2 \\
2 & 0   &0   &1   \\
\end{block}
\end{blockarray}
\end{eqnarray*}
Let $u_{02}$ and $u_{12}$ be the mean numbers of tosses required to reach state 2 from states 0 and 1, respectively. Then by first step analysis, we have the following system of equations,
\begin{eqnarray*}\left\{\begin{array}{rcl}
u_{02} &=& \frac{1}{2}(1+u_{02}) +\frac{1}{2}(1+u_{12}) \\
u_{12} &=& \frac{1}{2}(1) +\frac{1}{2}(1+u_{02}). 
\end{array}\right.
\end{eqnarray*}
This gives $u_{12}=4$ and $u_{02}=6$. 

%We need to compute the mean number of steps to reach the state 2, given that initially $X_0=0$. It remains to compute $f^{(n)}_{02}$ for $n=1,2,3,\cdots$. Then we have
%\begin{eqnarray*}
%\mu_{02}=f^{(1)}_{02}+2f^{(2)}_{02}+\cdots+nf^{(n)}_{02}+\cdots.
%\end{eqnarray*}
%
%We have the following relation between $f^{(n)}_{ij}$ and $p^{(n)}_{ij}$, which helps us find $f^{(n)}_{02}$ from the matrix $P$.
%In this problem, $p^{(k)}_{22}=1$ for all $k$, so it will simplify the computation.
%\begin{eqnarray*}
%p^{(1)}_{ij} &=& f^{(1)}_{ij}\\
%p^{(2)}_{ij} &=& f^{(1)}_{ij}p^{(1)}_{jj}+f^{(2)}_{ij}\\
%p^{(3)}_{ij} &=& f^{(1)}_{ij}p^{(2)}_{jj}+f^{(2)}_{ij}p^{(1)}_{jj}+f^{(3)}_{ij}\\
%p^{(n)}_{ij} &=& f^{(1)}_{ij}p^{(n-1)}_{jj}+f^{(2)}_{ij}p^{(n-2)}_{jj}+\cdots+f^{(n-1)}_{ij}p^{(1)}_{jj}+f^{(n)}_{ij}.
%\end{eqnarray*}

}



\item
%Ex.3.3.5
An urn initially contains a single red ball and a single green ball. A ball is drawn at random, removed, and replaced by a ball of the opposite color, and this process repeats so that there are always exactly two balls in the urn. Let $X_n$ be the number of red balls in the urn after $n$ draws, with $X_0 = 1$. Specify the transition probabilities for the Markov chain $\{X_n|n\ge 0\}$. What is the initial distribution?

{\it \color{red} \underline{Solution.}
The state space is $S=\{0,1,2\}$.

Let $p_i := P[X_0=i]$ for $i=0,1,2$ be the initial distribution. Then $p_0=0$, $p_1=1$, $p_2=0$.

The transition probability matrix is
\begin{eqnarray*}
P=\begin{bmatrix}
0&1&0\\
0.5&0&0.5\\
0&1&0
\end{bmatrix}.
\end{eqnarray*}

}



%\item
%%Ex.3.4.5
%A coin is tossed repeatedly until either two successive heads appear or two successive tails appear. Suppose the first coin toss results in a head. Find the probability that the game ends with two successive tails.
%
%{\it \color{red} \underline{Solution.}
%We let the state space to be $S=\{HT,TH,TT,HH\}$, and the transition matrix is
%\begin{eqnarray*}
%P=\begin{blockarray}{ccccc}
%  & HT & TH & TT & HH \\
%\begin{block}{c[cccc]}
%HT & 0   &1/2 &1/2 &0    \\
%TH & 1/2 &0   &0   &1/2  \\
%TT & 0   &0   &1   &0    \\
%HH & 0   &0   &0   &1    \\
%\end{block}
%\end{blockarray}
%\end{eqnarray*}
%
%Since the first toss is H, we make the initial distribution based on the situation after two tosses:
%\begin{table}[h]
%\centering
%\begin{tabular}{r|cccc}
%$Y_0$ &HT&TH&TT&HH\\
%\hline
%Probability &1/2&0&0&1/2\\
%\end{tabular}
%\end{table}
%
%Thus the initial distribution is $\pi_0=\begin{bmatrix} 1/2 & 0 & 0 & 1/2 \end{bmatrix}$.
%
%By some mathematical software we compute the limiting distribution
%\begin{eqnarray*}
%\lim\limits_{n\to\infty} \pi_0\cdot P^n =\begin{bmatrix} 0 & 0 & 1/3 & 2/3 \end{bmatrix}.
%\end{eqnarray*}
%
%}
%

%\item
%%Problem.3.4.14.
%\begin{enumerate}
%\item   A single die is rolled repeatedly. The game stops the first time that the sum of two successive rolls is either 5 or 7. What is the probability that the game stops at a sum of 5?
%%Problem.3.5.4.
%\item   Martha has a fair die with the usual six sides. She throws the die and records the number. She throws the die again and adds the second number to the first. She repeats this until the cumulative sum of all the tosses first exceeds 10. What is the probability that she stops at a cumulative sum of 13?
%\end{enumerate}
%
%{\it \color{red} \underline{Solution.}
%
%(a) Let $\{\xi_1,\xi_2,\cdots,\xi_n,\cdots\}$ be a sequence of independent identically distributed random variables with the following distribution
%\begin{table}[h]
%\centering
%\begin{tabular}{c|cccccc}
%$\xi_n$    &1&2&3&4&5&6\\
%\hline
%Probability   &1/6  &1/6  &1/6  &1/6  &1/6  &1/6  \\
%\end{tabular}
%\end{table}
%
%Define a Markov chain $\{X_n=(\xi_{n-1},\xi_n), n\ge 2\}$. Thus the state space is
%\begin{eqnarray*}
%S=\{(1,1),(1,2),\cdots, (1,6),(2,1),\cdots, (6,6)\}.
%\end{eqnarray*}
%
%There are 36 states. Without the consideration of stop-of-game criterion, which is successive sum being 5 or 7, the general transition probability from state $(i,j)$ to $(k,\ell)$ is $1/6$ if $j=k$, and zero if $j\neq k$.
%
%Now with the stop-of-game criterion, we make the states $(1,4),(2,3),(3,2),(4,1)$ and the states $(1,6),(2,5),(3,4),(4,3),(5,2),(6,1)$ the absorbing states, that is, once the process enters these states, it stays there. We name these states as stop-with-five states and stop-with-seven states. And we write out the matrix $P$ with size $36\times 36$.
%
%Now we consider the initial distribution. It is the distribution of the random variable $X_2$. It takes values in the set $S$ with equal probability 1/36. And we write it as a row vector $ipd$ with size $1\times 36$. Here ipd is for 'initial probability distribution'.
%
%Finally we compute the limit of $ipd*P^n$ as $n\to\infty$, and get the sum of probabilities the process ends up in stop-with-five states, and the sum of probabilities the process ends up in stop-with-seven states. The following program says the probabilities are approximately 0.3856 and 0.6144.
%
%
%(b) Let $\{\xi_1,\xi_2,\cdots,\xi_n,\cdots\}$ be the same sequence random variables which records the rolls of dice.
%Define $X_n=\xi_1+\cdots+\xi_n$, then $\{X_n,n\ge 1\}$ is a Markov chain. The state space is $S=\{1,2,\cdots,n,\cdots\}$.
%
%Now we consider the stop-of-game condition: once $X_n>10$ we let $X_{n+1}=X_n$. Thus the state space becomes
%\begin{eqnarray*}
%S=\{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16\}.
%\end{eqnarray*}
%
%Since $X_{n+1}=X_n+\xi_{n+1}$, we can write out the nonzero transition probabilities
%\begin{eqnarray*}
%&& p_{12}=p_{13}=\cdots=p_{17}=1/6,\\
%&& p_{23}=p_{24}=\cdots=p_{28}=1/6,\\
%&& \cdots\\
%&& p_{10,11}=p_{10,12}=\cdots=p_{10,16}=1/6,\\
%&& p_{11,11}=1,\\
%&& \cdots\\
%&& p_{16,16}=1.
%\end{eqnarray*}
%And these numbers form a $16\time 16$ matrix $P$.
%
%
%We let the initial distribution be the distribution of $X_1$. Thus
%
%\begin{table}[h]
%\centering
%\begin{tabular}{c|cccccccccccccccc}
%state & 1&2&3&4&5&6&7&8&9&10&11&12&13&14&15&16\\
%\hline
%ipd & 1/6 &1/6 &1/6 &1/6 &1/6 &1/6 &0 &0 &0 &0 &0 &0 &0 &0 &0 &0 \\
%\end{tabular}
%\end{table}
%
%Now compute $ipd*P^n$ for a large $n$. And look at the probability $P[X_n=13]\approx 0.1819$.
%
%}
%


%\item
%Let $0<p<1, p\neq 1/2$ and $q=1-p$. Suppose that $S=\mathbb{Z}$. Let $\eta_n, n=1,2,\cdots$ be a sequence of independent identically distributed random variables with $P[\eta_1=1]=p$ and $P[\eta_1=-1]=q$. Define $X_n=\eta_1+\cdots+\eta_n$ for $n\ge 1$ and $X_0=0$.
%Let $f^{(n)}_{00}$ be the probability of first return in $n$ steps starting from state 0.
%\begin{enumerate}
%\item Find the one-step transition probabilities $P(X_{n+1}=j|X_n=i)$ for $i,j\in S$.
%%\item Prove that when $n\to\infty$, the probability $P(X_n=0|X_0=0)$ goes to zero.
%\item Compute the first return distribution $f^{(n)}_{00}, n\ge 1$.
%%\item Compute $P(\max\limits_{1\le n\le 3}X_n\le 2)$.
%\item Prove that the return probability $f_{00}=\sum\limits_{n=1}^{\infty} f^{(n)}_{00}=1-|p-q|$.
%\item Consider $p=1/2$. Show that $0$ is a null-recurrent state.
%\end{enumerate}
%
%{\it \color{red} \underline{Solution.}
%
%(a) The state space $S$ is the set of all integers
%\begin{eqnarray*}
%S=\{\cdots,\, -4,\,-3,\,-2,\,-1,\,0,\,1,\,2,\,3,\,4,\,\cdots\}.
%\end{eqnarray*}
%
%The one-step transition probability $p_{i,i+1}=p$, $p_{i,i-1}=q$. This is one dimensional random walk on integers.
%
%(b) The return probability and first return probability are defined to be
%\begin{eqnarray*}
%p_{00}^{(n)} &=& P \Big{[} X_n=0 \,\Big{|}\, X_0=0 \Big{]},\,\, n\ge 1\\
%f_{00}^{(n)} &=& P \Big{[} X_n=0, X_{n-1}\neq 0, \cdots, X_{1}\neq 0 \,\Big{|}\, X_0=0 \Big{]},\,\, n\ge 1.
%\end{eqnarray*}
%
%I am not able to compute this first return probability yet.
%
%(c) For return probability in $2k$ steps, we choose $k$ steps among the total $2k$ steps going in one direction, and the other $k$ steps coming back, thus
%\begin{eqnarray}
%p_{00}^{(2k+1)} = 0, \textrm{ and }\,\,\,
%p_{00}^{(2k)} = \frac{(2k)!}{k!k!}p^kq^k.
%\label{return-prob}
%\end{eqnarray}
%
%By the total probability formula, we have the following formula
%\begin{eqnarray}
%p_{00}^{(n)} = f_{00}^{(1)}p_{00}^{(n-1)} + f_{00}^{(2)}p_{00}^{(n-2)} + \cdots + f_{00}^{(n-1)}p_{00}^{(1)} + f_{00}^{(n)}.
%\label{return-first-relation-element}
%\end{eqnarray}
%
%From this formula we get
%\begin{eqnarray}
%&& p_{00}^{(1)} + p_{00}^{(2)} + \cdots + p_{00}^{(n)} +\cdots \nonumber \\
%&=& \Big{(} 1 + p_{00}^{(1)} + p_{00}^{(2)} + \cdots + p_{00}^{(n)} + \cdots \Big{)}
%  \times \Big{(} f_{00}^{(1)} +f_{00}^{(2)} + \cdots + f_{00}^{(n)} + \cdots \Big{)}.
%\label{return-first-relation}
%\end{eqnarray}
%
%From (\ref{return-first-relation}) and (\ref{return-prob}), we get the return probability
%\begin{eqnarray*}
%f_{00} &=&  f_{00}^{(1)} +f_{00}^{(2)} + \cdots + f_{00}^{(n)} + \cdots  \\
%&=& 1- \Big{(} 1 + p_{00}^{(1)} + p_{00}^{(2)} + \cdots + p_{00}^{(n)} + \cdots \Big{)}^{-1}\\
%&=& 1- \Big{(} 1 + p_{00}^{(2)} + p_{00}^{(4)} + \cdots + p_{00}^{(2k)} + \cdots \Big{)}^{-1}\\
%&=& 1- \Big{(} 1 + \frac{2!}{1!1!}pq + \frac{4!}{2!2!}(pq)^2 + \cdots + \frac{(2k)!}{k!k!}(pq)^k + \cdots \Big{)}^{-1}\\
%&=& 1-\sqrt{1-4pq}=1-\sqrt{p^2+2pq+q^2-4pq}=1-|p-q|.
%\end{eqnarray*}
%
%(d) When $p=1/2$, we see that $f_{00}=1$ from (c). So it is a recurrent state. To show it is null-recurrent, we need to compute
%\begin{eqnarray*}
%\mu_{00}= f_{00}^{(1)} + 2 f_{00}^{(2)} + \cdots + n f_{00}^{(n)} + \cdots
%\end{eqnarray*}
%
%and show that it is infinite. I do not know how to compute this yet. But if we can prove that when the state 0 is recurrent, then it is null-recurrent if and only if $\lim\limits_{n\to\infty} p_{00}^{(n)}=0$, then it is done. Thus it remains to compute the limit
%\begin{eqnarray*}
%\lim\limits_{k\to\infty} p_{00}^{(2k)} = \lim\limits_{k\to\infty} \frac{(2k)!}{k!k!}p^kq^k =??
%\end{eqnarray*}
%
%}


\item
%Ex.3.5.6.
A baseball trading card that you have for sale may be quite valuable. Suppose that the successive bids $\xi_1,\xi_2,\cdots$ that you receive are independent random variables with the geometric distribution $Pr[\xi=k] = 0.01 (0.99)^k$ for $k = 0,1,2,\cdots$. If you decide to accept any bid over \$100, how many bids, on the average, will you receive before an acceptable bid appears?

{\it \color{red} \underline{Solution.}

We define a Markov chain $\{X_n,n\ge 0\}$ with state space $S=\{0,1\}$. Initially $X_0=0$, and when $\xi_k \ge 100$ for some $k\ge 1$, we set $X_k=1$ and the chain stays at 1 thereafter. Let
\begin{eqnarray*}
p &=& P[\xi_k\ge 100]=0.01 (0.99)^{100} + 0.01 (0.99)^{101} + 0.01 (0.99)^{102} + \cdots \\
  &=& (0.99)^{100}\approx 0.3660.
\end{eqnarray*}

Now we compute the mean time of the transition from state 0 to state 1.
\begin{eqnarray*}
\mu_{01} &=& f_{01}^{(1)} + 2f_{01}^{(2)} + 3f_{01}^{(3)} + \cdots + nf_{01}^{(n)} + \cdots\\
         &=& p+2(1-p)p+3(1-p)^2p+\cdots+n(1-p)^{n-1}p+\cdots\\
         &=& 1/p \approx 2.7320.
\end{eqnarray*}

}


%\item
%%Ex.4.1.10.
%A bus in a mass transit system is operating on a continuous route with intermediate stops. The arrival of the bus at a stop is classified into one of three states, namely, 1. Early arrival; 2. On-time arrival; 3. Late arrival. Suppose that the successive states form a Markov chain with the following transition probability matrix. Over a long period of time, what fraction of stops can be expected to be late?
%\begin{eqnarray*}
%P=\begin{bmatrix}
%0.5& 0.4& 0.1\\
%0.2& 0.5& 0.3\\
%0.1& 0.2& 0.7\\
%\end{bmatrix}
%\end{eqnarray*}
%
%{\it \color{red} \underline{Solution.}
%
%We need to compute the limiting distribution. We compute the limit $P^n$ when $n\to\infty$, and this is done by a computer very quickly.
%Since this is an ergodic Markov chain, we can compute the stable distribution. Thus we solve the system of linear equations $\pi P=\pi$. That is,
%\begin{eqnarray*}
%\begin{bmatrix} \pi_1 & \pi_2 & \pi_3   \end{bmatrix}
%\begin{bmatrix} 0.5& 0.4& 0.1\\  0.2& 0.5& 0.3\\  0.1& 0.2& 0.7\\  \end{bmatrix}
%=\begin{bmatrix} \pi_1 & \pi_2 & \pi_3   \end{bmatrix}.
%\end{eqnarray*}
%Together with the condition $\pi_1+\pi_2+\pi_3=1$, we get
%\begin{eqnarray*}
%\begin{bmatrix} \pi_1 & \pi_2 & \pi_3   \end{bmatrix} =\begin{bmatrix} 0.225 & 0.35 & 0.425   \end{bmatrix}.
%\end{eqnarray*}
%
%}

%\item
%%Problem.4.1.5.
%The four towns A, B, C, and D are connected by railroad lines as shown in the following diagram. Each day, in whichever town it is in, a train chooses one of the lines out of that town at random and traverses it to the next town, where the process repeats the next day. In the long run, what is the probability of finding the train in town D?
%
%\begin{figure}[!ht]
%\centering
%%\begin{center}
%\includegraphics [height=3cm,width=0.4\textwidth]{four_towns.eps}
%%\end{center}
%\end{figure}
%
%{\it \color{red} \underline{Solution.}
%
%We write the transition probability matrix
%\begin{eqnarray*}
%P=\begin{blockarray}{rrrrr}
%  & A & B & C & D  \\
%\begin{block}{r[rrrr]}
%A  &0   &1/2 &0   &1/2    \\
%B  &1/3 &0   &1/3 &1/3    \\
%C  &0   &1   &0   &0      \\
%D  &1/2 &1/2 &0   &0      \\
%\end{block}
%\end{blockarray}.
%\end{eqnarray*}
%
%By the same algorithm as in the previous problem, we get the limiting distribution
%\begin{eqnarray*}
%\begin{bmatrix} \pi_1 & \pi_2 & \pi_3 & \pi_4  \end{bmatrix} =\begin{bmatrix} 0.25 & 0.375 & 0.125 & 0.25  \end{bmatrix}.
%\end{eqnarray*}
%
%}


\item
%Ex.4.2.1.
On a West Indian island, a sunny day is followed by another sunny day with probability 0.9, whereas a rainy day is followed by another rainy day with probability 0.2. Supposing that there are only sunny or rainy days, in the long run on what fraction of days is it sunny?

{\it \color{red} \underline{Solution.}

We write the transition probability matrix
\begin{eqnarray*}
P=\begin{blockarray}{rrr}
  & Rainy & Sunny  \\
\begin{block}{r[rr]}
Rainy  & 0.2 & 0.8     \\
Sunny  & 0.1 & 0.9      \\
\end{block}
\end{blockarray}.
\end{eqnarray*}

By the same algorithm as in the previous problem, we get the limiting distribution
\begin{eqnarray*}
\begin{bmatrix} \pi_1 & \pi_2  \end{bmatrix} =\begin{bmatrix} 1/9 & 8/9  \end{bmatrix}.
\end{eqnarray*}

}


\end{enumerate}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\end{document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\begin{enumerate}
\item 
\item 
\item 
\item 
\end{enumerate}

\begin{itemize}
\item 
\item 
\end{itemize}


