%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% This file is part of the book
%%
%% Probability and Its Applications
%% http://code.google.com/p/probability-book/
%%
%% Copyright (C) 2010 Minh Van Nguyen <nguyenminh2@gmail.com>
%%
%% See the file COPYING for copying conditions.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\chapter{Events and Probability}
\index{event}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Motivating example}

To motivate our study of events\index{event} and probability, imagine
we want to verify polynomial identities. In order to verify that two
polynomials\index{polynomial!identity verification} are equivalent, we
express each polynomial in canonical
form\index{polynomial!canonical form} and compare the canonical forms
term by term. Say we want to verify the polynomial identity
\[
F(x)
\overset{?}{\equiv}
G(x)
\]
by converting each of $F(x)$ and $G(x)$ to canonical
form\index{polynomial!canonical form}. Suppose $F(x)$ is given as a
product
\[
F(x)
=
\prod_{i=1}^d (x - a_i).
\]
Multiplying out $F(x)$ requires $\Theta(d^2)$ multiplications. That
is, a simple way to verify a polynomial
identity\index{polynomial!identity verification} is to multiply out
each polynomial and simplify the result, then compare the simplified
polynomials term by term. While this method is simple, its runtime is
quadratic in the degree of each polynomial.

A faster technique is to use a randomized
algorithm\index{randomized algorithm}. Let $d$ be the maximum
degree\index{polynomial!degree} of $F(x)$ and $G(x)$. The randomized
procedure\index{randomized algorithm} proceeds as presented in
Algorithm~\ref{alg:events:verify_polynomial_identity}.

\begin{algorithm}[!htpb]
\index{polynomial!identity verification}
\input{algorithm/events/verify-polynomial-identity.tex}
\caption{Verify polynomial identity via a randomized algorithm.}
\label{alg:events:verify_polynomial_identity}
\end{algorithm}

If $F(r) = G(r)$, then
Algorithm~\ref{alg:events:verify_polynomial_identity} outputs the
result that $F(x)$ and $G(x)$ are equivalent. But how confident are we
of this answer? To understand how the randomized algorithm can give
an incorrect result, we perform a case analysis.

\begin{enumerate}
\item If $F(x) \equiv G(x)$ and $F(r) = G(r)$, then
  Algorithm~\ref{alg:events:verify_polynomial_identity} gives a
  correct answer for any value of $r$, regardless of whether or not
  $r \in \{1, 2, \dots, 100d\}$.

\item If $F(x) \not\equiv G(x)$ and $F(r) \neq G(r)$, then the
  algorithm returns another correct answer.

\item If $F(x) \equiv G(x)$ and $F(r) \neq G(r)$, then the algorithm
  would return an incorrect answer. However, this case can never
  occur: equivalent polynomials agree at every point, so
  $F(x) \equiv G(x)$ implies $F(r) = G(r)$ for every choice of $r$.

\item Finally if $F(x) \not\equiv G(x)$ and $F(r) = G(r)$, then the
  algorithm returns an incorrect answer. In this case, the polynomials
  are not equivalent and we happen to choose a point of intersection
  between them. That is, $r$ must be a root of $F(x) - G(x) = 0$. The
  degree\index{polynomial!degree} of $F(x) - G(x)$ is at most $d$. By
  the fundamental theorem of
  algebra\index{fundamental theorem of algebra}, $F(x) - G(x)$ has at
  most $d$ roots. If $F(x) \not\equiv G(x)$, then there are no more
  than $d$ values in $\{1, 2, \dots, 100d\}$ for which $F(r) = G(r)$. In
  other words, we have a probability of at most
  \[
  \frac{d}{100d}
  =
  \frac{1}{100}
  \]
  that the algorithm returns a wrong
  answer\index{randomized algorithm!failure probability}.
\end{enumerate}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Axioms of probability}
\index{probability!axioms}

A \emph{probability space}\index{probability!space} consists of the
following components:
%%
\begin{enumerate}
\item A \emph{sample space}\index{sample space} $\Omega$ that
  comprises all possible outcomes of the random process modelled by
  the probability space.

\item A family of sets $\mathcal{F}$ representing the allowable
  \emph{events}\index{event}. Each set in $\mathcal{F}$ is a subset of
  $\Omega$.

\item A probability function\index{probability!function}
  $\Pr: \mathcal{F} \to \R$.
\end{enumerate}
%%
Each $e \in \Omega$ is called a \emph{simple}\index{event!simple} or
\emph{elementary event}\index{event!elementary}.

A \emph{probability function}\index{probability!function} is any
function $\Pr: \mathcal{F} \to \R$ satisfying the following
conditions:
%%
\begin{enumerate}
\item For any event $E$, we have $0 \leq \Pr(E) \leq 1$.

\item The probability of the entire sample space is $1$,
  i.e.\ $\Pr(\Omega) = 1$.

\item For any finite or countably infinite sequence of pairwise
  mutually disjoint events $E_1, E_2, \dots$, we have
  \[
  \Pr \left( \bigcup_{i \geq 1} E_i \right)
  =
  \sum_{i \geq 1} \Pr(E_i).
  \]
\end{enumerate}

A probability space is said to be
\emph{discrete}\index{probability space!discrete} if it satisfies the
following conditions:
%%
\begin{enumerate}
\item The sample space $\Omega$ is finite or countably infinite.

\item The family $\mathcal{F}$ of allowable events is the
  powerset\index{powerset} of $\Omega$, i.e. $\mathcal{F}$ consists of
  all subsets of $\Omega$.
\end{enumerate}

We use standard set notation\index{set} to talk about events:
%%
\begin{itemize}
\item Union\index{set!union} of events: $E_1 \cup E_2$

\item Intersection\index{set!intersection} of events:
  $E_1 \cap E_2$

\item Set difference\index{set!difference}: $E_1 - E_2$

\item Set complement\index{set!complement}: $\overline{E} = \Omega - E$
\end{itemize}

\begin{lemma}
For any two events $E_1$ and $E_2$, we have
\[
\Pr(E_1 \cup E_2)
=
\Pr(E_1) + \Pr(E_2) - \Pr(E_1 \cap E_2).
\]
\end{lemma}

\begin{proof}
Apply the identities
%%
\begin{align*}
\Pr(E_1) &= \Pr\big(E_1 - (E_1 \cap E_2)\big) + \Pr(E_1 \cap E_2) \\
\Pr(E_2) &= \Pr\big(E_2 - (E_1 \cap E_2)\big) + \Pr(E_1 \cap E_2)
\end{align*}
%%
to obtain
\[
\Pr(E_1 \cup E_2)
=
\Pr\big(E_1 - (E_1 \cap E_2)\big)
+
\Pr\big(E_2 - (E_1 \cap E_2)\big)
+
\Pr(E_1 \cap E_2)
\]
from which the lemma follows.
\end{proof}

\begin{lemma}
\label{lem:events:union_finite_sequence_events}
For any finite or countably infinite sequence of events
$E_1, E_2, \dots$, we have
\[
\Pr\left(\bigcup_{i \geq 1} E_i\right)
\leq
\sum_{i \geq 1} \Pr(E_i).
\]
\end{lemma}

In Lemma~\ref{lem:events:union_finite_sequence_events}, we only
require that the sequence of events be finite or countably
infinite. In contrast, the definition of a probability
function\index{probability!function} requires this same sequence of
events to be pairwise mutually disjoint\index{event!disjoint}.

\begin{lemma}
\textbf{Inclusion-exclusion principle.}
\index{inclusion-exclusion principle}
Let $E_1, \dots, E_n$ be any $n$ events. Then
%%
\begin{align*}
\Pr\left(\bigcup_i E_i\right)
&=
\sum_i \Pr(E_i) \\
&\quad -
\sum_{i < j} \Pr(E_i \cap E_j) \\
&\quad +
\sum_{i < j < k} \Pr(E_i \cap E_j \cap E_k) \\
&\quad - \cdots +
(-1)^{\ell + 1}
\sum_{i_1 < \cdots < i_\ell} \Pr\left(\bigcap_{r=1}^\ell E_{i_r}\right)
+ \cdots
\end{align*}
\end{lemma}

We now return to our randomized
Algorithm~\ref{alg:events:verify_polynomial_identity} for
verifying\index{polynomial!identity verification} that two polynomials
are equivalent. Let $E$ be the event that the algorithm returns an
incorrect answer. The elements of the set corresponding to $E$ are the
roots of $F(x) - G(x)$ that are integers in $\{1, 2, \dots, 100d\}$.
The event $E$ includes at most $d$ simple\index{event!simple} events,
hence
%%
\begin{align*}
\index{randomized algorithm!failure probability}
\Pr(\text{algorithm fails})
&=
\Pr(E) \\
&\leq
\frac{d}{100d}
=
\frac{1}{100}.
\end{align*}
%%
How do we reduce the probability of
failure\index{randomized algorithm!failure probability}? One way is to
increase the sample space\index{sample space} from
$\{1, 2, \dots, 100d\}$ to $\{1, 2, \dots, 1000d\}$, thereby improving
on the probability of
failure\index{randomized algorithm!failure probability}, which can
now be bounded as
%%
\begin{align*}
\index{randomized algorithm!failure probability}
\Pr(\text{algorithm fails})
&=
\Pr(E) \\
&\leq
\frac{d}{1000d}
=
\frac{1}{1000}.
\end{align*}
%%
Another approach to reducing the probability of
failure\index{randomized algorithm!failure probability} is to run the
algorithm multiple times. Note that the algorithm has a one-sided
error\index{error!one-sided}: the algorithm may be wrong only when it
says that $F(x)$ and $G(x)$ are equivalent. If a run outputs that
$F(r) \neq G(r)$ for some $r$ chosen uniformly at
random\index{random!uniform}, then we know that
$F(x) \not\equiv G(x)$. For multiple runs, if $F(r) \neq G(r)$ in at
least one round, we conclude that $F(x) \not\equiv G(x)$. The
algorithm outputs $F(x) \equiv G(x)$ if $F(r) = G(r)$ in all runs.

Repeatedly choosing random elements from a sample space is known as
\emph{sampling}\index{sampling}. We can sample with
replacement\index{sampling!with replacement}, meaning that we do not
care whether or not an element has already been chosen, opening up the
possibility of choosing the same element twice or more times. Sampling
without replacement\index{sampling!without replacement} forbids an
element from being chosen more than once.

The events $E$ and $F$ are said to be
\emph{independent}\index{event!independent} if
\[
\Pr(E \cap F)
=
\Pr(E) \times \Pr(F).
\]
In general, the events in a finite sequence $E_1, E_2, \dots, E_k$ are
mutually independent\index{event!independent} if for any subset
$I \subseteq \{1, 2, \dots, k\}$ we have
\[
\Pr\left(\bigcap_{i \in I} E_i\right)
=
\prod_{i \in I} \Pr(E_i).
\]

The \emph{conditional probability}\index{probability!conditional} that
event $E$ occurs given that event $F$ occurs is
\[
\Pr(E \mid F)
=
\frac{\Pr(E \cap F)} {\Pr(F)}.
\]
The conditional probability\index{probability!conditional} is
well-defined only if $\Pr(F) > 0$.

When events $E$ and $F$ are independent\index{event!independent} and
$\Pr(F) \neq 0$, then
\[
\Pr(E \mid F)
=
\frac{\Pr(E \cap F)} {\Pr(F)}
=
\frac{\Pr(E) \cdot \Pr(F)} {\Pr(F)}
=
\Pr(E).
\]

\begin{theorem}
\textbf{Law of total probability.}\index{law of total probability}
Let $E_1, E_2, \dots, E_n$ be mutually disjoint
events\index{event!disjoint} in the sample space $\Omega$ such that
$\bigcup_i E_i = \Omega$. Then
%%
\begin{align*}
\Pr(B)
&=
\sum_i \Pr(B \cap E_i) \\
&=
\sum_i \Pr(B \mid E_i) \cdot \Pr(E_i)
\end{align*}
%%
for any event $B \subseteq \Omega$.
\end{theorem}

\begin{proof}
For $i = 1, 2, \dots, n$ the events $B \cap E_i$ are pairwise
disjoint\index{event!disjoint} and their union is $B$. Then
\[
\Pr(B)
=
\sum_i \Pr(B \cap E_i).
\]
Use the definition of conditional
probability\index{probability!conditional} to obtain
\[
\sum_i \Pr(B \cap E_i)
=
\sum_i \Pr(B \mid E_i) \cdot \Pr(E_i)
\]
and the theorem follows.
\end{proof}

\begin{theorem}
\textbf{Bayes' law.}
\index{Bayes' law}
Let $E_1, E_2, \dots, E_n$ be mutually disjoint
sets\index{set!disjoint} such that $E = \bigcup_i E_i$. Then
%%
\begin{align*}
\Pr(E_j \mid B)
&=
\frac{\Pr(E_j \cap B)} {\Pr(B)} \\[4pt]
&=
\frac{\Pr(B \mid E_j) \cdot \Pr(E_j)} {\sum_i \Pr(B \mid E_i) \cdot \Pr(E_i)}
\end{align*}
%%
for any set $B \subseteq E$ with $\Pr(B) > 0$.
\end{theorem}
