\chapter{The Foundations: Logic and Proofs}

\section{Propositional Logic}

\begin{definition}
  A \emph{proposition} is a declarative sentence that is either true or false,
  but not both.
\end{definition}

``What time is it?'' and ``Read this carefully.'' are not propositions because
they are not declarative sentences. Also $x+y=z$ is not a proposition, but it
can be turned into a proposition if we assign values to the variables.

\begin{definition}
  Let $p$ be a proposition. The \emph{negation} of $p$, denoted by $\lnot p$
  (also denoted by $\overline{p}$), is the statement: ``It is not the case that
  $p$.'' It is read ``not $p$'' and its truth value is the opposite of the
  truth value of $p$.
\end{definition}

\begin{definition}
  Let $p$ and $q$ be propositions. The \emph{conjunction} of $p$ and $q$,
  denoted by $p\wedge q$, is the proposition ``$p$ and $q$''. The conjunction
  $p\wedge q$ is true when \emph{both} $p$ and $q$ are true and is false
  otherwise.
\end{definition}

\begin{definition}
  Let $p$ and $q$ be propositions. The \emph{disjunction} of $p$ and $q$,
  denoted by $p\vee q$, is the proposition ``$p$ or $q$''. The disjunction
  $p\vee q$ is false when \emph{both} $p$ and $q$ are false and is true
  otherwise.
\end{definition}

\begin{definition}
  Let $p$ and $q$ be propositions. The \emph{exclusive or} of $p$ and $q$,
  denoted by $p\oplus q$, is the proposition that is true when exactly one of
  $p$ and $q$ is true and is false otherwise.
\end{definition}

\begin{definition}
  Let $p$ and $q$ be propositions. The \emph{conditional statement} $p\to q$ is
  the proposition ``if $p$, then $q$''. The conditional statement $p\to q$
  is false when $p$ is true and $q$ is false, and true otherwise. In the
  conditional statement $p\to q$, $p$ is called the \emph{hypothesis} (or
  \emph{antecedent} or \emph{premise}) and $q$ is called the
  \emph{conclusion} (or \emph{consequence}).
\end{definition}

The statement $p\to q$ is called a conditional statement because $p\to q$
asserts that $q$ is true on the condition that $p$ holds. It is also called an
\emph{implication}. Note that the statement $p\to q$ is true when $p$ is false
(no matter what truth value $q$ has).
\\

%\marginpar{Some quirkness}
%Note that the conditional statement
%\begin{center}
%  ``If today is Friday, then $2+3=5$.''
%\end{center}
%is true from the definition, because its conclustion is true. (The truth value
%of the hypothesis doesn't matter then.) The conditional statement
%\begin{center}
%  ``If today is Friday, then $2+3=6$.''
%\end{center}
%is true every day except Friday, even though $2+3=6$ is false.
%\\

Given a conditional statement $p\to q$:
\begin{itemize}
  \item The \emph{converse:} $q\to p$.
  \item The \emph{contrapositive:} $\lnot q\to\lnot p$.
  \item The \emph{inverse:} $\lnot p\to\lnot q$.
\end{itemize}
Only the contrapositive is equivalent to the original conditional statement.
(The converse is equivalent to the inverse though.)

\begin{definition}
  Let $p$ and $q$ be propositions. The \emph{biconditional statement} $p\lra q$
  is the proposition ``$p$ if and only if $q$''. The biconditional statement
  $p\lra q$ is true when $p$ and $q$ have the same truth values, and is false
  otherwise. Biconditional statements are also called bi-implications.
\end{definition}

\section{Propositional Equivalences}

An important type of step used in a mathematical argument is the replacement of
a statement with another statement with the same truth value. We'll use the term
``compound proposition'' to refer to an expression formed from propositional
variables using logical operators.

\begin{definition}
  A compound proposition that is always true, no matter what the truth values
  of the propositions that occur in it, is called a \emph{tautology}. A
  compound proposition that is always false is called a \emph{contradiction}.
  A compound proposition that is neither a tautology nor a contradiction is
  called a \emph{contingency}.
\end{definition}

\begin{example}
  $p\vee\lnot p$ is a tautology. $p\wedge\lnot p$ is a contradiction.
\end{example}

\begin{definition}
  The compound propositions $p$ and $q$ are called \emph{logically equivalent}
  if $p\lra q$ is a tautology. The notation $p\equiv q$ denotes this.
\end{definition}

\begin{remark}
  The symbol $\equiv$ is not a logical connective, $p\equiv q$ is
  not a compound proposition but rather is a statement that $p\lra q$
  is a tautology. The symbol $\Leftrightarrow$ is sometimes used instead of
  $\equiv$ to denote logical equivalence.
  \\

  We can use truth tables to determine whether two compound propositions are
  logically equivalent. A truth table records the truth value of a compound
  proposition under all assignments of its propositional variables.
\end{remark}

\begin{example}
  $\lnot(p\vee q)\equiv(\lnot p\wedge\lnot q)$,
  $p\to q\equiv\lnot p\vee q$
\end{example}

Some important logical equivalences:
\begin{itemize}
  \item Identity laws: $p\wedge\mathbf{T}\equiv p$, $p\vee\mathbf{F}\equiv p$.
  \item Domination laws: $p\vee\mathbf{T}\equiv\mathbf{T}$,
    $p\wedge\mathbf{F}\equiv\mathbf{F}$.
  \item Idempotent laws: $p\wedge p\equiv p\vee p\equiv p$.
  \item Double negation law: $\lnot(\lnot p)\equiv p$.
  \item Commutative laws: $p\vee q\equiv q\vee p$, $p\wedge q\equiv q\wedge p$.
  \item Associative laws:
    $(p\vee q)\vee r\equiv p\vee(q\vee r)$,
    $(p\wedge q)\wedge r\equiv p\wedge(q\wedge r)$.
  \item Distributive laws: $p\vee(q\wedge r)\equiv (p\vee q)\wedge(p\vee r)$,
    $p\wedge(q\vee r)\equiv (p\wedge q)\vee(p\wedge r)$.
  \item De Morgan's laws: $\lnot(p\wedge q)\equiv\lnot p\vee\lnot q$,
    $\lnot(p\vee q)\equiv\lnot p\wedge \lnot q$.
  \item Absorption laws: $p\vee(p\wedge q)\equiv p$, $p\wedge(p\vee q)\equiv p$.
  \item Negation laws: $p\vee\lnot p\equiv\mathbf{T}$,
    $p\wedge\lnot p\equiv\mathbf{F}$.
\end{itemize}
Note: De Morgan's laws can be extended to more than two propositions.
\\

Additional logical equivalence involving conditional and biconditional
statements:
\begin{itemize}
  \item $p\to q\equiv\lnot p\vee q$.
  \item $p\to q\equiv\lnot q\to\lnot p$.
  \item $p\vee q\equiv\lnot p\to q$.
  \item $p\wedge q\equiv\lnot(p\to\lnot q)$.
  \item $\lnot(p\to q)\equiv p\wedge\lnot q$.
  \item $(p\to q)\wedge(p\to r)\equiv p\to(q\wedge r)$.
  \item $(p\to r)\wedge(q\to r)\equiv (p\vee q)\to r$.
  \item $(p\to q)\vee(p\to r)\equiv p\to(q\vee r)$.
  \item $(p\to r)\vee(q\to r)\equiv (p\wedge q)\to r$.
  \item $p\lra q\equiv(p\to q)\wedge(q\to p)$.
  \item $p\lra q\equiv\lnot p\lra\lnot q$.
  \item $p\lra q\equiv(p\wedge q)\vee (\lnot p\wedge\lnot q)$.
  \item $\lnot(p\lra q)\equiv p\lra\lnot q$.
\end{itemize}

% \begin{example}
%   using De Morgan's laws: ``Miguel has a cellphone and a
%   laptop'' is the same as ``Miguel doesn't have a cellphone or doesn't have a
%   laptop''. ``Heather will go to the concert or Steve will go to the concert''
%   is the same as ``Heather and Steve won't go to the concert''.
% \end{example}

\begin{example}
  Show that $\lnot(p\to q)$ and $p\wedge\lnot q$ are logically
  equivalent.
\end{example}

\begin{proof}[Solution:]
  \begin{align*}
    \lnot\left(p\to q\right)
    &\equiv\lnot\left(\lnot p\vee q\right)\\
    &\equiv\lnot\left(\lnot p\right)\wedge\lnot q\\
    &\equiv p\wedge\lnot q
  \end{align*}
\end{proof}

\begin{example}
  Show that $\lnot(p\vee(\lnot p\wedge q))\equiv \lnot p\wedge\lnot q$.
\end{example}

\begin{proof}[Solution:]
  \begin{align*}
    \lnot\left(p\vee\left(\lnot p\wedge q\right)\right)
    &\equiv\lnot p\wedge\lnot\left(\lnot p\wedge q\right)\\
    &\equiv\lnot p\wedge\left(p\vee \lnot q\right)\\
    &\equiv\left(\lnot p\wedge p\right)\vee\left(\lnot p\wedge\lnot q\right)\\
    &\equiv\mathbf{F}\vee\left(\lnot p\wedge\lnot q\right)\\
    &\equiv\lnot p\wedge\lnot q
  \end{align*}
\end{proof}

\section{Predicates and Quantifiers}

Consider the statement ``$x$ is greater than 3'', it has two parts. The first
part, the variable $x$, is the subject of the statement. The second part, the
\emph{predicate}, ``is greater than 3'', refers to a property that the subject
of the statement can have. We denote ``$x$ is greater than 3'' by $P(x)$, where
$P$ denotes the predicate ``is greater than 3''. The statement $P(x)$ is
also said to be the value of the \emph{propositional function} $P$ at $x$. Once
a value has been assigned to $x$, the statement $P(x)$ becomes a proposition
and has a truth value. For example $P(4)$ is true but $P(2)$ is false.
\\

In general, a statement involving $n$ variables can be denoted by
$P(x_1,x_2,\dots,x_n)$. A statement of this form is the value of the
\emph{propositional function} $P$ at the $n$-tuple $(x_1,\dots,x_n)$, and $P$
is also called an $n$-\emph{place predicate} or an $n$-\emph{ary predicate}.
\\

We can assign values to variables to make a proposition out of a propositional
function. However there is another way to do this, called
\emph{quantification}. Quantification expresses the extent to which a predicate
is true over a range of elements. For example, the words \emph{all},
\emph{some} and \emph{none} in English. We focus on two types of
quantification: universal and existential. The area of logic that deals with
predicates and quantifiers is called the \emph{predicate calculus}.
\\

There is a \emph{domain of discourse} (or \emph{universe of discourse}, or just
\emph{domain}), which must always be specified when a quantifier is used.

\begin{definition} 
  The \emph{universal quantification} of $P(x)$ is the statement:
  ``$P(x)$ for all values of $x$ in the domain''. The notation $\forall
  x\;P(x)$ denotes this. Here $\forall$ is called the \emph{universal
  quantifier}. An element for which $P(x)$ is false is called a
  \emph{counterexample} of $\forall x\;P(x)$.
\end{definition}

\begin{example}
  Let $P(x)$ be ``$x+1>x$'', and the domain consists of all real numbers,
  $\forall x\;P(x)$ is true because if $x$ is real, $x+1>x$. Let $Q(x)$ be
  ``$x<2$'' with the same domain, $\forall x\;Q(x)$ is false because $Q(3)$ is
  false.
\end{example}

\begin{definition}
  The \emph{existential quantification} of $P(x)$ is the proposition: ``There
  exists an element $x$ in the domain such that $P(x)$''. We use the notation
  $\exists x\;P(x)$ for the existential quantification of $P(x)$. Here
  $\exists$ is called the \emph{existential quantifier}.
\end{definition}

\begin{example}
  Let $P(x)$ be ``$x>3$'', domain be reals, $\exists x\;P(x)$ is true because
  $P(4)$ is true. Let $Q(x)$ be ``$x=x+1$'', same domain, $\exists x\;Q(x)$ is
  false.
\end{example}

\begin{center}
  \begin{tabular}{|l|l|l|}
    \hline
    &
    \textbf{\textit{When True?}} & \textbf{\textit{When False?}}
    \\\hline
    $\forall x\;P(x)$ & $P(x)$ is true for every $x$.
    &
    There is an $x$ s.t $P(x)$ is false.
    \\
    $\exists x\;P(x)$ & There is an $x$ s.t. $P(x)$ is true.
    &
    $P(x)$ is false for every $x$.
    \\\hline
  \end{tabular}
\end{center}

\begin{remark}
  Generally, an implicit assumption is that all domains of discourse for
  quantifiers are nonempty. If it is empty, then $\exists x\;P(x)$ is always
  false and $\forall x\;P(x)$ is always true (vacuously).
\end{remark}

When the domain is finite, a universal quantification can be expressed as a
conjunction, and an existential quantification as a disjunction.
\\

There is also a \emph{uniqueness quantifier}, denoted by $\exists!$ or
$\exists_1$, where $\exists!x\;P(x)$ states ``There exists a unique $x$ such
that $P(x)$ is true''.
\\

The restriction of a universal quantification is the same as the universal
quantification of a conditional statement: $\forall x<0,\;x^2>0$ is same as
$\forall x\;(x<0\to x^2>0)$. On the other hand, the restriction of an
existential quantification is the same as the existential quantification of
a conjunction: $\exists z>0\;(z^2=2)$ is the same as $\exists z\;(z>0\wedge
z^2=2)$.
\\

When a quantifier is used on the variable $x$, we say that this occurrence of
the variable is \emph{bound}. An occurrence of a variable that is not bound by
a quantifier or set equal to a particular value is said to be \emph{free}.
All the variables that occur in a propositional function must be bound or set
to a value in order to turn it into a proposition.
\\

Quantifiers have a \emph{scope}; a variable is free if it is outside the scope
of all quantifiers that specify this variable.

\begin{example}
  In $\exists x(x+y=1)$, $x$ is bound by $\exists x$, but $y$ is free. In
  $\exists x\;(P(x)\wedge Q(x))\vee\forall x\;R(x)$, all variables are bound.
  The first two $x$'s are bound by the $\exists x$, the last one is bound by the
  $\forall x$.
\end{example}

\begin{definition}
  Statements involving predicates and quantifiers are \emph{logically
  equivalent} iff they have the same truth value no matter which predicates are
  substituted into these statements and which domain of discourse is used.
\end{definition}

\begin{example}
  Show that $\forall x\;(P(x)\wedge
  Q(x))\equiv\forall x\; P(x)\wedge\forall x\;
  Q(x)$ (where the same domain is used throughout). That is, we can
  distribute universal quantifier over a conjunction, we can also distribute
  existential quantifier over a disjunction, but that's it (see Exercises 50
  and 51).
\end{example}

\begin{proof}[Solution:]
  Suppose $\forall x\;(P(x)\wedge Q(x))$ is true. This means that if $a$ is in
  the domain, then both $P(a)$ and $Q(a)$ are true. Since they both hold
  simultaneously for every element in the domain, we can conclude that $\forall
  x\;P(x)\wedge\forall x\;Q(x)$ is true. Next, suppose $\forall
  x\;P(x)\wedge\forall x\; Q(x)$ is true. It follows that if $a$ is in the
  domain, both $P(a)$ and $Q(a)$ are true, so the LHS is also true.
\end{proof}

De Morgan's laws for quantifiers:
\begin{align*}
  \lnot\forall x\;P\left(x\right)&\equiv\exists x\;\lnot P\left(x\right),\\
  \lnot\exists x\;P\left(x\right)&\equiv\forall x\;\lnot P\left(x\right).
\end{align*}
Note that when the domain is finite, then this is exactly the same as De
Morgan's laws in Section 1.2 (recall universal VS conjunction, existential VS
disjunction).

\begin{example}
  \begin{align*}
    \lnot\forall x\;\left(P\left(x\right)\to Q\left(x\right)\right)
    &\equiv
    \lnot\forall x\;\left(\lnot P\left(x\right)\vee Q\left(x\right)\right)
    \\&\equiv
    \exists x\;\lnot\left(\lnot P\left(x\right)\vee Q\left(x\right)\right)
    \tag{De Morgan}\\&\equiv
    \exists x\;\left(P\left(x\right)\wedge\lnot Q\left(x\right)\right)
  \end{align*}
\end{example}

\section{Nested Quantifiers}

\begin{example}
  Assume that the domain for $x$ and $y$ consists of all real numbers.
  This is the commutative law for addition: $\forall x\forall y\;(x+y=y+x)$.
  The associative law for addition: $\forall x\forall y\forall
  z\;(x+(y+z)=(x+y)+z)$.
\end{example}

\begin{example}
  Translate into English the statement
  \begin{align*}
    \forall x\forall
    y\;\left(\left(x>0\right)\wedge\left(y<0\right)\to\left(xy<0\right)\right).
  \end{align*}
\end{example}

\begin{proof}[Solution:]
  For every real number $x$ and for every real number $y$, if $x>0$ and $y<0$, then
  $xy<0$.
\end{proof}

Thinking of quantification as loops: $\forall$ iterate over all elements, if
any element makes the predicate false, return false, otherwise return true.
$\exists$ iterate over all elements, if any element makes the predicate true,
return true, otherwise return false.
\\

The order of nested universal quantifiers (or existential quantifiers) in a
statement without other quantifiers can be changed without changing the meaning
of the quantified statement:
$\forall x\forall y\;(x+y=y+x)\equiv\forall y\forall x\;(x+y=y+x)$.
\\

However, if we mix them up, this is no longer true:
$\forall x\exists y\;(x+y=0)\centernot\equiv\exists y\forall
x\;(x+y=0)$.

\begin{example} 
  Definition of limit $\lim_{x\to a}f(x)=L$:
  \begin{align*}
    \forall\epsilon\;\exists\delta\;\forall
    x\;\left(0<\abs{x-a}<\delta\to\abs{f(x)-L}<\epsilon\right)
  \end{align*}
  where the domain for $\delta$ and $\epsilon$ consists of all positive real
  numbers and the domain for $x$ consists of all real numbers. This definition
  can also be expressed as
  \begin{align*}
    \forall\epsilon>0\;\exists\delta>0\;\forall
    x\;\left(0<\abs{x-a}<\delta\to\abs{f(x)-L}<\epsilon\right)
  \end{align*}
  and the domain for all variables consists of all real numbers (restricted
  quantifers).
\end{example}

\begin{example}
  If a person is female and is a parent, then this person is someone's mother:
  \begin{align*}
    \forall x\;\left(
    \left(F\left(x\right)\wedge P\left(x\right)\right)\to
    \exists y\;M\left(x,y\right)
    \right).
  \end{align*}
  Using the null quantification rule in part (b) of Exercise 47 in Section 1.3,
  we can move $\exists y$ to the left because $y$ does not appear in
  $F\left(x\right)\wedge P\left(x\right)$:
  \begin{align*}
    \forall x\exists y\;\left(
    \left(F\left(x\right)\wedge P\left(x\right)\right)\to M\left(x,y\right)
    \right).
  \end{align*}
\end{example}

\begin{example}
  The uniqueness quantifier $\exists!$ can be expressed using nested quantifiers.
  Everyone has exactly one best friend:
  \begin{align*}
    \forall x\exists y\left(B\left(x,y\right)\wedge
    \forall z\;\left(\left(z\neq y\right)\to\lnot
    B\left(x,z\right)\right)\right).
  \end{align*}
\end{example}

Negating nested quantifiers can be done by successively applying the rules for
negating statements involving a single quantifier.

\begin{example}
  \begin{align*}
    \lnot\left(\forall x\exists y\;\left(xy=1\right)\right)
    &\equiv\exists x\;\lnot\left(\exists y\;\left(xy=1\right)\right)\\
    &\equiv\exists x\forall y\;\lnot\left(xy=1\right)
  \end{align*}
\end{example}

\begin{example}
  $\lim_{x\to a}f(x)\neq L$:
  \begin{align*}
    &\lnot\left(
    \forall\epsilon>0\;\exists\delta>0\;\forall x\;
    \left(0<\abs{x-a}<\delta\to\abs{f(x)-L}<\epsilon\right)
    \right)\\
    \equiv&
    \exists\epsilon>0\;\lnot\left(\exists\delta>0\;\forall x\;
    \left(0<\abs{x-a}<\delta\to\abs{f\left(x\right)-L}<\epsilon\right)\right)\\
    \equiv&
    \exists\epsilon>0\;\forall\delta>0\;\lnot\left(\forall x\;
    \left(0<\abs{x-a}<\delta\to\abs{f\left(x\right)-L}<\epsilon\right)\right)\\
    \equiv&
    \exists\epsilon>0\;\forall\delta>0\;\exists x\;\lnot
    \left(0<\abs{x-a}<\delta\to\abs{f(x)-L}<\epsilon\right)\\
    \equiv&
    \exists\epsilon>0\;\forall\delta>0\;\exists x\;
    \left(0<\abs{x-a}<\delta\wedge\abs{f\left(x\right)-L}\ge\epsilon\right)
  \end{align*}
  Then the statement that the limit does not exist is expressed by:
  \begin{align*}
    \forall L\;\exists\epsilon>0\;\forall\delta>0\;\exists x\;
    \left(0<\abs{x-a}<\delta\wedge\abs{f\left(x\right)-L}\ge\epsilon\right).
  \end{align*}
\end{example}

\section{Rules of Inference}

\subsection{Valid Arguments in Propositional Logic}

\begin{definition}
  An \emph{argument} in propositional logic is a sequence of propositions. All
  but the final proposition in the argument are called \emph{premises} and the
  final proposition is called the \emph{conclusion}. An argument is
  \emph{valid} if the truth of all its premises implies that the conclusion is
  true.
  \\

  An \emph{argument form} in propositional logic is a sequence of compound
  propositions involving propositional variables. An argument form is
  \emph{valid} if no matter which particular propositions are substituted for
  the propositional variables in its premises, the conclusion is true if the
  premises are all true.
\end{definition}

From the definition we see that the argument form with premises
$p_1,p_2,\dots,p_n$ and conclusion $q$ is valid, when $(p_1\wedge
p_2\wedge\dots\wedge p_n)\to q$ is a tautology.
\\

The key to showing that an argument in propositional logic is valid is to show
that its argument form is valid.

\subsection{Rules of Inference for Propositional Logic}

We can, but we usually don't, use truth tables to show that an argument form is
valid. Instead, we can first establish the validity of some relatively simple
argument forms, called \emph{rules of inference}. These rules of inference can
be used as building blocks to construct more complicated valid argument forms.
\\

The tautology $(p\wedge(p\to q))\to q$ is the basis of the rule of inference
called \emph{modus ponens}, or the \emph{law of detachment}. (Modus ponens is
Latin for \emph{mode that affirms}.) This tautology leads to the following
valid argument form:
\begin{align*}
  \inference[Modus ponens]{p \\ p\to q}{q}
\end{align*}

% \begin{example}
%   If the conditional statement ``If it snows today, then we will go skiing''
%   and its hypothesis, ``It is snowing today'', are true, then, by modus ponens,
%   it follows that the conclusion of the condition statement, ``We will go
%   skiing'', is true.
% \end{example}

However, note that a valid argument form can lead to an incorrect conclusion if
one or more of its premises is false.
\\

Some other basic rules of inference:
\begin{align*}
  \inference[Modus tollens]{\lnot q \\ p\to q}{\lnot p}
  \\
  \\
  \inference[Hypothetical syllogism]{p\to q \\ q\to r}{p\to r}
  \\
  \\
  \inference[Disjunctive syllogism]{p\vee q \\ \lnot p}{q}
  \\
  \\
  \inference[Addition]{p}{p\vee q}
  \\
  \\
  \inference[Simplification]{p\wedge q}{p}
  \\
  \\
  \inference[Conjunction]{p \\ q}{p\wedge q}
  \\
  \\
  \inference[Resolution]{p\vee q \\ \lnot p\vee r}{q\vee r}
\end{align*}

\subsection{Fallacies}

Several common fallacies arise in incorrect arguments. These fallacies resemble
rules of inference but are based on contingencies rather than tautologies. For
example, the proposition $[(p\to q)\wedge q]\to p$ is not a tautology, and the
proposition $[(p\to q)\wedge\lnot p]\to\lnot q$ is also not a tautology.

\subsection{Rules of Inference for Quantified Statements}

\begin{align*}
  \inference[Universal instantiation]
  {\forall x\;P\left(x\right)}{P\left(c\right)\text{ for an arbitrary }c}
  \\
  \\
  \inference[Universal generalization]
  {P\left(c\right)\text{ for an arbitrary }c}
  {\forall x\;P\left(x\right)}
  \\
  \\
  \inference[Existential instantiation]
  {\exists x\;P\left(x\right)}
  {P(c)\text{ for some element }c}
  \\
  \\
  \inference[Existential generalization]
  {P\left(c\right)\text{ for some element }c}
  {\exists x\;P\left(x\right)}
\end{align*}

\subsection{Combining Rules of Inference for Propositions and Quantified
Statements}

\begin{align*}
  \inference[Universal modus ponens]
  {\forall x\;\left(P\left(x\right)\to Q\left(x\right)\right)
  \\
  P\left(a\right),\text{ where $a$ is a particular element in the domain}}
  {Q(a)}
  \\ \\
  \inference[Universal modus tollens]
  {\forall x\;\left(P\left(x\right)\to Q\left(x\right)\right)
  \\
  \lnot Q\left(a\right),\text{ where $a$ is a particular element in the
  domain}}
  {\lnot P\left(a\right)}
\end{align*}

\section{Introduction to Proofs}

\subsection{Some Terminology}

Formally, a \emph{theorem} is a statement that can be shown to be true. The
term theorem is usually reserved for a statement that is considered at least
somewhat important. Less important theorems are sometimes called
\emph{propositions} (or \emph{facts} or \emph{results}). We demonstrate that a
theorem is true with a \emph{proof}. A proof is a valid argument that
establishes the truth of a theorem. The statements used in a proof can include
\emph{axioms} (or \emph{postulates}), the premises, if any, and previously
proven theorems. Axioms may be stated using primitive terms that do not require
definition, but all other terms used in theorems and their proofs must be
defined. Rules of inference, together with definitions of terms, are used to
draw conclusions from other assertions, tying together the steps of a proof. In
practice, the final step of a proof is usually just the conclusion of the
theorem.
\\

A less important theorem that is helpful in the proof of other results is called
a \emph{lemma} (plural \emph{lemmas} or \emph{lemmata}). A \emph{corollary} is
a theorem that can be established directly from a theorem that has been proved.
A \emph{conjecture} is a statement that is being proposed to be a true
statement, usually on the basis of some partial evidence, a heuristic argument,
or the intuition of an expert. When a proof of a conjecture is found, the
conjecture becomes a theorem.

\subsection{Direct Proofs}

A \emph{direct proof} of a conditional statement $p\to q$ is constructed when
the first step is the assumption that $p$ is true; subsequent steps are
constructed using rules of inference, with the final step showing that $q$ must
also be true.

\subsection{Proof by Contraposition}

\emph{Proofs by contraposition} make use of the fact that the conditional
statement $p\to q$ is equivalent to its contrapositive, $\lnot q\to\lnot p$.
This means that the conditional statement $p\to q$ can be proved by showing that
its contrapositive, $\lnot q\to\lnot p$, is true. We take $\lnot q$ as
hypothesis and show that $\lnot p$ must follow.

\begin{example}
  Prove that if $n$ is an integer and $3n+2$ is odd, then $n$ is odd.
\end{example}

\begin{proof}[Solution:]
  Proof by contraposition. Suppose $n=2k$ is even, then $3n+2=6k+2=2(3k+1)$ is
  also even (not odd), so the statement is proven.
\end{proof}

We can quickly prove a conditional statement $p\to q$ if we know $p$ is false
(\emph{vacuous proof}) or if we know that $q$ is true (\emph{trivial proof}).

\subsection{Proof by Contradiction}

Suppose we want to prove that a statement $p$ is true. Furthermore, suppose we
can find a contradiction $q$ such that $\lnot p\to q$ is true, then we can
conclude that $\lnot p$ is false, so $p$ is true. Proofs of this type are
called \emph{proofs by contradiction}.

\begin{example}
  Prove that $\sqrt{2}$ is irrational.
\end{example}

\begin{proof}[Solution:]
  Let $p$ be the proposition ``$\sqrt{2}$ is irrational''. Suppose that $\lnot
  p$ is true. If $\sqrt{2}$ is rational, then there exist integers $a$ and $b$
  with $\sqrt{2}=a/b$, where $a$ and $b$ have no common factors (so that the
  fraction $a/b$ is in lowest terms). Here, we are using the fact that every
  rational number can be written in lowest terms. So it follows that
  \begin{align*}
    2&=a^2/b^2\\
    2b^2&=a^2
  \end{align*}
  We see that $a^2$ is even, so $a$ must also be even (assumed here), so we
  have $a=2c$ for some integer $c$. Thus,
  \begin{align*}
    2b^2&=4c^2\\
    b^2&=2c^2
  \end{align*}
  So $b$ is also even. We assumed $a$ and $b$ have no common factors, but both
  $a$ and $b$ are even. Contradiction, so $\sqrt{2}$ is irrational.
\end{proof}

Proofs of equivalence: To show $p\lra q$, show that $p\to q$ and $q\to p$. To
show $p_1\lra\dots\lra p_n$, we can show that $p_1\to\dots\to p_n\to p_1$.
\\

To show that a statement of the form $\forall x\;P(x)$ is false, we need only
find a counterexample.
\\

There is a fallacy called \emph{begging the question}. This fallacy occurs when
one or more steps of a proof are based on the truth of the statement being
proved.

\section{Proof Methods and Strategy}

\subsection{Exhaustive Proof and Proof by Cases}

Sometimes we cannot prove a theorem using a single argument that holds for all
possible cases. To prove a conditional statement of the form
\begin{align*}
  \left(p_1\vee p_2\vee\dots\vee p_n\right)\to q
\end{align*}
the tautology
\begin{align*}
  \left[\left(p_1\vee p_2\vee\dots\vee p_n\right)\to q\right]\lra
  \left[\left(p_1\to q\right)\wedge\left(p_2\to q\right)\wedge
  \dots\wedge(p_n\to q)\right]
\end{align*}
can be used as a rule of inference. Such an argument is called a \emph{proof by
cases}. Sometimes to prove $p\to q$, it is convenient to use a disjunction
$p_1\vee\dots\vee p_n$ instead of $p$ as the hypothesis, where they are
equivalent.
\\

Some theorems can be proved by examining a relatively small number of examples.
Such proofs are called \emph{exhaustive proofs}. An exhaustive proof is a
special type of proof by cases where each case involves checking a single
example.

\begin{example}
  Prove that if $n$ is an integer, then $n^2\ge n$.
\end{example}

\begin{proof}[Solution:]
  Case 1: $n=0$, $n^2=0$, $n^2\ge n$, true.\\
  Case 2: $n\ge 1$, multiply both sides by $n$, we get $n^2\ge n$.\\
  Case 3: $n\le -1$, but $n^2$ is always positive, so $n^2\ge n$.
\end{proof}

When it is not possible to consider all cases of a proof at the same time, a
proof by cases should be considered. Generally, look for a proof by cases where
there is no obvious way to begin a proof, but when extra information in each
case helps move the proof forward. For example, formulate a conjecture about
the decimal digits that occur as the final digit of the square of an integer
and prove the result.

\begin{example}
  By listing some small perfect squares, we notice that final
  digits are 0, 1, 4, 5, 6, and 9. To prove this, we note that we can express an
  integer $n$ as $10a+b$, where $a$ is a nonnegative integer and $b$ is 0, 1, 2, 3,
  4, 5, 6, 7, 8, or 9. Next, note that
  $(10a+b)^2=100a^2+20ab+b^2=10(10a^2+2ab)+b^2$, so the final digit of $n^2$ is
  the same as the final digit of $b^2$. Furthermore the final digit of $b^2$ is
  the same as the final digit of $(10-b)^2=10(10-2b)+b^2$, so we can reduce our
  proof to the consideration of 6 cases.
  \begin{description}
    \item[Case 1:] $b=1$ or $b=9$, last digit is 1.
    \item[Case 2:] $b=2$ or $b=8$, last digit is 4.
    \item[Case 3:] $b=3$ or $b=7$, last digit is 9.
    \item[Case 4:] $b=4$ or $b=6$, last digit is 6.
    \item[Case 5:] $b=5$, last digit is 5.
    \item[Case 6:] $b=0$, last digit is 0.
  \end{description}
\end{example}

\begin{example}
  Show that $(x+y)^r<x^r+y^r$ whenever $x$ and $y$ are positive real numbers
  and $r$ is a real number with $0<r<1$.
\end{example}

\begin{proof}[Solution:]
  Without loss of generality we can assume that $x+y=1$. To see this, suppose
  we have proved the theorem with the assumption that $x+y=1$. Suppose that
  $x+y=t$. Then $(x/t)+(y/t)=1$, which implies that
  $((x/t)+(y/t))^r<(x/t)^r+(y/t)^r$. Multiplying both sides by $t^r$ shows that
  $(x+y)^r<x^r+y^r$.
  \\

  Assuming that $x+y=1$, because $x$ and $y$ are positive, we have $0<x<1$ and
  $0<y<1$. Because $0<r<1$, it follows that $0<1-r<1$, so $x^{1-r}<1$ and
  $y^{1-r}<1$. This means that $x<x^r$ and $y<y^r$. Consequently,
  $x^r+y^r>x+y=1$. This means that $(x+y)^r=1^r<x^r+y^r$. This proves the
  theorem for $x+y=1$.
\end{proof}

\subsection{Existence Proofs}

Many theorems are assertions that objects of a particular type exist. A proof
of a proposition of the form $\exists x\;P(x)$ is called an \emph{existence
proof}. A proof that finds an element $a$ such that $P(a)$ is true is called
\emph{constructive}. It is also possible to give an existence proof that is
\emph{nonconstructive}. One common way is to use proof by contradiction and
show that the negation of the existential quantification implies a
contradiction.

\begin{example}
  A nonconstructive existence proof: show that there exists irrational number
  $x$ and $y$ such that $x^y$ is rational.
\end{example}

\begin{proof}[Solution:]
  We know that $\sqrt{2}$ is irrational. Consider the number
  $\sqrt{2}^{\sqrt{2}}$. If this number is rational, then we are done. If it's
  irrational, we let $x=\sqrt{2}^{\sqrt{2}}$ and $y=\sqrt{2}$, so that
  $x^y=(\sqrt{2}^{\sqrt{2}})^{\sqrt{2}}=2$.
\end{proof}

\begin{example}
  Consider a game Chomp where cookies are laid out on a rectangular grid. The
  top left cookie is poisoned. Players take turn to eat a cookie and all the
  cookies to the right and/or below it. The loser eats the poisoned cookie.
  Show that one of the two players has a winning strategy.
\end{example}

\begin{proof}[Solution:]
  P1 has a winning strategy. First note that the game ends and cannot finish in
  a draw, because with each move at least one cookie is eaten. Now suppose that
  P1 begins the game by eating the bottom right cookie. This move is either the
  first move of a winning strategy for P1, or the second player can make a
  move after this which is the first move of a winning strategy for P2. In
  this second case, instead of eating the bottom right cookie, P1 could have
  made the same move that P2 made as the first move of his/her winning
  strategy. This would guarantee a win for P1.
\end{proof}

\subsection{Uniqueness Proofs}

The two parts of a \emph{uniqueness proof} are:
\begin{itemize}
  \item \emph{Existence:} We show that an element $x$ with the desired property
    exists.
  \item \emph{Uniqueness:} We show that if $y\neq x$, then $y$ does not have
    the desired property.
\end{itemize}
Equivalently, we can show that if $x$ and $y$ both have the desired property,
then $x=y$.

\begin{example}
  Show that $ar+b=0$, where $a$ and $b$ are real numbers with $a\neq 0$, has a
  unique root $r$.
\end{example}

\begin{proof}[Solution:]
  First the root $r$ exists and $r=-b/a$. Second, suppose there is a real
  number $s$ such that $0=as+b=ar+b$, then subtracting $b$ and dividing by $a$
  gives $s=r$. This means that if $s\neq r$, then $as+b\neq 0$.
\end{proof}

Another strategy called \emph{backward reasoning:} To prove $q$, it might be
helpful to find a statement $p$ such that $p\to q$.

\begin{example}
  Given two positive real numbers $x$ and $y$, their \emph{arithmetic mean} is
  $(x+y)/2$ and their \emph{geometric mean} is $\sqrt{xy}$. Prove that the
  arithmetic mean is always greater than the geometric mean for pairs of
  distinct positive real numbers.
\end{example}

\begin{proof}[Solution:]
  \begin{align*}
    \left(x+y\right)/2&>\sqrt{xy}\\
    \left(x+y\right)^2/4&>xy\\
    \left(x+y\right)^2&>4xy\\
    x^2+2xy+y^2&>4xy\\
    x^2-2xy+y^2&>0\\
    \left(x-y\right)^2&>0
  \end{align*}
  The last of the sequence of equivalent inequalities is true when $x\neq y$,
  so one can easily reverse the steps and produce a forward proof.
\end{proof}

\begin{example}
  Two players play a game taking turns removing 1, 2, or 3 stones at a time
  from a pile with 15 stones initially. The one who removes the last stone wins.
  Show that P1 has a winning strategy.
\end{example}

\begin{proof}[Solution:]
  For P1 to win, P2 needs to be forced to leave 1, 2 or 3 stones for P1, that
  is P1 needs to leave 4 stones for P2, that is P2 needs to be forced to leave
  5, 6, or 7 stones for P1, that is P1 needs to leave 8 stones for P2, and so
  on. It's easy to see that if P1 takes 3 stones as the first move and leaves
  12 stones to P2, P1 is guaranteed to win.
\end{proof}

\emph{Adapting existing proofs:} For example, $\sqrt{3}$ can be proved to be
irrational using a similar proof for $\sqrt{2}$ (work with $3d^2=c^2$).

\subsection{Looking for Counterexamples}

``Every positive integer is the sum of squares of three integers'' is false
(replacing three by two also gives a false statement, see section 1.6) because
7 is a counterexample. But it turns out that ``Every positive integer is the
sum of squares of four integers'' is true.

\subsection{Proof strategy in Action: Tiling}

We will illustrate aspects of proof strategy through a brief study of tilings of
checkerboards.

\begin{example}
  Can we tile the standard checkerboard using dominoes?
\end{example}

\begin{proof}[Solution:]
  Yes, an easy constructive existence proof.
\end{proof}

\begin{example}
  Can we tile a board obtained by removing one of the four corner squares?
\end{example}

\begin{proof}[Solution:]
  No, there will be an odd number of squares left. Suppose we can tile it using
  dominoes, then it must have an even number of squares, because each domino
  covers two squares and there's no overlap, contradiction, so we can't.
\end{proof}

\begin{example}
  Can we tile the board obtained by deleting the upper left and lower right
  corner squares?
\end{example}

\begin{proof}[Solution:]
  The resulting board does have an even number of squares. Trying and failing
  to tile it leads us to conjecture that no tiling exists. We might try to
  prove this by considering all possible cases, but that's too many cases.
  \\

  Notice that if we color the board as in a checkerboard, we see that in any
  tiling, each domino must cover one white and one black square. We can use
  this fact to construct a proof by contradiction:
  \\

  Suppose we can use dominoes to tile such a board. Note that this board has 62
  squares, so we will use 31 dominoes. Note that each domino covers one white
  and one black square. So this tiling covers 31 white squares and 31 black
  squares. However, if we remove two opposite corner squares, we either have 32
  white and 30 black, or 30 white and 32 black. This contradicts the
  assumption, so we cannot cover this board using dominoes.
\end{proof}

We can use other types of pieces besides dominoes. We will use shaped pieces
constructed from congruent squares, called \emph{polyominoes}, in particular,
\emph{straight triominoes} (and the L shaped \emph{right triominoes} in Section
4.1).

\begin{example}
  Can we use straight triominoes to tile a checkerboard?
\end{example}

\begin{proof}[Solution:]
  No, a checkerboard has 64 squares, which is not a multiple of 3.
\end{proof}

\begin{example}
  Can we use straight triominoes to tile a checkerboard with one corner
  missing? $64-1=63$ is divisible by 3.
\end{example}

\begin{proof}[Solution:]
  No, we give a similar proof by coloring the checkerboard. But we will use 3
  colors, 21 green, 21 black, 22 white, to color the whole checkerboard, in a
  way such that each triomino must cover a square of each color and that each
  of the three colors appears in a corner square (this can be done by
  alternating the 3 colors diagonally). WLOG, we may assume a green square is
  removed (we can always rotate the coloring), and this obviously leads to a
  contradiction, since there are 20 green, 21 black, and 22 white left.
  Therefore we cannot tile this board using straight triominoes.
\end{proof}

\subsection{The Role of Open Problems}

Fermat's last theorem: $x^n+y^n=z^n$ has no solution in integers $x$, $y$ and
$z$ with $xyz\neq 0$ whenever $n$ is an integer with $n>2$. This was not proven
until the 1990s, and it required hundreds of pages of advanced mathematics.
We now state an open problem, simple to describe, but seems quite difficult to
resolve.

\begin{example}
  Let $T$ be the transformation that sends an even integer $x$ to $x/2$ and an
  odd integer $x$ to $3x+1$. A famous conjecture, sometimes known as the
  \emph{$3x+1$ conjecture}, states that for all positive integers $x$, when
  we repeatedly apply $T$, we will eventually reach the integer 1. This has
  been verified for all integers up to $5.6\times10^{12}$.
\end{example}

\section{Additional Notes from the Exercises}

\emph{Fuzzy logic}, a truth value can be between 0 and 1. Say ``Fred is
happy'' has truth value 0.8 and ``John is happy'' has truth value 0.4.
\\

The \emph{dual} of a compound proposition that contains only the logical
operators $\vee$, $\wedge$ and $\lnot$ is the compound proposition obtained
by replacing each $\vee$ by $\wedge$, each $\wedge$ by $\vee$, each
$\mathbf{T}$ by $\mathbf{F}$ and each $\mathbf{F}$ by $\mathbf{T}$. The dual
of $s$ is denoted by $s^*$.
\\

A collection of logical operators is called \emph{functionally complete} if
every compound proposition is logically equivalent to a compound proposition
involving only these logical operators.
\\

NAND: $\;|\;$, NOR: $\;\downarrow\;$.
\\

\emph{Null quantification}: a quantifier over a variable that does not occur
free in a statement can be added or dropped, $P\iff\forall x\;P$ where $x$
does not occur free in $P$.
\\

A statement is in \emph{prenex normal form (PNF)} if and only if it is of the
form
\begin{align*}
  Q_1x_1Q_2x_2\dots Q_kx_kP\left(x_1,x_2,\dots,x_k\right)
\end{align*}
where each $Q_i$, $i=1,2,\dots,k$, is a quantifier and
$P\left(x_1,\dots,x_k\right)$ is a predicate involving no quantifiers.
