diff --git "a/wiki/proofwiki/shard_22.txt" "b/wiki/proofwiki/shard_22.txt" new file mode 100644--- /dev/null +++ "b/wiki/proofwiki/shard_22.txt" @@ -0,0 +1,14558 @@ +\section{Scaled Euclidean Metric is Metric} +Tags: Scaled Euclidean Metric + +\begin{theorem} +Let $\R_{>0}$ be the [[Definition:Set|set]] of [[Definition:Strictly Positive Integer|(strictly) positive integers]]. +Let $\delta: \R_{>0} \times \R_{>0} \to \R$ be the [[Definition:Metric|metric]] on $\R_{>0}$ defined as: +:$\forall x, y \in \R_{>0}: \delta \left({x, y}\right) = \dfrac {\left\lvert{x - y}\right\rvert} {x y}$ +Then $\delta$ is a [[Definition:Metric|metric]]. +\end{theorem} + +\begin{proof} +=== Proof of $M1$ === +{{begin-eqn}} +{{eqn | l = \delta \left({x, x}\right) + | r = \dfrac {\left\lvert{x - x}\right\rvert} {x^2} + | c = Definition of $\delta$ +}} +{{eqn | r = 0 + | c = as $\left\lvert{x - x}\right\rvert = 0$ +}} +{{end-eqn}} +So [[Definition:Metric Space Axioms|axiom $M1$]] holds for $\delta$. +{{qed|lemma}} +=== Proof of $M2$ === +{{begin-eqn}} +{{eqn | l = \delta \left({x, y}\right) + \delta \left({y, z}\right) + | r = \frac {\left\lvert{x - y}\right\rvert} {x y} + \dfrac {\left\lvert{y - z}\right\rvert} {y z} + | c = Definition of $\delta$ +}} +{{eqn | r = \frac {z \left\lvert{x - y}\right\rvert + x \left\lvert{y - z}\right\rvert} {x y z} + | c = [[Sum of Quotients of Real Numbers]] +}} +{{eqn | r = \frac {\left\lvert{x z - y z}\right\rvert + \left\lvert{x y - x z}\right\rvert} {x y z} + | c = Valid, as $x, z > 0$ +}} +{{eqn | o = \ge + | r = \frac {\left\lvert{x z - y z + x y - x z}\right\rvert} {x y z} + | c = [[Triangle Inequality]] +}} +{{eqn | r = \frac {\left\lvert{x y - y z}\right\rvert} {x y z} + | c = simplifying +}} +{{eqn | r = \frac {\left\lvert{x - z}\right\rvert} {x z} + | c = simplifying further +}} +{{eqn | r = \delta \left({x, z}\right) + | c = Definition of $\delta$ +}} +{{end-eqn}} +So [[Definition:Metric Space Axioms|axiom $M2$]] holds for $\delta$. +{{qed|lemma}} +=== Proof of $M3$ === +{{begin-eqn}} +{{eqn | l = \delta \left({x, y}\right) + | r = \frac {\left\lvert{x - y}\right\rvert} {x y} + | c = Definition of $\delta$ +}} +{{eqn | r = \frac {\left\lvert{y - x}\right\rvert} {y x} + | c = Definition of [[Definition:Absolute Value|Absolute Value]] and [[Real Multiplication is Commutative]] +}} +{{eqn | r = \delta \left({y, x}\right) + | c = Definition of $\delta$ +}} +{{end-eqn}} +So [[Definition:Metric Space Axioms|axiom $M3$]] holds for $\delta$. +{{qed|lemma}} +=== Proof of $M4$ === +{{begin-eqn}} +{{eqn | l = x + | o = \ne + | r = y + | c = +}} +{{eqn | ll= \implies + | l = \left\lvert{x - y}\right\rvert + | o = > + | r = 0 + | c = Definition of [[Definition:Absolute Value|Absolute Value]] +}} +{{eqn | ll= \implies + | l = \frac {\left\lvert{x - y}\right\rvert} {x y} + | o = > + | r = 0 + | c = as $x y > 0$ +}} +{{eqn | ll= \implies + | l = \delta \left({x, y}\right) + | o = > + | r = 0 + | c = Definition of $\delta$ +}} +{{end-eqn}} +So [[Definition:Metric Space Axioms|axiom $M4$]] holds for $\delta$. +{{qed}} +[[Category:Scaled Euclidean Metric]] +kpi92ur6wjx2s5ij2gy6b1hlycyrq3x +\end{proof}<|endoftext|> +\section{Topologies induced by Usual Metric and Scaled Euclidean Metric on Positive Integers are Homeomorphic} +Tags: Discrete Topology, Homeomorphisms + +\begin{theorem} +Let $\Z_{>0}$ be the [[Definition:Set|set]] of [[Definition:Strictly Positive Integer|(strictly) positive integers]]. 
+Let $d: \Z_{>0} \times \Z_{>0} \to \R$ be the [[Definition:Euclidean Metric on Real Number Line|usual (Euclidean) metric]] on $\Z_{>0}$. +Let $\delta: \Z_{>0} \times \Z_{>0} \to \R$ be the [[Definition:Metric|metric]] on $\Z_{>0}$ defined as: +:$\forall x, y \in \Z_{>0}: \map \delta {x, y} = \dfrac {\size {x - y} } {x y}$ +Let $\tau_d$ denote the [[Definition:Topology Induced by Metric|metric topology]] for $d$. +Let $\tau_\delta$ denote the [[Definition:Topology Induced by Metric|metric topology]] for $\delta$. +Then $\struct {\Z_{>0}, \tau_d}$ and $\struct {\Z_{>0}, \tau_\delta}$ are [[Definition:Homeomorphic Topological Spaces|homeomorphic]]. +\end{theorem} + +\begin{proof} +From [[Topology induced by Usual Metric on Positive Integers is Discrete]]: +:$\struct {\Z_{>0}, \tau_d}$ is a [[Definition:Discrete Space|discrete space]]. +From [[Topology induced by Scaled Euclidean Metric on Positive Integers is Discrete]]: +:$\struct {\Z_{>0}, \tau_\delta}$ is a [[Definition:Discrete Space|discrete space]]. +Let $I_{\Z_{>0} }$ be the [[Definition:Identity Mapping|identity mapping]] from $\Z_{>0}$ to itself. +From [[Mapping from Discrete Space is Continuous]]: +:$I_{\Z_{>0} }: \struct {\Z_{>0}, \tau_d} \to \struct {\Z_{>0}, \tau_\delta}$ is [[Definition:Everywhere Continuous Mapping (Topology)|continuous]] +and: +:$I_{\Z_{>0} }: \struct {\Z_{>0}, \tau_\delta} \to \struct {\Z_{>0}, \tau_d}$ is [[Definition:Everywhere Continuous Mapping (Topology)|continuous]]. +Hence the result by definition of [[Definition:Homeomorphic Topological Spaces|homeomorphic]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Cauchy Sequence in Positive Integers under Usual Metric is eventually Constant} +Tags: Euclidean Metric, Cauchy Sequences + +\begin{theorem} +Let $\Z_{>0}$ be the [[Definition:Set|set]] of [[Definition:Strictly Positive Integer|(strictly) positive integers]]. +Let $d: \Z_{>0} \times \Z_{>0} \to \R$ be the [[Definition:Euclidean Metric on Real Number Line|usual (Euclidean) metric]] on $\Z_{>0}$. +Let $\sequence {x_n}$ be a [[Definition:Cauchy Sequence (Metric Space)|Cauchy sequence]] in $\struct {\Z_{>0}, d}$. +Then: +:$\exists m, n \in \Z_{>0}: \forall r > n: x_r = m$ +That is, $\sequence {x_n}$ is eventually [[Definition:Constant|constant]]. +\end{theorem} + +\begin{proof} +Let $\sequence {x_n}$ be a [[Definition:Cauchy Sequence (Metric Space)|Cauchy sequence]] in $\struct {\Z_{>0}, d}$. +By definition: +:$\forall \epsilon \in \R_{>0}: \exists N \in \N: \forall m, n \in \N: m, n \ge N: \map d {x_n, x_m} < \epsilon$ +Let $\epsilon < 1$, say: $\epsilon = \dfrac 1 2$. +By the definition of $d$: +:$\forall m, n \in \N: x_m \ne x_n \implies \map d {x_m, x_n} \ge 1$ +So the only possible way for: +:$\forall m, n \in \N: m, n \ge N: \map d {x_n, x_m} < \epsilon$ +is for $x_m = x_n$. +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Positive Integers under Usual Metric is Complete Metric Space} +Tags: Euclidean Metric, Complete Metric Spaces + +\begin{theorem} +Let $\Z_{>0}$ be the [[Definition:Set|set]] of [[Definition:Strictly Positive Integer|(strictly) positive integers]]. +Let $d: \Z_{>0} \times \Z_{>0} \to \R$ be the [[Definition:Euclidean Metric on Real Number Line|usual (Euclidean) metric]] on $\Z_{>0}$. +Then $\struct {\Z_{>0}, d}$ is a [[Definition:Complete Metric Space|complete metric space]]. +\end{theorem} + +\begin{proof} +Let $\sequence {x_n}$ be a [[Definition:Cauchy Sequence (Metric Space)|Cauchy sequence]] in $\struct {\Z_{>0}, d}$. 
+From [[Cauchy Sequence in Positive Integers under Usual Metric is eventually Constant]]: +:$\sequence {x_n}$ is a [[Definition:Convergent Sequence (Metric Space)|convergent sequence]] to some $n \in \Z_{>0}$. +Hence the result by definition of [[Definition:Complete Metric Space|complete metric space]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Cauchy Sequence in Positive Integers under Scaled Euclidean Metric} +Tags: Scaled Euclidean Metric, Cauchy Sequences + +\begin{theorem} +Let $\Z_{>0}$ be the [[Definition:Set|set]] of [[Definition:Strictly Positive Integer|(strictly) positive integers]]. +Let $\delta: \Z_{>0} \times \Z_{>0} \to \R$ be the [[Definition:Scaled Euclidean Metric|scaled Euclidean metric]] on $\Z_{>0}$ defined as: +:$\forall x, y \in \Z_{>0}: \map \delta {x, y} = \dfrac {\size {x - y} } {x y}$ +The [[Definition:Sequence|sequence]] $\sequence {x_n}$ in $\Z_{>0}$ defined as: +:$\forall n \in \N: x_n = n$ +is a [[Definition:Cauchy Sequence (Metric Space)|Cauchy sequence]] in $\struct {\Z_{>0}, \delta}$. +\end{theorem} + +\begin{proof} +For a general $x_m, x_n \in \sequence {x_n}$ as defined: +{{begin-eqn}} +{{eqn | l = \map \delta {x, y} + | r = \frac {\size {x_m - x_n} } {x_m x_n} + | c = Definition of $\delta$ +}} +{{eqn | r = \size {\frac 1 {x_m} - \frac 1 {x_n} } + | c = algebra +}} +{{eqn | n = 1 + | r = \size {\frac 1 m - \dfrac 1 n} + | c = Definition of $\sequence {x_n}$ +}} +{{end-eqn}} +Let $\epsilon \in \R_{>0}$. +Then by the [[Archimedean Principle]]: +:$\exists N \in \N: N > \dfrac 1 \epsilon$ +from which it follows that: +:$\epsilon > \dfrac 1 N$ +Thus: +{{begin-eqn}} +{{eqn | lo= \forall m, n \in \N: + | l = m, n + | o = > + | r = N + | c = +}} +{{eqn | ll= \leadsto + | l = \map \delta {x_m, x_n} + | r = \size {\frac 1 m - \frac 1 n} + | c = from $(1)$ above +}} +{{eqn | o = < + | r = \max \set {\frac 1 m, \frac 1 n} + | c = +}} +{{eqn | o = < + | r = \frac 1 N + | c = +}} +{{eqn | o = < + | r = \epsilon + | c = +}} +{{end-eqn}} +Therefore $\sequence {x_n}$ is a [[Definition:Cauchy Sequence (Metric Space)|Cauchy sequence]] in $\struct {\Z_{>0}, \delta}$. +{{qed}} +\end{proof}<|endoftext|> +\section{Positive Integers under Scaled Euclidean Metric is not Complete Metric Space} +Tags: Scaled Euclidean Metric, Complete Metric Spaces + +\begin{theorem} +Let $\Z_{>0}$ be the [[Definition:Set|set]] of [[Definition:Strictly Positive Integer|(strictly) positive integers]]. +Let $\delta: \Z_{>0} \times \Z_{>0} \to \R$ be the [[Definition:Scaled Euclidean Metric|scaled Euclidean metric]] on $\Z_{>0}$ defined as: +:$\forall x, y \in \Z_{>0}: \map \delta {x, y} = \dfrac {\size {x - y} } {x y}$ +Then $\struct {\Z_{>0}, \delta}$ is not a [[Definition:Complete Metric Space|complete metric space]]. +\end{theorem} + +\begin{proof} +Consider the [[Definition:Sequence|sequence]] $\sequence {x_n}$ in $\Z_{>0}$ defined as: +:$\forall n \in \N: x_n = n$ +From [[Cauchy Sequence in Positive Integers under Scaled Euclidean Metric]]: +:$\sequence {x_n}$ is a [[Definition:Cauchy Sequence (Metric Space)|Cauchy sequence]] in $\struct {\Z_{>0}, \delta}$. +But $\sequence {x_n}$ is not [[Definition:Convergent Sequence (Metric Space)|convergent]] to any $m \in \Z_{>0}$. +Hence the result, by definition of [[Definition:Complete Metric Space|complete metric space]]. 
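+To justify the claim that $\sequence {x_n}$ is not [[Definition:Convergent Sequence (Metric Space)|convergent]]: for any given $m \in \Z_{>0}$ and all $n > 2 m$:
+:$\map \delta {x_n, m} = \dfrac {n - m} {n m} = \dfrac 1 m - \dfrac 1 n > \dfrac 1 {2 m}$
+so $\map \delta {x_n, m}$ does not tend to $0$ as $n \to \infty$.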
+{{qed}} +\end{proof}<|endoftext|> +\section{Subset of Euclidean Plane whose Product of Coordinates are Greater Than or Equal to 1 is Closed} +Tags: Closed Sets, Real Number Plane with Euclidean Topology + +\begin{theorem} +Let $\struct {\R^2, \tau_d}$ be the [[Definition:Real Number Plane with Euclidean Topology|real number plane with the usual (Euclidean) topology]]. +Let $A \subseteq R^2$ be the [[Definition:Set|set]] of all points defined as: +:$A := \set {\tuple {x, y} \in \R^2: x y \ge 1}$ +Then $A$ is a [[Definition:Closed Set (Topology)|closed set]] in $\struct {\R^2, d}$. +\end{theorem} + +\begin{proof} +By definition, $\tau_d$ is the [[Definition:Topology Induced by Metric|topology induced]] by the [[Definition:Euclidean Metric on Real Number Plane|Euclidean metric]] $d$. +Consider the [[Definition:Relative Complement|complement of $A$ in $\R^2$]]: +:$A' := \R^2 \setminus A$ +Thus: +:$A := \set {\tuple {x, y} \in \R^2: x y < 1}$ +Let $a = \tuple {x_a, y_a} \in A^2$. +Let $\epsilon = \size {1 - x_a y_a}$. +Then the [[Definition:Open Ball of Metric Space|open $\epsilon$-ball]] of $a$ in $\R^2$ lies entirely in $A'$. +As $a$ is arbitrary, it follows that any such $a$ has an [[Definition:Open Ball of Metric Space|open $\epsilon$-ball]] of $a$ in $\R^2$ which lies entirely in $A'$. +Thus, by definition, $A'$ is [[Definition:Open Set of Metric Space|open]] in $\R^2$. +So, also by definition, $A$ is [[Definition:Closed Set of Metric Space|closed]] in $\R^2$. +{{qed}} +\end{proof}<|endoftext|> +\section{Projection on Real Euclidean Plane is Open Mapping} +Tags: Real Number Plane with Euclidean Topology, Open Mappings + +\begin{theorem} +Let $\struct {\R^2, d}$ be the [[Definition:Real Number Plane with Euclidean Topology|real number plane with the usual (Euclidean) topology]]. +Let $\rho: \R^2 \to \R$ be the [[Definition:First Projection|first projection]] on $\R^2$ defined as: +:$\forall \tuple{x, y} \in \R^2: \map \rho {x, y} = x$ +Then $\rho$ is an [[Definition:Open Mapping|open mapping]]. +The same applies with the [[Definition:Second Projection|second projection]] on $\R^2$. +\end{theorem} + +\begin{proof} +By definition, the [[Definition:Real Number Plane with Euclidean Topology|real number plane with the usual (Euclidean) topology]] on $\R^2$ is the [[Definition:Product Space (Topology)|product space]] of $\struct {\R, d}$ with $\struct {\R, d}$, where $\struct {\R, d}$ is the [[Definition:Real Number Line with Euclidean Topology|real number line with the usual (Euclidean) topology]] +The result follows from [[Projection from Product Topology is Open]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Projection on Real Euclidean Plane is not Closed Mapping} +Tags: Real Number Plane with Euclidean Topology, Closed Mappings + +\begin{theorem} +Let $\struct {\R^2, d}$ be the [[Definition:Real Number Plane with Euclidean Topology|real number plane with the usual (Euclidean) topology]]. +Let $\rho: \R^2 \to \R$ be the [[Definition:First Projection|first projection]] on $\R^2$ defined as: +:$\forall \tuple {x, y} \in \R^2: \map \rho {x, y} = x$ +Then $\rho$ is not a [[Definition:Closed Mapping|closed mapping]]. +The same applies with the [[Definition:Second Projection|second projection]] on $\R^2$. 
+\end{theorem} + +\begin{proof} +Consider the [[Definition:Set|set]] $A \subseteq R^2$ of all points defined as: +:$A := \set {\tuple {x, y} \in \R^2: x y \ge 1}$ +By [[Subset of Euclidean Plane whose Product of Coordinates are Greater Than or Equal to 1 is Closed]]: +:$A$ is a [[Definition:Closed Set (Topology)|closed set]] in $\struct {\R^2, d}$. +By inspection, it can be seen that the [[Definition:Image of Subset under Mapping|image of $A$ under $\rho$]] is: +:$\rho \sqbrk A = \openint \gets 0 \cup \openint 0 \to$ +which by [[Union of Open Sets of Metric Space is Open]] is [[Definition:Open Set (Topology)|open]]. +Hence the result by definition of [[Definition:Closed Mapping|closed mapping]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Complement of Set of Rational Pairs in Real Euclidean Plane is Arc-Connected} +Tags: Euclidean Space, Arc-Connected Spaces + +\begin{theorem} +Let $\struct {\R^2, d}$ be the [[Definition:Real Number Plane with Euclidean Topology|real number plane with the usual (Euclidean) topology]]. +Let $S \subseteq \R^2$ be the [[Definition:Subset|subset]] of $\R^2$ defined as: +:$\forall x, y \in \R^2: \tuple {x, y} \in S \iff x, y \in \Q$ +Hence let $A := \R^2 \setminus S$: +:$\tuple {x, y} \in A$ {{iff}} either $x$ or $y$ or both is [[Definition:Irrational Number|irrational]]. +Then $A$ is [[Definition:Arc-Connected|arc-connected]]. +\end{theorem} + +\begin{proof} +Let $\tuple {a, b} \in A$. +Consider any point $\tuple {x_1, y_1} \in A$ whose [[Definition:Coordinate (Coordinate System)|coordinates]] are both [[Definition:Irrational Number|irrational]]. +By definition, either $a$ or $b$ is [[Definition:Irrational Number|irrational]]. +{{WLOG}} suppose $a$ is [[Definition:Irrational Number|irrational]]. +Then the [[Definition:Set Union|union]] of the [[Definition:Straight Line|straight lines]] $x = a, y = y_1$ is an [[Definition:Arc-Connected|arc-connected]] [[Definition:Subset|subset]] of $A$ connecting $\tuple {x_1, y_1}$ to $\tuple {a, b}$. +Hence any point in $A$ can be connected to $\tuple {x_1, y_1}$ by an [[Definition:Arc (Topology)|arc]]. +Hence the result, by definition of [[Definition:Arc-Connected|arc-connected]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Empty Set is Compact Space} +Tags: Compact Spaces, Empty Set + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Then the [[Definition:Empty Set|empty set]] $\varnothing$ is a [[Definition:Compact Subspace|compact subspace]] of $T$. +\end{theorem} + +\begin{proof} +Recall the definition of [[Definition:Compact Subspace|compact subspace]]: +:$\left({\varnothing, \tau_\varnothing}\right)$ is '''compact in $T$''' {{iff}} every [[Definition:Open Cover|open cover]] $\mathcal C \subseteq \tau_\varnothing$ for $\varnothing$ has a [[Definition:Finite Subcover|finite subcover]]. +The only [[Definition:Open Cover|open cover]] for $\varnothing$ that is contained in $\varnothing$ is $\left\{{\varnothing}\right\}$ itself. +This has only one [[Definition:Finite Subcover|finite subcover]], and that is $\left\{{\varnothing}\right\}$. +This is a [[Definition:Finite Subcover|finite subcover]]. +Hence the result, by definition of [[Definition:Compact Subspace|compact subspace]]. 
+{{qed}} +[[Category:Compact Spaces]] +[[Category:Empty Set]] +40suo9x8c2qi1cy4gxxnfxno2z386bq +\end{proof}<|endoftext|> +\section{Alexandroff Extension is Topology} +Tags: Alexandroff Extensions + +\begin{theorem} +Let $T = \struct {S, \tau}$ be a [[Definition:Non-Empty Set|non-empty]] [[Definition:Topological Space|topological space]]. +Let $p$ be a [[Definition:New Element|new element]] not in $S$. +Let $S^* := S \cup \set p$. +Let $T^* = \struct {S^*, \tau^*}$ be the [[Definition:Alexandroff Extension|Alexandroff extension]] on $S$. +Then $\tau^*$ is a [[Definition:Topology|topology]] on $S^*$. +\end{theorem} + +\begin{proof} +Recall the definition of the [[Definition:Alexandroff Extension|Alexandroff extension]] on $S$: +$U$ is [[Definition:Open Set (Topology)|open]] in $T^*$ {{iff}}: +:$U$ is an [[Definition:Open Set (Topology)|open set]] of $T$ +or +:$U$ is the [[Definition:Relative Complement|complement]] in $T^*$ of a [[Definition:Closed Set (Topology)|closed]] and [[Definition:Compact Topological Subspace|compact subset]] of $T$. +Each of the [[Definition:Open Set Axioms|open set axioms]] is examined in turn: +=== $\text O 1$: Union of Open Sets === +Let $\family {U_i}_{i \mathop \in I}$ be an [[Definition:Indexed Family|indexed family]] of [[Definition:Open Set (Topology)|open sets]] of $T^*$. +Some (perhaps all, perhaps none) of the $U_i$ are [[Definition:Open Set (Topology)|open sets]] of $T$. +The rest of them are each the [[Definition:Relative Complement|complement]] of a [[Definition:Closed Set (Topology)|closed]] and [[Definition:Compact Topological Subspace|compact subset]] of $T$. +Let $J \subseteq I$ be the [[Definition:Subset|subset]] of $I$ consisting of the [[Definition:Index (Indexing Set)|indices]] of the former [[Definition:Open Set (Topology)|open sets]] of $T^*$. +Let $K = I \setminus J$ be the [[Definition:Subset|subset]] of $I$ consisting of the [[Definition:Index (Indexing Set)|indices]] of the latter [[Definition:Open Set (Topology)|open sets]] of $T^*$. +Let $\displaystyle \UU_J = \bigcup_{j \mathop \in J} U_j$ be the [[Definition:Union of Family|union]] of $\family {U_j}_{j \mathop \in J}$. +By definition, each $S \setminus U_j$ is [[Definition:Closed Set (Topology)|closed]] in $T$. +From [[Intersection of Closed Sets is Closed in Topological Space]]: +:$\displaystyle \VV_J := \bigcap_{j \mathop \in J} \paren {S \setminus U_j}$ is [[Definition:Closed Set (Topology)|closed]] in $T$ +By [[De Morgan's Laws (Set Theory)/Set Difference/Family of Sets/Difference with Union|De Morgan's Laws]]: +:$\displaystyle S \setminus \UU_J = \VV_J = \bigcap_{j \mathop \in J} \paren {S \setminus U_j}$ +By definition of [[Definition:Closed Set (Topology)|closed set]] it follows that $\UU_J$ is [[Definition:Open Set (Topology)|open]] in $T$. +By definition of the [[Definition:Alexandroff Extension|Alexandroff extension]] on $S$, it follows that $\UU_J$ is [[Definition:Open Set (Topology)|open]] in $T^*$. +Let $\displaystyle \UU_K = \bigcup_{k \mathop \in K} U_k$ be the [[Definition:Union of Family|union]] of $\family {U_k}_{k \mathop \in K}$. +Let $m \in J$ be arbitrary. +Let $\displaystyle \UU_K' = \bigcup_{\substack {k \mathop \in K \\ k \mathop \ne m} } U_k$. 
+Then by [[De Morgan's Laws]]: +:$\displaystyle S \setminus \UU_K' = \bigcap_{\substack {k \mathop \in K \\ k \mathop \ne m} } \paren {S \setminus U_k}$ +Let: +:$\displaystyle \VV_K' := S \setminus \UU_K' = \bigcap_{\substack {k \mathop \in K \\ k \mathop \ne m} } \paren {S \setminus U_k}$ +Each of $S \setminus U_k$ is [[Definition:Closed Set (Topology)|closed]] and [[Definition:Compact Topological Subspace|compact]] in $T$. +From [[Intersection of Closed Sets is Closed in Topological Space]]: +:$\VV_K'$ is [[Definition:Closed Set (Topology)|closed]] in $T$. +But $S \setminus U_m$ is also [[Definition:Closed Set (Topology)|closed]] in $T$. +$S \setminus U_m$ is also [[Definition:Compact Topological Subspace|compact]] in $T$. +Let $\VV_K := \VV_K' \cap \paren {S \setminus U_m}$. +So from [[Intersection of Closed Sets is Closed in Topological Space]]: +:$\VV_K$ is [[Definition:Closed Set (Topology)|closed]] in $T$ +and from [[Intersection of Closed Set with Compact Subspace is Compact]]: +:$\VV_K$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +But: +:$\UU_K = S \setminus \VV_K$ +and so by definition $\UU_K$ is [[Definition:Open Set (Topology)|open]] in $T^*$. +Finally: +{{begin-eqn}} +{{eqn | l = \displaystyle \UU = \bigcup_{i \mathop \in I} U_i + | r = \UU_J \cup \UU_K + | c = Definition of $\UU_J$ and $\UU_K$ +}} +{{eqn | ll= \leadsto + | l = S \setminus \UU + | r = S \setminus \paren {\UU_J \cup \UU_K} + | c = +}} +{{eqn | r = \paren {S \setminus \UU_J} \cap \paren {S \setminus \UU_K} + | c = +}} +{{end-eqn}} +By definition, both $\paren {S \setminus \UU_J}$ and $\paren {S \setminus \UU_K}$ have been demonstrated to be [[Definition:Closed Set (Topology)|closed]] in $T^*$. +So by [[Intersection of Closed Sets is Closed in Topological Space]]: +:$\paren {S \setminus \UU_J} \cap \paren {S \setminus \UU_K}$ is [[Definition:Closed Set (Topology)|closed]] in $T^*$. +Thus $\UU = \UU_J \cup \UU_K$ is an [[Definition:Open Set (Topology)|open set]] of $T^*$. +{{qed|lemma}} +=== $\text O 2$: Intersection of Open Sets === +Let $U_1$ and $U_2$ be [[Definition:Open Set (Topology)|open sets]] of $T^*$. +$(1): \quad$ Suppose $U_1$ and $U_2$ are both [[Definition:Open Set (Topology)|open sets]] of $T$. +Then as $T$ is a [[Definition:Topological Space|topological space]], $U_1 \cap U_2$ is [[Definition:Open Set (Topology)|open]] in $T$. +By definition of the [[Definition:Alexandroff Extension|Alexandroff extension]] on $S$, it follows that $U_1 \cap U_2$ is [[Definition:Open Set (Topology)|open]] in $T^*$. +$(2): \quad$ Suppose that neither $U_1$ and $U_2$ is an [[Definition:Open Set (Topology)|open set]] of $T$. +Then both of their [[Definition:Relative Complement|complements]] $S \setminus U_1$ and $S \setminus U_2$ in $S$ are both [[Definition:Closed Set (Topology)|closed]] and [[Definition:Compact Topological Subspace|compact]] in $T$. +From [[Finite Union of Compact Sets is Compact]], $\paren {S \setminus U_1} \cup \paren {S \setminus U_2}$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +From [[Finite Union of Closed Sets is Closed in Topological Space]], $\paren {S \setminus U_1} \cup \paren {S \setminus U_2}$ is [[Definition:Closed Set (Topology)|closed]] in $T$. 
+But by [[De Morgan's Laws (Set Theory)/Set Difference/Difference with Intersection|De Morgan's Laws]]: +:$S \setminus \paren {U_1 \cap U_2} = \paren {S \setminus U_1} \cup \paren {S \setminus U_2}$ +Thus $S \setminus \paren {U_1 \cap U_2}$ is both [[Definition:Closed Set (Topology)|closed]] and [[Definition:Compact Topological Subspace|compact]] in $T$. +Hence by definition $U_1 \cap U_2$ is an [[Definition:Open Set (Topology)|open set]] in $T^*$. +$(3): \quad$ Suppose that either $U_1$ or $U_2$ (but not both) is an [[Definition:Open Set (Topology)|open set]] of $T$. +{{WLOG}}, suppose $U_1$ is an [[Definition:Open Set (Topology)|open set]] of $T$ and $U_2$ is not. +As $U_1$ is an [[Definition:Open Set (Topology)|open set]] of $T$ it follows that $p \notin U_1$. +Thus it follows that $p \notin U_1 \cap U_2$. +We have that $S^* \setminus U_2$ is not an [[Definition:Open Set (Topology)|open set]] of $T$. +Thus $S^* \setminus U_2$ is both [[Definition:Closed Set (Topology)|closed]] and [[Definition:Compact Topological Subspace|compact]] in $T$. +From [[Finite Union of Closed Sets is Closed in Topological Space]], $\paren {S \setminus U_1} \cup \paren {S \setminus U_2}$ is [[Definition:Closed Set (Topology)|closed]] in $T$. +But by [[De Morgan's Laws (Set Theory)/Set Difference/Difference with Intersection|De Morgan's Laws]]: +:$S \setminus \paren {U_1 \cap U_2} = \paren {S \setminus U_1} \cup \paren {S \setminus U_2}$ +Thus $S \setminus \paren {U_1 \cap U_2}$ is [[Definition:Closed Set (Topology)|closed]] in $T$. +But as $p \notin S_1 \cap S^2$ it follows that $U_1 \cap U_2$ is an [[Definition:Open Set (Topology)|open set]] of $T$. +Hence by definition $U_1 \cap U_2$ is an [[Definition:Open Set (Topology)|open set]] of $T^*$. +{{qed|lemma}} +=== $\text O 3$: Underlying Set === +From [[Relative Complement with Self is Empty Set]]: +:the [[Definition:Relative Complement|complement of $S^*$ relative to $S^*$]] is $\O$. +From [[Empty Set is Compact Space]], $\O$ is a [[Definition:Compact Topological Subspace|compact subspace]] of $T^*$. +Hence by definition of the [[Definition:Alexandroff Extension|Alexandroff extension]], $S^*$ is [[Definition:Open Set (Topology)|open]] in $T^*$. +{{qed|lemma}} +All the [[Definition:Open Set Axioms|open set axioms]] are fulfilled, and the result follows. +{{qed}} +\end{proof}<|endoftext|> +\section{Intersection of Closed Set with Compact Subspace is Compact} +Tags: Compact Spaces, Closed Sets, Intersection of Closed Set with Compact Subspace is Compact + +\begin{theorem} +Let $T = \struct {S, \tau}$ be a [[Definition:Topological Space|topological space]]. +Let $H \subseteq S$ be [[Definition:Closed Set (Topology)|closed]] in $T$. +Let $K \subseteq S$ be [[Definition:Compact Topological Subspace|compact]] in $T$. +Then $H \cap K$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +\end{theorem} + +\begin{proof} +Let $\tau_K$ be the [[Definition:Subspace Topology|subspace topology]] on $K$. +Let $T_K = \left({K, \tau_K}\right)$ be the [[Definition:Topological Subspace|topological subspace]] determined by $K$. +By [[Closed Set in Topological Subspace]], $H \cap K$ is [[Definition:Closed Set (Topology)|closed]] in $T_K$. +By [[Closed Subspace of Compact Space is Compact]], $H \cap K$ is [[Definition:Compact Topological Subspace|compact]] in $T_K$. +By [[Compact in Subspace is Compact in Topological Space]], $H \cap K$ is [[Definition:Compact Topological Subspace|compact]] in $T$. 
+{{qed}} +\end{proof} + +\begin{proof} +Let $\left\langle{U_\alpha}\right\rangle$ be an [[Definition:Open Cover|open cover]] of $H \cap K$: +:$\displaystyle H \cap K \subseteq \bigcup_\alpha U_\alpha$ +Then: +:$\displaystyle K \subseteq \bigcup_\alpha U_\alpha \cup \left({S \setminus H}\right)$ +Since $H$ is [[Definition:Closed Set (Topology)|closed]] in $T$, $\left({S \setminus H}\right)$ is [[Definition:Open Set (Topology)|open]] in $T$. +Hence $\left\langle{U_\alpha}\right\rangle \cup S \setminus H$ is an [[Definition:Open Cover|open cover]] of $H$. +We have that $K$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +It follows by definition that a [[Definition:Finite Subcover|finite subcover]]: +:$\left\{{U_{\alpha_1}, U_{\alpha_2}, \ldots, U_{\alpha_n}, S \setminus H}\right\}$ +of $H$ exists. +Thus: +:$H \cap K \subseteq \left\{{U_{\alpha_1}, U_{\alpha_2}, \ldots, U_{\alpha_n}}\right\}$ +and $H \cap K$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +{{qed}} +\end{proof}<|endoftext|> +\section{Finite Union of Compact Sets is Compact} +Tags: Union, Set Union, Compact Spaces, Set Union + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $n \in \N$ be a [[Definition:Natural Number|natural number]]. +Let $\left\langle{U_i}\right\rangle_{1 \mathop \le i \mathop \le n}$ be a [[Definition:Finite Sequence|finite sequence]] of [[Definition:Compact Topological Subspace|compact subsets]] of $T$. +Let $\mathcal U_n := \displaystyle \bigcup_{i \mathop = 1}^n U_i$ be the [[Definition:Union of Family|union]] of $\left\langle{U_i}\right\rangle$. +Then $\mathcal U_n$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +\end{theorem} + +\begin{proof} +Proof by [[Principle of Mathematical Induction|induction]]: +For all $n \in \N_{> 0}$, let $P \left({n}\right)$ be the [[Definition:Proposition|proposition]]: +:$\mathcal U_n := \displaystyle \bigcup_{i \mathop = 1}^n U_i$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +$P \left({0}\right)$ is the case: +:$\mathcal U_0 := \displaystyle \bigcup_{i \mathop = 1}^0 U_i$ +From [[Union of Empty Set]]: +:$\displaystyle \bigcup_{i \mathop = 1}^0 U_i = \varnothing$ +From [[Empty Set is Compact Space]] it follows that: +:$\mathcal U_0$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +$P \left({1}\right)$ is true, as this just says: +:$U_1$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +=== Basis for the Induction === +$P \left({2}\right)$ is the case: +:$U_1 \cup U_2$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +which is proved as follows: +Let $\mathcal C$ be an [[Definition:Open Cover|open cover]] of $U_1 \cup U_2$. +Then $\mathcal C$ is an [[Definition:Open Cover|open cover]] of both $U_1$ and $U_2$. +As $U_1$ and $U_2$ are both [[Definition:Compact Topological Subspace|compact]] in $T$: +:$U_1$ has a [[Definition:Finite Subcover|finite subcover]] $C_1$ of $\mathcal C$ +:$U_2$ has a [[Definition:Finite Subcover|finite subcover]] $C_2$ of $\mathcal C$. +Their [[Definition:Set Union|union]] $C_1 \cup C_2$ is a [[Definition:Finite Subcover|finite subcover]] of $\mathcal C$ for $U_1 \cup U_2$. +From [[Union of Finite Sets is Finite]] it follows that $C_1 \cup C_2$ is [[Definition:Finite Subcover|finite]]. +As $\mathcal C$ is arbitrary, it follows by definition that $U_1$ and $U_2$ is [[Definition:Compact Topological Subspace|compact]] in $T$. 
+This is our [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $P \left({k}\right)$ is true, where $k \ge 2$, then it logically follows that $P \left({k+1}\right)$ is true. +So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$\mathcal U_k := \displaystyle \bigcup_{i \mathop = 1}^k U_i$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +Then we need to show: +:$\mathcal U_{k+1} := \displaystyle \bigcup_{i \mathop = 1}^{k+1} U_i$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +=== Induction Step === +This is our [[Principle of Mathematical Induction#Induction Step|induction step]]: +We have that: +:$\displaystyle \bigcup_{i \mathop = 1}^{k+1} U_i = \left({\bigcup_{i \mathop = 1}^k U_i}\right) \cup U_{k+1}$ +By the [[Finite Union of Compact Sets is Compact#Induction Hypothesis|induction hypothesis]]: +:$\mathcal U_k$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +By the [[Finite Union of Compact Sets is Compact#Basis for the Induction|basis for the induction]]: +:$\mathcal U_k \cup U_{k+1}$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +So $P \left({k}\right) \implies P \left({k+1}\right)$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$\displaystyle \forall n \in \N: \bigcup_{i \mathop = 1}^n U_i$ is [[Definition:Compact Topological Subspace|compact]] in $T$. +{{qed}} +[[Category:Compact Spaces]] +[[Category:Set Union]] +izenikmugsid70js3fh0qx26zm046b3 +\end{proof}<|endoftext|> +\section{Principle of Mathematical Induction} +Tags: Number Theory, Named Theorems, Mathematical Induction, Proof Techniques, Principle of Mathematical Induction + +\begin{theorem} +Let $\map P n$ be a [[Definition:Propositional Function|propositional function]] depending on $n \in \Z$. +Let $n_0 \in \Z$ be given. +Suppose that: +:$(1): \quad \map P {n_0}$ is [[Definition:True|true]] +:$(2): \quad \forall k \in \Z: k \ge n_0 : \map P k \implies \map P {k + 1}$ +Then: +:$\map P n$ is [[Definition:True|true]] for all $n \in \Z$ such that $n \ge n_0$. +\end{theorem} + +\begin{proof} +Let $\Z_{\ge n_0}$ denote the [[Definition:Set|set]]: +:$S = \set {n \in \Z: n \ge n_0}$ +Let $S$ be the [[Definition:Set|set]] of [[Definition:Integer|integers]] defined as: +:$S = \set {n \in \Z_{\ge n_0}: \map P n}$ +That is, the set of all [[Definition:Integer|integers]] for which $n \ge n_0$ and for which $\map P n$ holds. +From [[Subset of Set with Propositional Function]] we have that: +:$S \subseteq \Z_{\ge n_0}$ +From $(1)$ we have that $\map P {n_0}$. +Hence $n_0 \in S$. +Let $k \in S$. +Then $\map P k$ holds. +But by $(2)$, $\map P {k + 1}$ also holds. +This implies $k + 1 \in S$. +So as: +:$S \subseteq \Z_{\ge n_0}$ +and: +:$S$ satisfies $(1)$ and $(2)$ +it follows by the [[Principle of Finite Induction]] that $S = \Z_{\ge n_0}$. +Hence for all $n \ge n_0$, $\map P n$ holds. +{{qed}} +\end{proof}<|endoftext|> +\section{Principle of Finite Induction} +Tags: Mathematical Induction, Proof Techniques, Principle of Finite Induction + +\begin{theorem} +Let $S \subseteq \Z$ be a [[Definition:Subset|subset]] of the [[Definition:Integer|integers]]. +Let $n_0 \in \Z$ be given. 
+Suppose that: +:$(1): \quad n_0 \in S$ +:$(2): \quad \forall n \ge n_0: n \in S \implies n + 1 \in S$ +Then: +:$\forall n \ge n_0: n \in S$ +That is: +:$S = \set {n \in \Z: n \ge n_0}$ +\end{theorem} + +\begin{proof} +Let $\Z_{\ge n_0} := \set {n \in \Z: n \ge n_0}$. +{{AimForCont}} $S \ne \Z_{\ge n_0}$. +Let $S' = \Z_{\ge n_0} \setminus S$. +Because $S \ne \Z_{\ge n_0}$ and $S \subseteq \Z_{\ge n_0}$, we have that $S' \ne \O$. +By definition, $\Z_{\ge n_0}$ is [[Definition:Bounded Below Set|bounded below]] by $n_0$. +From [[Set of Integers Bounded Below by Integer has Smallest Element]], $S'$ has a [[Definition:Minimal Element|minimal element]]. +Let $k$ be this [[Definition:Minimal Element|minimal element]] of $S'$. +By $(1)$ we have that: +:$n_0 \in S$ +and so: +:$n_0 \notin S'$ +Hence: +:$k \ne n_0$ +and so: +:$k > n_0$ +It follows that: +:$k - 1 \le n_0$ +Because $k$ is the [[Definition:Minimal Element|minimal element]] of $S'$: +:$k - 1 \notin S'$ +and so: +:$k - 1 \in S$ +But by $(2)$: +:$\paren {k - 1} + 1 = k \in S$ +So we have: +:$k \in S$ +and: +:$k \notin S$ +Hence by [[Proof by Contradiction]] $S = \Z_{\ge n_0}$. +{{qed}} +\end{proof} + +\begin{proof} +{{questionable|This only takes on board a subset of $\N$, where we need a subset of $\Z$}} +Consider $\N$ defined as a [[Definition:Naturally Ordered Semigroup|naturally ordered semigroup]]. +The result follows directly from [[Principle of Mathematical Induction for Naturally Ordered Semigroup/General Result|Principle of Mathematical Induction for Naturally Ordered Semigroup: General Result]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Principle of Mathematical Induction/Peano Structure} +Tags: Peano's Axioms, Principle of Mathematical Induction + +\begin{theorem} +Let $\struct {P, s, 0}$ be a [[Definition:Peano Structure|Peano structure]]. +Let $\map Q n$ be a [[Definition:Propositional Function|propositional function]] depending on $n \in P$. +Suppose that: +:$(1): \quad \map Q 0$ is [[Definition:True|true]] +:$(2): \quad \forall n \in P: \map Q n \implies \map Q {\map s n}$ +Then: +:$\forall n \in P: \map Q n$ +\end{theorem} + +\begin{proof} +Let $A \subseteq P$ be defined by: +:$A := \set {n \in P: \map Q n}$ +From $(1)$, $0 \in A$. +From $(2)$: +:$\forall n \in P: n \in A \implies \map s n \in A$ +As this holds for all $n \in P$, it holds [[Definition:A Fortiori|a fortiori]] for all $n \in A$. +Thus the condition: +:$n \in A \implies \map s n \in A$ +is satisfied. +So by [[Axiom:Peano's Axioms|Axiom $(\text P 5)$ of the Peano Axioms]]: +:$A = P$ +That is: +:$\forall n \in P: \map Q n$ +{{qed}} +\end{proof}<|endoftext|> +\section{Principle of Finite Induction/Peano Structure} +Tags: Peano's Axioms + +\begin{theorem} +Let $\struct {P, s, 0}$ be a [[Definition:Peano Structure|Peano structure]]. +Let $S \subseteq P$. +Suppose that: +:$(1): \quad 0 \in S$ +:$(2): \quad \forall n: n \in S \implies \map s n \in S$ +Then: +:$S = P$ +\end{theorem} + +\begin{proof} +This is nothing but a reformulation of Axiom $(P5)$ of the [[Axiom:Peano's Axioms|Peano Axioms]]. +{{qed}} +[[Category:Peano's Axioms]] +o33monttem5erz7x2wmjus9jyovdugs +\end{proof}<|endoftext|> +\section{Principle of Mathematical Induction/Naturally Ordered Semigroup} +Tags: Naturally Ordered Semigroup, Principle of Mathematical Induction + +\begin{theorem} +Let $\struct {S, \circ, \preceq}$ be a [[Definition:Naturally Ordered Semigroup|naturally ordered semigroup]]. +Let $T \subseteq S$ such that $0 \in T$ and $n \in T \implies n \circ 1 \in T$. +Then $T = S$. 
+\end{theorem} + +\begin{proof} +{{AimForCont}} that $T \subsetneq S$. +That is, $T$ is a [[Definition:Proper Subset|proper subset]] of $S$: +: $T \ne S$ +Let $T' = S \setminus T$. +Then by [[Set Difference with Proper Subset]]: +:$T' \ne \O$ +By [[Definition:Naturally Ordered Semigroup Axioms|axiom $NO1$]], $S$ is [[Definition:Well-Ordered Set|well-ordered]]. +By definition of [[Definition:Well-Ordered Set|well-ordered set]], it follows that $T'$ has a [[Definition:Smallest Element|smallest element]] $x$. +By definition of $T$: +:$0 \in T$ +and so by definition of $T'$: +:$0 \notin T'$ +so: +:$0 \prec x$ +By [[Sum with One is Immediate Successor in Naturally Ordered Semigroup]]: +:$1 \preceq x$ +By the definition of a [[Definition:Naturally Ordered Semigroup|naturally ordered semigroup]]: +:$\exists y \in S: y \circ 1 = x$ +Again by [[Sum with One is Immediate Successor in Naturally Ordered Semigroup]]: +:$y \prec x$ +We have that $x$ is the [[Definition:Smallest Element|smallest element]] of $T'$ and $y \prec x$. +Therefore: +:$y \notin T'$ +and so +:$y \in T$ +But from the definition of $T$: +:$y \in T \implies y \circ 1 = x \in T$ +But then by the definition of $T'$: +:$x \in T' \implies x \notin T$ +From this [[Definition:Contradiction|contradiction]], it follows that: +:$T = S$ +{{qed}} +\end{proof}<|endoftext|> +\section{Consecutive Fibonacci Numbers are Coprime} +Tags: Fibonacci Numbers, Coprime Integers, Proofs by Induction + +\begin{theorem} +Let $F_k$ be the $k$th [[Definition:Fibonacci Numbers|Fibonacci number]]. +Then: +:$\forall n \ge 2: \gcd \set {F_n, F_{n + 1} } = 1$ +where $\gcd \set {a, b}$ denotes the [[Definition:Greatest Common Divisor of Integers|greatest common divisor]] of $a$ and $b$. +That is, a [[Definition:Fibonacci Numbers|Fibonacci number]] and the one next to it are [[Definition:Coprime Integers|coprime]]. +\end{theorem} + +\begin{proof} +From the definition of [[Definition:Fibonacci Numbers|Fibonacci numbers]]: +:$F_1 = 1, F_2 = 1, F_3 = 2, F_4 = 3$ +Proof by [[Principle of Mathematical Induction|induction]]: +For all $n \in \N_{>0}$, let $\map P n$ be the [[Definition:Proposition|proposition]]: +:$\gcd \set {F_n, F_{n + 1} } = 1$ +=== Basis for the Induction === +$\map P 2$ is the case: +:$\gcd \set {F_2, F_3} = \gcd \set {2, 3} = 1$ +Thus $\map P 2$ is seen to hold. +This is our [[Definition:Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $\map P k$ is true, where $k \ge 2$, then it logically follows that $\map P {k + 1}$ is true. +So this is our [[Definition:Induction Hypothesis|induction hypothesis]]: +:$\gcd \set {F_k, F_{k + 1} } = 1$ +Then we need to show: +:$\gcd \set {F_{k + 1}, F_{k + 2} } = 1$ +=== Induction Step === +This is our [[Definition:Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = \gcd \set {F_{k + 1}, F_{k + 2} } + | r = \gcd \set {F_{k + 1}, F_{k + 2} - F_{k + 1} } + | c = [[Common Divisor Divides Integer Combination]] +}} +{{eqn | r = \gcd \set {F_{k + 1}, F_k} + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = \gcd \set {F_k, F_{k + 1} } + | c = +}} +{{eqn | r = 1 + | c = [[Consecutive Fibonacci Numbers are Coprime#Induction Hypothesis|Induction Hypothesis]] +}} +{{end-eqn}} +So $\map P k \implies \map P {k + 1}$ and the result follows by the [[Principle of Mathematical Induction]]. 
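+As an illustrative instance of the induction step, taking $k = 6$:
+:$\gcd \set {F_7, F_8} = \gcd \set {13, 21} = \gcd \set {13, 21 - 13} = \gcd \set {F_6, F_7} = \gcd \set {8, 13} = 1$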
+Therefore: +:$\forall n \ge 2: \gcd \set {F_n, F_{n + 1} } = 1$ +{{qed}} +\end{proof}<|endoftext|> +\section{Divisibility of Fibonacci Number} +Tags: Fibonacci Numbers, Divisors, Proofs by Induction + +\begin{theorem} +:$\forall m, n \in \Z_{> 2} : m \divides n \iff F_m \divides F_n$ +where $\divides$ denotes [[Definition:Divisor of Integer|divisibility]]. +\end{theorem} + +\begin{proof} +From the initial definition of [[Definition:Fibonacci Number|Fibonacci numbers]]: +:$F_1 = 1, F_2 = 1, F_3 = 2, F_4 = 3$ +Let $n = k m - r$ where $0 \le r < m$ +We have: +:$m \divides n \iff r = 0$ +The proof proceeds by [[Principle of Mathematical Induction|induction]] on $k$. +For all $k \in \N_{>0}$, let $\map P k$ be the [[Definition:Proposition|proposition]]: +:$r = 0 \iff F_m \divides F_{k m - r}$ +=== Basis for the Induction === +$\map P 1$ is the case: +:$r = 0 \iff F_m \divides F_{m - r}$ +which holds because $F_{m - r} < F_m$ unless $r = 0$. +This is our [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $\map P k$ is true, where $k > 1$, then it logically follows that $\map P {k + 1}$ is true. +So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$r = 0 \iff F_m \divides F_{k m - r}$ +Then we need to show: +:$r = 0 \iff F_m \divides F_{k m + m - r}$ +=== Induction Step === +This is our [[Principle of Mathematical Induction#Induction Step|induction step]]: +Let $F_{k m - r} = a F_m + b$ where $0 \le b < F_m$. +We have: +:$b = 0 \iff F_m \divides F_{k m - r} \iff r = 0$ +by the [[Divisibility of Fibonacci Number#Induction Hypothesis|induction hypothesis]]. +{{begin-eqn}} +{{eqn | l = F_{k m + m - r} + | r = F_{m - 1} F_{k m - r} + F_m F_{k m - r + 1} + | c = [[Fibonacci Number in terms of Smaller Fibonacci Numbers]] +}} +{{eqn | r = a F_m F_{m - 1} + b F_{m - 1} + F_m F_{k m - r + 1} + | c = +}} +{{eqn | r = F_m \paren {a F_{m - 1} + F_{k m - r + 1} } + b F_{m - 1} + | c = +}} +{{end-eqn}} +We have that $F_{m - 1}$ and $F_m$ are [[Definition:Coprime Integers|coprime]] by [[Consecutive Fibonacci Numbers are Coprime]]. +Let $F_m \divides b F_{m - 1}$. +Then there exists an integer $k$ such that $k F_m \divides b F_{m - 1}$, by [[Definition:Divisor of Integer|the definition of divisibility]]. +Then: +:$\dfrac k b = \dfrac {F_{m - 1} } {F_m}$ +We have that $F_{m - 1}$ and $F_m$ are [[Definition:Coprime Integers|coprime]]. +Thus by [[Coprime Numbers form Fraction in Lowest Terms]]: +:$\dfrac {F_{m - 1} } {F_m}$ is in [[Definition:Canonical Form of Rational Number|canonical form]]. +Then by [[Ratios of Fractions in Lowest Terms]] +:$F_m \divides b$ +Because $0 \le b < F_m$, the only case is when $b = 0$. +Therefore: +:$F_m \divides b F_{m - 1} \iff b = 0$ +Therefore: +:$F_m \divides F_{k m + m - r} \iff F_m \divides b F_{m - 1} \iff b = 0 \iff r = 0$ +So $\map P k \implies \map P {k + 1}$ and the result follows by the [[Principle of Mathematical Induction]]. 
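+As an illustrative check of the statement, taking $m = 4$ and $n = 8$:
+:$4 \divides 8$ and correspondingly $F_4 = 3 \divides 21 = F_8$
+while taking $m = 4$ and $n = 6$:
+:$4 \nmid 6$ and correspondingly $F_4 = 3 \nmid 8 = F_6$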
+Therefore: +:$\forall m, n > 2 : m \divides n \iff F_m \divides F_n$ +{{qed}} +\end{proof}<|endoftext|> +\section{Fibonacci Number in terms of Smaller Fibonacci Numbers} +Tags: Fibonacci Numbers, Fibonacci Number in terms of Smaller Fibonacci Numbers + +\begin{theorem} +:$\forall m, n \in \Z_{>0}: F_{m + n} = F_{m - 1} F_n + F_m F_{n + 1}$ +\end{theorem} + +\begin{proof} +From the initial definition of [[Definition:Fibonacci Number|Fibonacci numbers]], we have: +:$F_1 = 1, F_2 = 1, F_3 = 2, F_4 = 3$ +Proof by [[Principle of Mathematical Induction|induction]]: +For all $n \in \Z_{>0}$, let $P \left({n}\right)$ be the [[Definition:Proposition|proposition]]: +:$\displaystyle \forall m \in \Z_{>0} : F_{m + n} = F_{m - 1} F_n + F_m F_{n + 1}$ +=== Basis for the Induction === +$P \left({1}\right)$ is the case: +{{begin-eqn}} +{{eqn | l = F_{m + 1} + | r = F_{m - 1} + F_m + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = F_{m - 1} \times 1 + F_m \times 1 + | c = +}} +{{eqn | r = F_{m - 1} F_1 + F_m F_2 + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = F_{m - 1} F_n + F_m F_{n + 1} + | c = for $n = 1$ +}} +{{end-eqn}} +and so $P \left({1}\right)$ is seen to hold. +$P \left({2}\right)$ is the case: +{{begin-eqn}} +{{begin-eqn}} +{{eqn | l = F_{m + 2} + | r = F_{m + 1} + F_m + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = F_{m - 1} + F_m + F_m + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = F_{m - 1} \times 1 + F_m \times 2 + | c = +}} +{{eqn | r = F_{m - 1} F_2 + F_m F_3 + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = F_{m - 1} F_n + F_m F_{n + 1} + | c = for $n = 2$ +}} +{{end-eqn}} +and so $P \left({2}\right)$ is seen to hold. +This is our [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $P \left({k}\right)$ and $P \left({k-1}\right)$ are true, where $k > 1$, then it logically follows that $P \left({k + 1}\right)$ is true. +So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$\displaystyle F_{m + k} = F_{m - 1} F_k + F_m F_{k + 1}$ +and: +:$\displaystyle F_{m + k - 1} = F_{m - 1} F_{k - 1} + F_m F_k$ +from which it is to be shown: +:$\displaystyle F_{m + k + 1} = F_{m - 1} F_{k + 1} + F_m F_{k + 2}$ +=== Induction Step === +This is our [[Principle of Mathematical Induction#Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = F_{m + k + 1} + | r = F_{m + k} + F_{m + k - 1} + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = F_{m - 1} F_k + F_m F_{k + 1} + F_{m - 1} F_{k - 1} + F_m F_k + | c = [[Fibonacci Number in terms of Smaller Fibonacci Numbers/Proof 1#Induction Hypothesis|Induction Hypothesis]] +}} +{{eqn | r = F_{m - 1} \left({F_k + F_{k - 1} }\right) + F_m \left({F_{k + 1} + F_k}\right) + | c = +}} +{{eqn | r = F_{m - 1} F_{k + 1} + F_m F_{k + 2} + | c = {{Defof|Fibonacci Number}} +}} +{{end-eqn}} +So $P \left({k}\right) \land P \left({k - 1}\right) \implies P \left({k + 1}\right)$ and the result follows by the [[Principle of Mathematical Induction]]. 
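+As an illustrative check, taking $m = 4$ and $n = 3$:
+:$F_{m - 1} F_n + F_m F_{n + 1} = F_3 F_3 + F_4 F_4 = 2 \times 2 + 3 \times 3 = 13 = F_7 = F_{m + n}$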
+Therefore: +:$\displaystyle \forall m, n \in \Z_{>0} : F_{m + n} = F_{m - 1} F_n + F_m F_{n + 1}$ +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | r = F_{m - 1} F_n + F_m F_{n + 1} + | o = +}} +{{eqn | r = \dfrac {\phi^{m - 1} - \hat \phi^{m - 1} } {\sqrt 5} \dfrac {\phi^n - \hat \phi^n} {\sqrt 5} + \dfrac {\phi^m - \hat \phi^m} {\sqrt 5} \dfrac {\phi^{n + 1} - \hat \phi^{n + 1} } {\sqrt 5} + | c = [[Euler-Binet Formula]] +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} - \phi^{m - 1} \hat \phi^n - \phi^n \hat \phi^{m - 1} + \hat \phi^{m + n - 1} + \phi^{m + n + 1} - \phi^m \hat \phi^{n + 1} - \phi^{n + 1} \hat \phi^m + \hat \phi^{m + n + 1} } 5 + | c = +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} \left({1 + \phi^2}\right) + \hat \phi^{m + n - 1} \left({1 + \hat \phi^2}\right) - \phi^{m - 1} \hat \phi^n \left({1 + \phi \hat \phi}\right) -\phi^n \hat \phi^{m - 1} \left({1 + \phi \hat \phi}\right)} 5 + | c = +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} \left({1 + \phi^2}\right) + \hat \phi^{m + n - 1} \left({1 + \hat \phi^2}\right)} 5 + | c = as $\phi \hat \phi = -1$ +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} \left({2 + \phi}\right) + \hat \phi^{m + n - 1} \left({2 + \hat \phi}\right)} 5 + | c = as both $\phi$ and $\hat \phi$ satisfy $x^2 = x + 1$ +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} \left({2 + \dfrac {1 + \sqrt 5} 2}\right) + \hat \phi^{m + n - 1} \left({2 + \dfrac {1 - \sqrt 5} 2}\right) } 5 + | c = Definition of [[Definition:Golden Mean|$\phi$ and $\hat \phi$]] +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} \left({\dfrac {5 + \sqrt 5} 2}\right) + \hat \phi^{m + n - 1} \left({\dfrac{ 5 - \sqrt 5} 2}\right)} 5 + | c = +}} +{{eqn | r = \dfrac {\phi^{m + n - 1} \left({\dfrac{1 + \sqrt 5} 2}\right) - \hat \phi^{m + n - 1} \left({\dfrac {1 - \sqrt 5} 2}\right)} {\sqrt 5} + | c = dividing [[Definition:Numerator|numerator]] and [[Definition:Denominator|denominator]] by $\sqrt 5$ +}} +{{eqn | r = \dfrac {\phi^{m + n} - \hat \phi^{m + n} } {\sqrt 5} + | c = Definition of [[Definition:Golden Mean|$\phi$ and $\hat \phi$]] +}} +{{eqn | r = F_{m + n} + | c = [[Euler-Binet Formula]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{GCD of Fibonacci Numbers} +Tags: Fibonacci Numbers, Greatest Common Divisor + +\begin{theorem} +:$\forall m, n \in \Z_{> 2}: \gcd \set {F_m, F_n} = F_{\gcd \set {m, n} }$ +where $\gcd \set {a, b}$ denotes the [[Definition:Greatest Common Divisor of Integers|greatest common divisor]] of $a$ and $b$. +\end{theorem} + +\begin{proof} +From the initial definition of [[Definition:Fibonacci Number|Fibonacci numbers]], we have: +:$F_1 = 1, F_2 = 1, F_3 = 2, F_4 = 3$ +{{WLOG}}, let $m \le n$. +Let $h$ be $\gcd \set {m, n}$. +Let $a$ and $b$ be [[Definition:Integer|integers]] such that $m = h a$ and $n = \map h {a + b}$. +$a$ and $a + b$ are [[Definition:Coprime Integers|coprime]] by [[Integers Divided by GCD are Coprime]]. +Therefore, $a$ and $b$ are coprime by [[Integer Combination of Coprime Integers]]. +{{begin-eqn}} +{{eqn | l = \gcd \set {F_m, F_n} + | r = \gcd \set {F_{h a}, F_{h a - 1} F_{h b} + F_{h a} F_{h b + 1} } + | c = [[Fibonacci Number in terms of Smaller Fibonacci Numbers]] +}} +{{eqn | r = \gcd \set {F_{h a}, F_{h a - 1} F_{h b} } + | c = [[GCD with Remainder]] +}} +{{end-eqn}} +Let $u$ and $v$ be integers such that $F_{h a} = u F_h$ and $F_{h b} = v F_h$, whose existence is proved by [[Divisibility of Fibonacci Number]]. +We have that $F_{h a}$ and $F_{h a - 1}$ are [[Definition:Coprime Integers|coprime]] by [[Consecutive Fibonacci Numbers are Coprime]]. 
+Therefore, $u$ and $F_{h a - 1}$ are [[Definition:Coprime Integers|coprime]] by [[Divisor of One of Coprime Numbers is Coprime to Other]]. +{{begin-eqn}} +{{eqn | l = \gcd \set {F_{h a}, F_{h a - 1} F_{h b} } + | r = F_h \gcd \set {u, v F_{h a - 1} } + | c = [[Fibonacci Number in terms of Smaller Fibonacci Numbers]] +}} +{{eqn | r = F_h \gcd \set {u, v} + | c = [[Solution of Linear Diophantine Equation]] +}} +{{eqn | r = \gcd \set {F_m, F_{n - m} } + | c = +}} +{{end-eqn}} +Therefore: +:$\forall m, n \in \Z_{>2} : \gcd \set {F_m, F_n} = \gcd \set {F_m, F_{n - m} }$ +This can be done recurrently to produce the result, in a fashion similar to the [[Euclidean Algorithm]]. +Since $a$ and $b$ are [[Definition:Coprime Integers|coprime]], the result would be $\gcd \set {F_h, F_h}$. +Therefore: +:$\forall m, n > 2 : \gcd \set {F_m, F_n} = F_{\gcd \set {m, n} }$ +{{qed}} +\end{proof}<|endoftext|> +\section{Catalan's Identity} +Tags: Fibonacci Numbers, Catalan's Identity + +\begin{theorem} +:${F_n}^2 - F_{n - r} F_{n + r} = \left({-1}\right)^{n - r} {F_r}^2$ +\end{theorem} + +\begin{proof} +Proof by [[Principle of Mathematical Induction|induction]]: +For all $n, r \in \N_{>0}$ where $n > r$, let $\map P {n, r}$ be the [[Definition:Proposition|proposition]]: +:${F_n}^2 - F_{n - r} F_{n + r} = \paren {-1}^{n - r} {F_r}^2$ +=== Basis for the Induction === +$n = 1$ yields no suitable $r$, so we look at $n = 2$ instead, which only gives us $r = 1$. +$\map P {2, 1}$ is true: +:${F_2}^2 - F_3 F_1 = 1^2 - 2 \times 1 = -1 = -1 \times {F_1}^2$ +$n = 3$ gives us only $r = 1$ and $r = 2$. +$\map P {3, 1}$ is true: +:${F_3}^2 - F_2 F_4 = 2^2 - 1 \times 3 = 1 = 1 \times {F_1}^2$ +$\map P {3, 2}$ is true: +:${F_3}^2 - F_1 F_5 = 2^2 - 1 \times 5 = -1 = -1 \times {F_2}^2$ +This is our [[Definition:Basis for the Induction|basis for the induction]]. +=== First Induction Hypothesis === +Now we need to show that, if $\map P {n, r}$ is true for all $r$, where $n > 3$, then it logically follows that $\map P {n + 1, r}$ is true for all $r$. +So this is our [[Definition:Induction Hypothesis|induction hypothesis]]: +:$\forall r < n : {F_n}^2 - F_{n - r} F_{n + r} = \paren {-1}^{n - r} {F_r}^2$ +Then we need to show: +:$\forall r < n : {F_{n + 1} }^2 - F_{n - r + 1} F_{n + r + 1} = \paren {-1}^{n - r + 1} {F_r}^2$ +=== Induction Step === +This is our [[Definition:Induction Step|induction step]]: +It will again be a proof by [[Principle of Mathematical Induction|induction]]. +==== Basis for the Induction ==== +When $r = 1$: +{{begin-eqn}} +{{eqn | l = {F_{n + 1} }^2 - F_n F_{n + 2} + | r = {F_{n + 1} }^2 - F_n \paren {F_{n + 1} + F_n} + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = {F_{n + 1} }^2 - F_n F_{n + 1} - {F_n}^2 + | c = +}} +{{eqn | r = F_{n + 1} \paren {F_{n + 1} - F_n} - {F_n}^2 + | c = +}} +{{eqn | r = F_{n + 1} F_{n - 1} - {F_n}^2 + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = \paren {-1} \paren {F_n^2 - F_{n - 1} F_{n + 1} } + | c = +}} +{{eqn | r = \paren {-1} \paren {-1}^{n - 1} {F_1}^2 + | c = [[Catalan's Identity/Proof 2#First Induction Hypothesis|First induction hypothesis]] +}} +{{eqn | r = \paren {-1}^n {F_1}^2 + | c = +}} +{{end-eqn}} +So $\map P {n + 1, 1}$ holds. +This is our [[Definition:Basis for the Induction|basis for the induction]]. +==== Second Induction Hypothesis ==== +Now we need to show that, if $\map P {n + 1, r}$ is true, where $2 < r < n$, then it logically follows that $\map P {n + 1, r + 1}$ is true. 
+So this is our second [[Definition:Induction Hypothesis|induction hypothesis]]: +:${F_{n + 1} }^2 - F_{n - r + 1} F_{n + r + 1} = \paren {-1}^{n - r + 1} {F_r}^2$ +Then we need to show: +:${F_{n + 1} }^2 - F_{n - r} F_{n + r + 2} = \paren {-1}^{n - r} {F_{r + 1} }^2$ +==== Induction Step ==== +This is our [[Definition:Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = {F_{n + 1} }^2 - F_{n - r} F_{n + r + 2} + | r = {F_{n + 1} }^2 - F_{n - r + 1} F_{n + r + 1} + F_{n - r + 1} F_{n + r + 1} - F_{n - r} F_{n + r + 2} + | c = +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + F_{n - r + 1} F_{n + r + 1} - F_{n - r} F_{n + r + 2} + | c = [[Catalan's Identity/Proof 2#Second Induction Hypothesis|Second induction hypothesis]] +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + F_{n - r + 1} F_{n + r + 1} - F_{n - r} \paren {F_{n + r} + F_{n + r + 1} } + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + \paren {F_{n - r + 1} - F_{n - r} } F_{n + r + 1} - F_{n - r} F_{n + r} + | c = +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + F_{n - r - 1} F_{n + r + 1} - F_{n - r} F_{n + r} + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 - F_{n - r} F_{n + r} + F_{n - r - 1} F_{n + r + 1} + | c = +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + {F_n}^2 - F_{n - r} F_{n + r} - {F_n}^2 + F_{n - r - 1} F_{n + r + 1} + | c = +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + \paren {-1}^{n - r} {F_r}^2 - \paren {-1}^{n - r + 1} {F_{r + 1} }^2 + | c = [[Catalan's Identity#First Induction Hypothesis|First induction hypothesis]] +}} +{{eqn | r = \paren {-1}^{n - r + 1} {F_r}^2 + \paren {-1}^{n - r} {F_r}^2 + \paren {-1}^{n - r} {F_{r + 1} }^2 + | c = +}} +{{eqn | r = \paren {-1}^{n - r} \paren {- {F_r}^2 + {F_r}^2} + \paren {-1}^{n - r} {F_{r + 1} }^2 + | c = +}} +{{eqn | r = \paren {-1}^{n - r} {F_{r + 1} }^2 + | c = +}} +{{end-eqn}} +So $\map P {n + 1, r} \implies \map P {n + 1, r + 1}$ and the result follows by the [[Principle of Mathematical Induction]]. +So $\map P {n, r} \implies \map P {n + 1, r}$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$F_n^2 - F_{n - r} F_{n + r} = \paren {-1}^{n - r} {F_r}^2$ +{{qed}} +\end{proof}<|endoftext|> +\section{Fibonacci Number with Negative Index} +Tags: Fibonacci Numbers, Proofs by Induction + +\begin{theorem} +:$\forall n \in \Z_{> 0} : F_{-n} = \left({-1}\right)^{n + 1} F_n$ +\end{theorem} + +\begin{proof} +From the initial definition of [[Definition:Fibonacci Number|Fibonacci numbers]], we have: +:$F_0 = 0, F_1 = 1, F_2 = 1, F_3 = 2, F_4 = 3$ +By definition of the [[Definition:Fibonacci Number for Negative Index|extension of the Fibonacci numbers to negative integers]]: +:$F_n = F_{n + 2} - F_{n - 1}$ +The proof proceeds by [[Principle of Mathematical Induction|induction]]. +For all $n \in \N_{>0}$, let $P \left({n}\right)$ be the [[Definition:Proposition|proposition]]: +:$F_{-n} = \left({-1}\right)^n F_n$ +=== Basis for the Induction === +$P \left({1}\right)$ is the case: +{{begin-eqn}} +{{eqn | l = F_{-1} + | r = F_1 - F_0 + | c = +}} +{{eqn | r = 1 - 0 + | c = +}} +{{eqn | r = 1 + | c = +}} +{{eqn | r = \left({-1}\right)^{1 + 1} F_1 + | c = +}} +{{end-eqn}} +So $P(1)$ is seen to hold. +$P \left({2}\right)$ is the case: +{{begin-eqn}} +{{eqn | l = F_{-2} + | r = F_0 - F_{-1} + | c = +}} +{{eqn | r = 0 - 1 + | c = +}} +{{eqn | r = -1 + | c = +}} +{{eqn | r = \left({-1}\right)^{2 + 1} F_2 + | c = +}} +{{end-eqn}} +So $P(2)$ is seen to hold. 
+This is our [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $P \left({k}\right)$ and $P \left({k-1}\right)$ are true, where $k > 1$, then it logically follows that $P \left({k+1}\right)$ is true. +So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$F_{-\left({k - 1}\right)} = \left({-1}\right)^k F_{k - 1}$ +:$F_{-k} = \left({-1}\right)^{k + 1} F_k$ +Then we need to show: +:$F_{-\left({k + 1}\right)} = \left({-1}\right)^{k + 2} F_{k + 1}$ +=== Induction Step === +This is our [[Principle of Mathematical Induction#Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = F_{- \left({k + 1}\right)} + | r = F_{-\left({k - 1}\right)} - F_{-k} + | c = {{Defof|Fibonacci Number for Negative Index}} +}} +{{eqn | r = \left({-1}\right)^k F_{k - 1} - \left({-1}\right)^{k + 1} F_k + | c = [[Fibonacci Number with Negative Index#Induction Hypothesis|Induction Hypothesis]] +}} +{{eqn | r = \left({-1}\right)^k F_{k - 1} + \left({-1}\right)^k F_k + | c = +}} +{{eqn | r = \left({-1}\right)^k \left({F_{k - 1} + F_k}\right) + | c = +}} +{{eqn | r = \left({-1}\right)^k \left({F_{k + 1} }\right) + | c = {{Defof|Fibonacci Number}} +}} +{{eqn | r = \left({-1}\right)^{k + 2} \left({F_{k + 1} }\right) + | c = +}} +{{end-eqn}} +So $P \left({k}\right) \land P \left({k-1}\right) \implies P \left({k + 1}\right)$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$\forall n \in \Z_{>0} : F_{-n} = \left({-1}\right)^{n + 1} F_n$ +{{qed}} +\end{proof}<|endoftext|> +\section{Fibonacci Number in terms of Larger Fibonacci Numbers} +Tags: Fibonacci Numbers + +\begin{theorem} +:$\forall m, n \in \Z_{>0} : F_{m - n} = \left({-1}\right)^{n + 1} F_{m - 1} F_n + \left({-1}\right)^n F_m F_{n - 1}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = F_{m - n} + | r = F_{m + \left({-n}\right)} + | c = {{Defof|Integer Subtraction}} +}} +{{eqn | r = F_{m - 1} F_{-n} + F_m F_{-n + 1} + | c = [[Fibonacci Number in terms of Smaller Fibonacci Numbers]] +}} +{{eqn | r = \left({-1}\right)^{n + 1} F_{m - 1} F_n + \left({-1}\right)^n F_m F_{n - 1} + | c = [[Fibonacci Number with Negative Index]] +}} +{{end-eqn}} +{{qed}} +[[Category:Fibonacci Numbers]] +dm87q8ycvy4zkzxcxa3zjfjjtpfmnvt +\end{proof}<|endoftext|> +\section{Fibonacci Number as Sum of Binomial Coefficients} +Tags: Fibonacci Numbers, Binomial Coefficients, Proofs by Induction, Fibonacci Number as Sum of Binomial Coefficients + +\begin{theorem} +{{begin-eqn}} +{{eqn | lo= \forall n \in \Z_{>0}: + | l = F_n + | r = \sum_{k \mathop = 0}^{\floor {\frac {n - 1} 2} } \dbinom {n - k - 1} k + | c = +}} +{{eqn | r = \binom {n - 1} 0 + \binom {n - 2} 1 + \binom {n - 3} 2 + \dotsb + \binom {n - j} {j - 1} + \binom {n - j - 1} j + | c = where $j = \floor {\frac {n - 1} 2}$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +By definition of [[Definition:Fibonacci Numbers|Fibonacci numbers]]: +:$F_1 = 1, F_2 = 1, F_3 = 2, F_4 = 3, \ldots$ +The proof proceeds by [[Principle of Mathematical Induction|induction]]. 
+For all $n \in \Z_{>0}$, let $P(n)$ be the [[Definition:Proposition|proposition]]: +:$\displaystyle F_n = \sum_{k \mathop = 0}^{\floor {\frac {n - 1} 2} } \dbinom {n - k - 1} k$ +=== Basis for the Induction === +$\map P 1$ is the case: +{{begin-eqn}} +{{eqn | l = F_1 + | r = 1 + | c = +}} +{{eqn | r = \dbinom 0 0 + | c = [[Zero Choose Zero]] +}} +{{eqn | r = \dbinom {1 - 0 - 1} 0 + | c = +}} +{{eqn | r = \sum_{k \mathop = 0}^{\floor {\frac {1 - 1} 2} } \dbinom {1 - k - 1} k + | c = +}} +{{end-eqn}} +So $\map P 1$ is seen to hold. +$\map P 2$ is the case: +{{begin-eqn}} +{{eqn | l = F_2 + | r = 1 + 0 + | c = +}} +{{eqn | r = \dbinom 1 0 + | c = [[Binomial Coefficient with Zero]] +}} +{{eqn | r = \dbinom {2 - 0 - 1} 0 + | c = +}} +{{eqn | r = \sum_{k \mathop = 0}^{\floor {\frac {2 - 1} 2} } \dbinom {2 - k - 1} k + | c = +}} +{{end-eqn}} +So $\map P 2$ is also seen to hold. +This is our [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $\map P {k - 1}$ and $\map P k$ are true, where $k > 2$ is an [[Definition:Even Integer|even integer]], then it logically follows that $\map P {k + 1}$ and $\map P {k + 2}$ are both true. +So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$\displaystyle F_{k - 1} = \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 2} i$ +:$\displaystyle F_k = \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 1} i$ +Then we need to show: +:$\displaystyle F_{k + 1} = \sum_{i \mathop = 0}^{\frac k 2} \dbinom {k - i} i$ +:$\displaystyle F_{k + 2} = \sum_{i \mathop = 0}^{\frac k 2} \dbinom {k - i + 1} i$ +=== Induction Step === +This is our [[Principle of Mathematical Induction#Induction Step|induction step]]: +For the first part: +{{begin-eqn}} +{{eqn | l = \sum_{i \mathop = 0}^{\frac k 2} \dbinom {k - i} i + | r = \dbinom k 0 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i} i + \dbinom {k - \frac k 2} {\frac k 2} + | c = +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i} i + \dbinom {\frac k 2} {\frac k 2} + | c = [[Binomial Coefficient with Zero]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i} i + 1 + | c = [[Binomial Coefficient with Self]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \left({ \dbinom {k - i - 1} i + \dbinom {k - i - 1} {i - 1} }\right) + 1 + | c = [[Pascal's Rule]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i - 1} i + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i - 1} {i - 1} + 1 + | c = [[Summation is Linear]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i - 1} i + \sum_{i \mathop = 0}^{\frac k 2 - 2} \dbinom {k - i - 2} i + 1 + | c = +}} +{{eqn | r = \dbinom {k - 2} 0 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i - 1} i + \sum_{i \mathop = 0}^{\frac k 2 - 2} \dbinom {k - i - 2} i + 1 + | c = [[Binomial Coefficient with Zero]] +}} +{{eqn | r = \dbinom {k - 2} 0 + \sum_{i \mathop = 1}^{\frac k 2 - 1} \dbinom {k - i - 1} i + \sum_{i \mathop = 0}^{\frac k 2 - 2} \dbinom {k - i - 2} i + \dbinom {k - \paren {\frac k 2 - 1} - 2} {\frac k 2 - 1} + | c = [[Binomial Coefficient with Self]] +}} +{{eqn | r = \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 1} i + \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 2} i + | c = +}} +{{eqn | r = F_k + F_{k - 1} + | c = [[Fibonacci Number as Sum of Binomial Coefficients#Induction Hypothesis|Induction hypothesis]] +}} +{{eqn | r 
= F_{k + 1} + | c = {{Defof|Fibonacci Number}} +}} +{{end-eqn}} +For the second part: +{{begin-eqn}} +{{eqn | l = \sum_{i \mathop = 0}^{\frac k 2} \dbinom {k - i + 1} i + | r = \dbinom k 0 + \sum_{i \mathop = 1}^{\frac k 2} \dbinom {k - i + 1} i + | c = +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2} \dbinom {k - i + 1} i + | c = [[Binomial Coefficient with Zero]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2} \paren {\dbinom {k - i} i + \dbinom {k - i} {i - 1} } + | c = [[Pascal's Rule]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2} \dbinom {k - i} i + \sum_{i \mathop = 1}^{\frac k 2} \dbinom {k - i} {i - 1} + | c = [[Summation is Linear]] +}} +{{eqn | r = 1 + \sum_{i \mathop = 1}^{\frac k 2} \dbinom {k - i} i + \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 1} i + | c = +}} +{{eqn | r = \dbinom {k - 2} 0 + \sum_{i \mathop = 1}^{\frac k 2} \dbinom {k - i} i + \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 1} i + | c = [[Binomial Coefficient with Zero]] +}} +{{eqn | r = \sum_{i \mathop = 0}^{\frac k 2} \dbinom {k - i} i + \sum_{i \mathop = 0}^{\frac k 2 - 1} \dbinom {k - i - 1} i + | c = +}} +{{eqn | r = F_{k + 1} + F_k + | c = [[Fibonacci Number as Sum of Binomial Coefficients#Induction Hypothesis|Induction hypothesis]] +}} +{{eqn | r = F_{k + 2} + | c = {{Defof|Fibonacci Number}} +}} +{{end-eqn}} +So $\map P {k - 1} \land \map P k \implies \map P {k + 1} \land \map P {k + 2}$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$\displaystyle \forall n \in \Z_{>0}: F_n = \sum_{k \mathop = 0}^{\floor {\frac {n - 1} 2} } \dbinom {n - k - 1} k$ +{{qed}} +\end{proof}<|endoftext|> +\section{Vajda's Identity} +Tags: Fibonacci Numbers, Vajda's Identity + +\begin{theorem} +==== [[Vajda's Identity/Formulation 1|Formulation 1]] ==== +{{:Vajda's Identity/Formulation 1}} +==== [[Vajda's Identity/Formulation 2|Formulation 2]] ==== +{{:Vajda's Identity/Formulation 2}} +\end{theorem}<|endoftext|> +\section{Determinant of Matrix Product/General Case} +Tags: Determinant of Matrix Product + +\begin{theorem} +Let $\mathbf A_1, \mathbf A_2, \cdots, \mathbf A_n$ be [[Definition:Square Matrix|square matrices]] of [[Definition:Order of Square Matrix|order $n$]], where $n > 1$. +Then: +:$\map \det {\mathbf A_1 \mathbf A_2 \cdots \mathbf A_n} = \map \det {\mathbf A_1} \map \det {\mathbf A_2} \cdots \map \det {\mathbf A_n}$ +\end{theorem} + +\begin{proof} +Proof by [[Principle of Mathematical Induction|induction]]: +=== Basis for the Induction === +$n = 2$ holds by [[Determinant of Matrix Product]]. +So shown for [[Definition:Basis for the Induction|base case]]. 
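+For example, the case of three matrices already illustrates the mechanism of the induction step, by treating $\mathbf A_1 \mathbf A_2$ as a single [[Definition:Square Matrix|square matrix]]:
+:$\map \det {\paren {\mathbf A_1 \mathbf A_2} \mathbf A_3} = \map \det {\mathbf A_1 \mathbf A_2} \map \det {\mathbf A_3} = \map \det {\mathbf A_1} \map \det {\mathbf A_2} \map \det {\mathbf A_3}$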
+=== Induction Hypothesis === +This is our [[Definition:Induction Hypothesis|induction hypothesis]]: +:$\map \det {\mathbf A_1 \mathbf A_2 \cdots \mathbf A_k} = \map \det {\mathbf A_1} \map \det {\mathbf A_2} \cdots \map \det {\mathbf A_k}$ +Now we need to show true for $n = k + 1$: +:$\map \det {\mathbf A_1 \mathbf A_2 \cdots \mathbf A_k \mathbf A_{k + 1} } = \map \det {\mathbf A_1} \map \det {\mathbf A_2} \cdots \map \det {\mathbf A_k} \map \det {\mathbf A_{k + 1} }$ +=== Induction Step === +This is our [[Definition:Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = \map \det {\mathbf A_1 \mathbf A_2 \cdots \mathbf A_k \mathbf A_{k + 1} } + | r = \map \det {\mathbf A_1 \mathbf A_2 \cdots \mathbf A_k} \map \det {\mathbf A_{k + 1} } + | c = [[Determinant of Matrix Product]] +}} +{{eqn | r = \map \det {\mathbf A_1} \map \det {\mathbf A_2} \cdots \map \det {\mathbf A_k} \map \det {\mathbf A_{k + 1} } + | c = [[Determinant of Matrix Product/General Case#Induction Hypothesis|Induction hypothesis]] +}} +{{end-eqn}} +The result follows by [[Principle of Mathematical Induction|induction]]: +:$\map \det {\mathbf A_1 \mathbf A_2 \cdots \mathbf A_n} = \map \det {\mathbf A_1} \map \det {\mathbf A_2} \cdots \map \det {\mathbf A_n}$ +{{qed}} +[[Category:Determinant of Matrix Product]] +gcka4srqc7leplzrelff1nhk7zlsiq3 +\end{proof}<|endoftext|> +\section{Opposite Sides Equal implies Parallelogram} +Tags: Parallelograms + +\begin{theorem} +Let $ABCD$ be a [[Definition:Convex Polygon|convex]] [[Definition:Quadrilateral|quadrilateral]] with $AB = CD$ and $BC = AD$. +Then $ABCD$ is a [[Definition:Parallelogram|parallelogram]]. +\end{theorem} + +\begin{proof} +Join $AC$. +{{begin-eqn}} +{{eqn | n = 1 + | l = AB + | r = CD + | c = Given +}} +{{eqn | n = 2 + | l = BC + | r = DA + | c = Given +}} +{{eqn | n = 3 + | l = AC + | r = CA + | c = [[Equality is Reflexive]] +}} +{{eqn | n = 4 + | l = \Delta ABC + | r = \Delta CDA + | c = [[SSS]] from $(1)$, $(2)$, and $(3)$ +}} +{{eqn | n = 5 + | l = \angle BCA + | r = \angle DAC + | c = from $(4)$ +}} +{{eqn | n = 6 + | l = \angle BAC + | r = \angle DCA + | c = from $(4)$ +}} +{{eqn | n = 7 + | l = BC + | o = \parallel + | r = DA + | c = [[Equal Alternate Angles implies Parallel Lines]] from $(5)$ +}} +{{eqn | n = 8 + | l = BA + | o = \parallel + | r = DC + | c = [[Equal Alternate Angles implies Parallel Lines]] from $(6)$ +}} +{{end-eqn}} +From $(7)$ and $(8)$, it follows by definition that $ABCD$ is a [[Definition:Parallelogram|parallelogram]]. +{{qed}} +[[Category:Parallelograms]] +bb5ds8cs25ff241o6a6jcnghhw6kv8i +\end{proof}<|endoftext|> +\section{Properties of Fibonacci Numbers} +Tags: Fibonacci Numbers + +\begin{theorem} +Let $F_n$ denote the $n$th [[Definition:Fibonacci Number|Fibonacci number]]: +{{:Definition:Fibonacci Number}} +\end{theorem}<|endoftext|> +\section{Principle of Mathematical Induction/Naturally Ordered Semigroup/General Result} +Tags: Naturally Ordered Semigroup, Principle of Mathematical Induction + +\begin{theorem} +Let $\struct {S, \circ, \preceq}$ be a [[Definition:Naturally Ordered Semigroup|naturally ordered semigroup]]. +Let $p \in S$. +Let $T \subseteq S$ such that: +:$x \in T \implies p \preceq x \land \paren {x \in T \implies x \circ 1 \in T}$ +Then: +:$S \setminus S_p \subseteq T$ +where: +:$\setminus$ denotes [[Definition:Set Difference|set difference]] +:$S_p$ denotes the [[Definition:Initial Segment|set of all elements of $S$ preceding $p$]]. 
+\end{theorem} + +\begin{proof} +Let $S_p$ be the [[Definition:Initial Segment|set of all elements of $S$ preceding $p$]]: +:$S_p = \set {x \in S: x \prec p}$ +Let $T' = T \cup S_p$. +Then the set $T'$ satisfies the conditions of the [[Principle of Mathematical Induction for Naturally Ordered Semigroup|Principle of Mathematical Induction for a Naturally Ordered Semigroup]]. +From that result: +:$T' = S$ +By [[Set Difference with Union is Set Difference]]: +:$S \setminus S_p = T \setminus S_p$ +By [[Set Difference is Subset]]: +:$T \setminus S_p \subseteq T$ +completing the proof. +{{qed}} +\end{proof}<|endoftext|> +\section{Principle of Mathematical Induction for Minimal Infinite Successor Set} +Tags: Minimal Infinite Successor Set + +\begin{theorem} +Let $\omega$ be the [[Definition:Minimal Infinite Successor Set|minimal infinite successor set]]. +Let $S \subseteq \omega$. +Suppose that: +:$(1): \quad \varnothing \in S$ +:$(2): \quad \forall x: x \in S \implies x^+ \in S$ +where $x^+$ is the [[Definition:Successor Set|successor set]] of $x$. +Then: +:$S = \omega$ + +\end{theorem} + +\begin{proof} +The hypotheses state precisely that $S$ is an [[Definition:Infinite Successor Set|infinite successor set]]. +Then the [[Definition:Minimal Infinite Successor Set|minimal infinite successor set]] $\omega$ being defined as the [[Definition:Set Intersection|intersection]] of all [[Definition:Infinite Successor Set|infinite successor sets]], we conclude that: +:$\omega \subseteq S$ +by [[Intersection is Subset/General Result|Intersection is Subset: General Result]]. +Thus, by definition of [[Definition:Set Equality/Definition 2|set equality]]: +:$S = \omega$ +{{qed}} +\end{proof}<|endoftext|> +\section{Principle of Mathematical Induction for Natural Numbers in Real Numbers} +Tags: Natural Numbers in Real Numbers + +\begin{theorem} +Let $\struct {\R, +, \times, \le}$ be the [[Definition:Field of Real Numbers|field of real numbers]]. +Let $\N$ be the [[Definition:Natural Numbers in Real Numbers|natural numbers in $\R$]]. +Suppose that $A \subseteq \N$ is an [[Definition:Inductive Set as Subset of Real Numbers|inductive set]]. +Then $A = \N$. +\end{theorem} + +\begin{proof} +By definition of the [[Definition:Natural Numbers in Real Numbers|natural numbers in $\R$]]: +:$\N = \displaystyle \bigcap \II$ +where $\II$ is the set of [[Definition:Inductive Set as Subset of Real Numbers|inductive sets]] in $\R$. +Since $A$ was supposed to be [[Definition:Inductive Set as Subset of Real Numbers|inductive]], it follows that: +:$\N \subseteq A$ +from [[Intersection is Subset/General Result|Intersection is Subset: General Result]]. +Hence by definition of [[Definition:Set Equality/Definition 2|set equality]]: +:$A = \N$ +{{qed}} +\end{proof}<|endoftext|> +\section{Chinese Remainder Theorem (Commutative Algebra)} +Tags: Commutative Algebra + +\begin{theorem} +Let $A$ be a commutative ring. +{{explain|Presumably a [[Definition:Commutative and Unitary Ring]] is actually required here?}} +Let $I_1, \ldots, I_k$ for some $k \ge 1$ be pairwise coprime ideals in $A$, that is: +: $\forall i \ne j: I_i + I_j = A$ +Then there is an isomorphism of rings: +:$A / \left({I_1 \cap \ldots \cap I_k}\right) \to A / I_1 \times \cdots \times A / I_k$ +which is induced by the [[Definition:Ring Homomorphism|ring homomorphism]] $\phi: A \to A / I_1 \times \cdots \times A / I_k$ defined as: +:$\phi \left({x}\right) = \left({x + I_1, \ldots, x + I_k}\right)$ +which passes through the quotient. 
+{{explain|what is meant by "passes through the quotient"?}} +\end{theorem} + +\begin{proof} +The mapping $\phi$ is indeed a [[Definition:Ring Homomorphism|ring homomorphism]], because each $A \to A / I_i$ is a [[Definition:Ring Homomorphism|ring homomorphism]]. +The kernel of $\phi$ is given by: +:$\displaystyle \ker \phi = \left\{{x \in A: \forall i, 1 \le i \le k : x \in I_i}\right\} = \bigcap_{1 \mathop \le i \mathop \le k} I_i$ +So $\phi$ defines an [[Definition:Injection|injective]] [[Definition:Ring Homomorphism|homomorphism]] by passing through the quotient. +It remains then to prove that: +:$\tilde \phi \equiv \phi \circ \pi$ +where $\pi$ is the canonical quotient-map, is surjective. +That is: +:$\forall x_i \in A, 1 \le i \le k: \exists x \in A: x - x_i \in I_i, 1 \le i \le k$ +Then it follows that: +:$\displaystyle \left({x_1 + I_1, \ldots, x_k + I_k}\right) = \tilde \phi \left({x + \bigcap I_i}\right)$ +Note that: +:$\left({x_1 + I_1, \ldots, x_k + I_k}\right) = \left({x_1 + I_1}\right) e_1 + \cdots + \left({x_k + I_k}\right) e_k$ +where the unit element lies at the $i$-th component: +:$e_i = \left({0, \ldots, 0, 1_{A / I_i}, 0, \ldots, 0}\right)$ +{{explain|Unity not unit, yeah?}} +This implies that it is enough to find $a_i \in A, 1 \le i \le k$, such that: +:$\tilde \phi (a_i + \bigcap I_i) = e_i$ +since: +{{begin-eqn}} +{{eqn | l = \tilde \phi \left({x_1 a_1 + \cdots + x_k a_k}\right) + | r = \tilde \phi \left({x_1}\right) \tilde \phi \left({a_1}\right) + \cdots + \tilde \phi \left({x_k}\right) \tilde \phi \left({a_k}\right) + | c = +}} +{{eqn | r = \left({\tilde \phi \left({x_1}\right), \ldots, \tilde \phi \left({x_k}\right)}\right) + | c = +}} +{{eqn | r = \left({x_1 + I_1, \ldots, x_n + I_n}\right) + | c = +}} +{{end-eqn}} +We show that $e_1$ is in the image of $\tilde \phi$. +The other instances of $e_i$ then follow in a similar manner. +We then need to find an $a \in A$ such that $a -1 \in I_1, a \in I_2, \ldots, a \in I_k$. +Since then $I_1$ is coprime with the other ideals, we have that $I_1 + I_j = A, 2 \le j \le k$. +So there exists a $b_j \in I_1$, $c_j \in I_j$ such that $b_j + c_j = 1$. +Note that this implies that $c_j - 1 \in I_1, c_j \in I_j$. +Define now $a = c_2 c_3 \cdots c_k \in A$. 
+Then for $2 \le j \le k$: +:$a = c_j \left({c_2 \cdots c_{j - 1} c_{j + 1} \cdots c_k}\right) \in I_j$ +and: +{{begin-eqn}} +{{eqn | l = a + I_1 + | r = \left({c_2 + I_1}\right) \cdots \left({c_k + I_1}\right) + | c = +}} +{{eqn | r = \left({1 + I_1}\right) \cdots \left({1 + I_1}\right) + | c = +}} +{{eqn | r = 1 + I_1 + | c = +}} +{{end-eqn}} +Hence: +:$a - 1 \in I_1$ +{{qed}} +[[Category:Commutative Algebra]] +38mf2x0o58risu8v5x45d5bltmq92o1 +\end{proof}<|endoftext|> +\section{Sine of i} +Tags: Sine Function, Complex Numbers, Sine of i + +\begin{theorem} +:$\sin i = \paren {\dfrac e 2 - \dfrac 1 {2 e} } i$ +\end{theorem} + +\begin{proof} +We have: +{{begin-eqn}} +{{eqn | n = 1 + | l = \cos i + i \sin i + | r = e^{i \times i} + | c = [[Euler's Formula]] +}} +{{eqn | r = e^{-1} + | c = Definition of [[Definition:Complex Number/Definition 1|Imaginary Unit]] +}} +{{eqn | r = \frac 1 e +}} +{{end-eqn}} +Also: +{{begin-eqn}} +{{eqn | n = 2 + | l = \cos i - i \sin i + | r = \cos \left({-i}\right) + i \sin \left({-i}\right) + | c = [[Cosine Function is Even]] and [[Sine Function is Odd]] +}} +{{eqn | r = e^{i \times \left({-i}\right)} + | c = [[Euler's Formula]] +}} +{{eqn | r = e^1 + | c = Definition of [[Definition:Complex Number/Definition 1|Imaginary Unit]] +}} +{{eqn | r = e +}} +{{end-eqn}} +Then from $(1) - (2)$: +{{begin-eqn}} +{{eqn | l = 2 i \sin i + | r = \frac 1 e - e +}} +{{eqn | ll= \implies + | l = \sin i + | r = \frac 1 {2 i} \left({\frac 1 e - e}\right) +}} +{{eqn | r = \left({\frac e 2 - \frac 1 {2 e} }\right) i +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sin i + | r = i \sinh 1 + | c = [[Hyperbolic Sine in terms of Sine]] +}} +{{eqn | r = i \frac {e^1 - e^{-1} } 2 + | c = {{Defof|Hyperbolic Sine}} +}} +{{eqn | r = \paren {\frac e 2 - \frac 1 {2 e} } i +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cosine of i} +Tags: Cosine Function, Complex Numbers, Cosine of i + +\begin{theorem} +:$\cos i = \dfrac e 2 + \dfrac 1 {2 e}$ +\end{theorem} + +\begin{proof} +We have: +{{begin-eqn}} +{{eqn | n = 1 + | l = \cos i + i \sin i + | r = e^{i \times i} + | c = [[Euler's Formula]] +}} +{{eqn | r = e^{-1} + | c = Definition of [[Definition:Complex Number/Definition 1|Imaginary Unit]] +}} +{{eqn | r = \frac 1 e +}} +{{end-eqn}} +Also: +{{begin-eqn}} +{{eqn | n = 2 + | l = \cos i - i \sin i + | r = \cos \left({-i}\right) + i \sin \left({-i}\right) + | c = [[Cosine Function is Even]] and [[Sine Function is Odd]] +}} +{{eqn | r = e^{i \times \left({-i}\right)} + | c = [[Euler's Formula]] +}} +{{eqn | r = e^1 + | c = Definition of [[Definition:Complex Number/Definition 1|Imaginary Unit]] +}} +{{eqn | r = e +}} +{{end-eqn}} +Then from $(1) + (2)$: +{{begin-eqn}} +{{eqn | l = 2 \cos i + | r = \frac 1 e + e +}} +{{eqn | ll= \implies + | l = \cos i + | r = \frac 1 2 \left({\frac 1 e + e}\right) +}} +{{eqn | r = \frac e 2 + \frac 1 {2 e} +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cos i + | r = \cosh 1 + | c = [[Hyperbolic Cosine in terms of Cosine]] +}} +{{eqn | r = \frac {e^1 + e^{-1} } 2 + | c = {{Defof|Hyperbolic Cosine}} +}} +{{eqn | r = \frac e 2 + \frac 1 {2 e} +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Tangent of i} +Tags: Tangent Function, Complex Numbers, Tangent of i + +\begin{theorem} +:$\tan i = \left({\dfrac {e^2 - 1} {e^2 + 1} }\right) i$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \tan i + | r = i \tanh 1 + | c = [[Hyperbolic Tangent in terms of Tangent]] 
+}} +{{eqn | r = \paren {\frac {e^1 - e^{-1} } {e^1 + e^{-1} } } i + | c = {{Defof|Hyperbolic Tangent}} +}} +{{eqn | r = \paren {\frac {e^2 - 1} {e^2 + 1} } i + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $e$ +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \tan i + | r = \frac {\sin i} {\cos i} + | c = {{Defof|Complex Tangent Function}} +}} +{{eqn | r = \frac {\left({\frac e 2 - \frac 1 {2 e} }\right) i} {\frac e 2 + \frac 1 {2 e} } + | c = [[Sine of i|Sine of $i$]] and [[Cosine of i|Cosine of $i$]] +}} +{{eqn | r = \left({\frac {e - \frac 1 e } {e + \frac 1 e} }\right) i + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $2$ +}} +{{eqn | r = \left({\frac {e^2 - 1} {e^2 + 1} }\right) i + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $e$ +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cosecant of i} +Tags: Cosecant Function, Complex Numbers, Cosecant of i + +\begin{theorem} +:$\csc i = \left({\dfrac {2 e} {1 - e^2} }\right) i$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \csc i + | r = -i \csch 1 + | c = [[Hyperbolic Cosecant in terms of Cosecant]] +}} +{{eqn | r = -\paren {\frac 2 {e^1 - e^{-1} } } i + | c = {{Defof|Hyperbolic Cosecant}} +}} +{{eqn | r = -\paren {\frac {2 e} {e^2 - 1} } i + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $e$ +}} +{{eqn | r = \paren {\frac {2 e} {1 - e^2} } i +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \csc i + | r = \frac 1 {\sin i} + | c = Definition of [[Definition:Complex Cosecant Function|Complex Cosecant]] +}} +{{eqn | r = \frac 1 {\left({ \frac e 2 - \frac 1 {2e} }\right) i} + | c = [[Sine of i|Sine of $i$]] +}} +{{eqn | r = \left({ \frac 1 {\frac 1 {2e} - \frac e {2} } }\right) i + | c = [[Reciprocal of i|Reciprocal of $i$]] +}} +{{eqn | r = \left({\frac {2 e} {1 - e^2} }\right) i + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $2 e$ +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Secant of i} +Tags: Secant Function, Complex Numbers, Secant of i + +\begin{theorem} +:$\sec i = \dfrac {2 e} {e^2 + 1}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sec i + | r = \frac 1 {\cos i} + | c = Definition of [[Definition:Complex Secant Function|Complex Secant]] +}} +{{eqn | r = \frac 1 {\frac e 2 + \frac 1 {2 e} } + | c = [[Cosine of i|Cosine of $i$]] +}} +{{eqn | r = \frac {2 e} {e^2 + 1} + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $2 e$ +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sec i + | r = \sech 1 + | c = [[Hyperbolic Secant in terms of Secant]] +}} +{{eqn | r = \frac 2 {e^1 + e^{-1} } + | c = {{Defof|Hyperbolic Secant}} +}} +{{eqn | r = \frac {2 e} {e^2 + 1} + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $e$ +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cotangent of i} +Tags: Cotangent Function, Complex Numbers, Cotangent of i + +\begin{theorem} +:$\cot i = \left({\dfrac {1 + e^2} {1 - e^2} }\right) i$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cot i + | r = -i \coth 1 + | c = [[Hyperbolic Cotangent in terms of Cotangent]] +}} +{{eqn | r = -\paren {\frac {e^1 + e^{-1} } {e^1 - e^{-1} 
} } i + | c = {{Defof|Hyperbolic Cotangent|index = 1}} +}} +{{eqn | r = -\paren {\frac {e^2 + 1} {e^2 - 1} } i + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $e$ +}} +{{eqn | r = \paren {\frac {1 + e^2} {1 - e^2} } i +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cot i + | r = \frac {\cos i} {\sin i} + | c = {{Defof|Complex Cotangent Function}} +}} +{{eqn | r = \frac {\frac e 2 + \frac 1 {2 e} } {\left({\frac e 2 - \frac 1 {2 e} }\right) i} + | c = [[Cosine of i|Cosine of $i$]] and [[Sine of i|Sine of $i$]] +}} +{{eqn | r = \left({\frac {e + \frac 1 e} {e - \frac 1 e} }\right) \left({\frac 1 i}\right) + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $2$ +}} +{{eqn | r = \left({ \frac {e^2 + 1} {e^2 - 1} }\right) \left({\frac 1 i}\right) + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $e$ +}} +{{eqn | r = \left({ \frac {1 + e^2} {1 - e^2} }\right) i + | c = [[Reciprocal of i|Reciprocal of $i$]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Sine in terms of Cosine} +Tags: Sine Function, Cosine Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \sin x + | r = +\sqrt {1 - \cos ^2 x} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ +}} +{{eqn | l = \sin x + | r = -\sqrt {1 - \cos ^2 x} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cos^2 x + \sin^2 x + | r = 1 + | c = [[Sum of Squares of Sine and Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin^2 x + | r = 1 - \cos^2 x +}} +{{eqn | ll= \leadsto + | l = \sin x + | r = \pm \sqrt {1 - \cos^2 x} +}} +{{end-eqn}} +Then from [[Sign of Sine]]: +{{begin-eqn}} +{{eqn | l = \sin x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ +}} +{{eqn | l = \sin x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Secant in terms of Tangent} +Tags: Secant Function, Tangent Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \sec x + | r = +\sqrt {\tan ^2 x + 1} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \sec x + | r = -\sqrt {\tan ^2 x + 1} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sec^2 x - \tan^2 x + | r = 1 + | c = [[Difference of Squares of Secant and Tangent]] +}} +{{eqn | ll= \leadsto + | l = \sec^2 x + | r = \tan^2 x + 1 +}} +{{eqn | ll= \leadsto + | l = \sec x + | r = \pm \sqrt {\tan ^2 x + 1} +}} +{{end-eqn}} +Also, from [[Sign of Secant]]: +:If there exists [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$, then $\sec x > 0$. +:If there exists [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$, then $\sec x < 0$. +When $\cos x = 0$, $\sec x$ and $\tan x$ is undefined. 
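+For illustration, with $n = 0$: at $x = \dfrac \pi 4$ we have $\tan x = 1$ and $\sec x = \sqrt 2 = +\sqrt {1^2 + 1}$, while at $x = \dfrac {3 \pi} 4$ we have $\tan x = -1$ and $\sec x = -\sqrt 2 = -\sqrt {\paren {-1}^2 + 1}$, consistent with the signs above.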
+{{qed}} +\end{proof}<|endoftext|> +\section{Sine in terms of Tangent} +Tags: Sine Function, Tangent Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \sin x + | r = +\frac {\tan x} {\sqrt {1 + \tan^2 x} } + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \sin x + | r = -\frac {\tan x} {\sqrt {1 + \tan^2 x} } + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +For the first part, if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$: +{{begin-eqn}} +{{eqn | l = \cos x + | r = +\frac 1 {\sqrt {1 + \tan^2 x} } + | c = [[Cosine in terms of Tangent]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\paren {\frac 1 {\cos x} } } + | r = +\frac 1 {\sqrt {1 + \tan^2 x} } +}} +{{eqn | ll= \leadsto + | l = \frac {\sin x} {\paren {\frac {\sin x} {\cos x} } } + | r = +\frac 1 {\sqrt {1 + \tan^2 x} } + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $\sin x$ +}} +{{eqn | ll= \leadsto + | l = \frac {\sin x} {\tan x} + | r = + \frac 1 {\sqrt {1 + \tan^2 x} } + | c = [[Tangent is Sine divided by Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin x + | r = + \frac {\tan x} {\sqrt {1 + \tan^2 x} } +}} +{{end-eqn}} +For the second part, if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$: +{{begin-eqn}} +{{eqn | l = \cos x + | r = -\frac 1 {\sqrt {1 + \tan^2 x} } + | c = [[Cosine in terms of Tangent]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\paren {\frac 1 {\cos x} } } + | r = -\frac 1 {\sqrt {1 + \tan^2 x} } +}} +{{eqn | ll= \leadsto + | l = \frac {\sin x} {\paren {\frac {\sin x} {\cos x} } } + | r = -\frac 1 {\sqrt {1 + \tan^2 x} } + | c = multiplying [[Definition:Denominator|denominator]] and [[Definition:Numerator|numerator]] by $\sin x$ +}} +{{eqn | ll= \leadsto + | l = \frac {\sin x} {\tan x} + | r = -\frac 1 {\sqrt {1 + \tan^2 x} } + | c = [[Tangent is Sine divided by Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin x + | r = -\frac {\tan x} {\sqrt {1 + \tan^2 x} } +}} +{{end-eqn}} +When $\cos x = 0$, $\tan x$ is undefined. +{{qed}} +\end{proof}<|endoftext|> +\section{Sine is Reciprocal of Cosecant} +Tags: Sine Function, Cosecant Function + +\begin{theorem} +:$\sin \theta = \dfrac 1 {\csc \theta}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \frac 1 {\sin \theta} + | r = \csc \theta + | c = [[Cosecant is Reciprocal of Sine]] +}} +{{eqn | ll= \leadsto + | l = \sin \theta + | r = \frac 1 {\csc \theta} +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Tangent is Reciprocal of Cotangent} +Tags: Tangent Function, Cotangent Function + +\begin{theorem} +:$\tan \theta = \dfrac 1 {\cot \theta}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \frac 1 {\tan \theta} + | r = \cot \theta + | c = [[Cotangent is Reciprocal of Tangent]] +}} +{{eqn | ll= \implies + | l = \tan \theta + | r = \frac 1 {\cot \theta} +}} +{{end-eqn}} +$\tan \theta$ is not defined when $\cos \theta = 0$, and $\cot \theta$ is not defined when $\sin \theta = 0$. 
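+For illustration: at $\theta = \dfrac \pi 3$ we have $\cot \theta = \dfrac 1 {\sqrt 3}$, and indeed $\tan \theta = \sqrt 3 = \dfrac 1 {\paren {1 / \sqrt 3} }$.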
+{{qed}} +\end{proof}<|endoftext|> +\section{Cosine is Reciprocal of Secant} +Tags: Cosine Function, Secant Function + +\begin{theorem} +:$\cos \theta = \dfrac 1 {\sec \theta}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \frac 1 {\cos \theta} + | r = \sec \theta + | c = [[Secant is Reciprocal of Cosine]] +}} +{{eqn | ll= \leadsto + | l = \cos \theta + | r = \frac 1 {\sec \theta} +}} +{{end-eqn}} +$\sec \theta$ and $\dfrac 1 {\cos \theta}$ are not defined when $\cos \theta = 0$. +{{qed}} +\end{proof}<|endoftext|> +\section{Sine of Complex Number} +Tags: Sine Function, Complex Numbers, Sine of Complex Number + +\begin{theorem} +Let $a$ and $b$ be [[Definition:Real Number|real numbers]]. +Let $i$ be the [[Definition:Imaginary Unit|imaginary unit]]. +Then: +:$\sin \paren {a + b i} = \sin a \cosh b + i \cos a \sinh b$ +where: +:$\sin$ denotes the [[Definition:Sine Function|sine function]] ([[Definition:Real Sine Function|real]] and [[Definition:Complex Sine Function|complex]]) +:$\cos$ denotes the [[Definition:Real Cosine Function|real cosine function]] +:$\sinh$ denotes the [[Definition:Hyperbolic Sine|hyperbolic sine function]] +:$\cosh$ denotes the [[Definition:Hyperbolic Cosine|hyperbolic cosine function]]. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sin \paren {a + b i} + | r = \sin a \cos \paren {b i} + \cos a \sin \paren {b i} + | c = [[Sine of Sum]] +}} +{{eqn | r = \sin a \cosh b + \cos a \sin \paren {b i} + | c = [[Hyperbolic Cosine in terms of Cosine]] +}} +{{eqn | r = \sin a \cosh b + i \cos a \sinh b + | c = [[Hyperbolic Sine in terms of Sine]] +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sin a \cosh b + i \cos a \sinh b + | r = \frac {e^{i a} - e^{-i a} } {2 i} \frac {e^b - e^{-b} } 2 + i \frac {e^{i a} + e^{-i a} } 2 \frac {e^b - e^{-b} } 2 + | c = [[Sine Exponential Formulation]], [[Cosine Exponential Formulation]], Definition of [[Definition:Hyperbolic Sine|Hyperbolic Sine]], Definition of [[Definition:Hyperbolic Cosine|Hyperbolic Cosine]] +}} +{{eqn | r = \frac {e^{b + i a} - e^{-b + i a} - e^{b - i a} + e^{-b - i a} - e^{b + i a} + e^{-b + i a} - e^{b - i a} + e^{-b - i a} } {4 i} + | c = simplifying +}} +{{eqn | r = \frac {e^{-b - i a} - e^{b - i a} } {2 i} + | c = simplifying +}} +{{eqn | r = \frac {e^{i \left({a + b i}\right)} - e^{-i \left({a + b i}\right)} } {2 i} + | c = +}} +{{eqn | r = \sin \left({a + b i}\right) + | c = [[Sine Exponential Formulation]] +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sinh \paren {a + b i} + | r = \sinh a \cosh \paren {b i} + \cosh a \sinh \paren {b i} + | c = [[Hyperbolic Sine of Sum]] +}} +{{eqn | r = \sinh a \cos b + \cosh a \sin \paren {b i} + | c = [[Cosine in terms of Hyperbolic Cosine]] +}} +{{eqn | r = \sinh a \cos b + i \cosh a \sin b + | c = [[Sine in terms of Hyperbolic Sine]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cosine of Complex Number} +Tags: Cosine Function, Complex Numbers, Cosine of Complex Number + +\begin{theorem} +Let $a$ and $b$ be [[Definition:Real Number|real numbers]]. +Let $i$ be the [[Definition:Imaginary Unit|imaginary unit]]. 
+Then: +:$\cos \left({a + b i}\right) = \cos a \cosh b - i \sin a \sinh b$ +where: +:$\cos$ denotes the [[Definition:Cosine Function|cosine function]] ([[Definition:Real Cosine Function|real]] and [[Definition:Complex Cosine Function|complex]]) +:$\sin$ denotes the [[Definition:Real Sine Function|real sine function]] +:$\sinh$ denotes the [[Definition:Hyperbolic Sine|hyperbolic sine function]] +:$\cosh$ denotes the [[Definition:Hyperbolic Cosine|hyperbolic cosine function]] +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cos \paren {a + b i} + | r = \cos a \cos \paren {b i} - \sin a \sin \paren {b i} + | c = [[Cosine of Sum]] +}} +{{eqn | r = \cos a \cosh b - \sin a \sin \paren {b i} + | c = [[Hyperbolic Cosine in terms of Cosine]] +}} +{{eqn | r = \cos a \cosh b - i \sin a \sinh b + | c = [[Hyperbolic Sine in terms of Sine]] +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cos a \cosh b - i \sin a \sinh b + | r = \frac {e^{i a} + e^{-i a} } 2 \frac {e^b + e^{-b} } 2 - i \frac {e^{i a} - e^{-i a} } {2 i} \frac {e^b - e^{-b} } 2 + | c = [[Cosine Exponential Formulation]], {{Defof|Hyperbolic Cosine}}, [[Sine Exponential Formulation]], {{Defof|Hyperbolic Sine}} +}} +{{eqn | r = \frac {e^{b + i a} + e^{-b + i a} + e^{b - i a} + e^{-b - i a} - e^{b + i a} + e^{-b + i a} + e^{b - i a} - e^{-b - i a} } 4 + | c = simplifying +}} +{{eqn | r = \frac {e^{-b + i a} + e^{b - i a} } 2 + | c = simplifying +}} +{{eqn | r = \frac {e^{i \paren {a + b i} } + e^{-i \paren {a + b i} } } 2 + | c = +}} +{{eqn | r = \cos \paren {a + b i} + | c = [[Cosine Exponential Formulation]] +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map \cosh {a + b i} + | r = \cosh a \map \cosh {b i} + \sinh a \map \sinh {b i} + | c = [[Hyperbolic Cosine of Sum]] +}} +{{eqn | r = \cosh a \cos b + \sinh a \map \sinh {b i} + | c = [[Cosine in terms of Hyperbolic Cosine]] +}} +{{eqn | r = \cosh a \cos b + i \sinh a \sin b + | c = [[Sine in terms of Hyperbolic Sine]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Tangent of Complex Number} +Tags: Tangent Function, Complex Numbers, Tangent of Complex Number + +\begin{theorem} +Let $a$ and $b$ be [[Definition:Real Number|real numbers]]. +Let $i$ be the [[Definition:Imaginary Unit|imaginary unit]]. 
+Then: +\end{theorem}<|endoftext|> +\section{Cosine in terms of Sine} +Tags: Cosine Function, Sine Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \cos x + | r = +\sqrt {1 - \sin^2 x} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | r = -\sqrt {1 - \sin^2 x} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cos^2 x + \sin^2 x + | r = 1 + | c = [[Sum of Squares of Sine and Cosine]] +}} +{{eqn | ll= \leadsto + | l = \cos^2 x + | r = 1 - \sin^2 x +}} +{{eqn | ll= \leadsto + | l = \cos x + | r = \pm \sqrt {1 - \sin^2 x} +}} +{{end-eqn}} +Then from [[Sign of Cosine]]: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Sine in terms of Secant} +Tags: Sine Function, Secant Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \sin x + | r = + \frac {\sqrt{\sec ^2 x - 1} } {\sec x} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \sin x + | r = - \frac {\sqrt{\sec ^2 x - 1} } {\sec x} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +For the first part, if there exists [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$: +{{begin-eqn}} +{{eqn | l = \tan x + | r = +\sqrt {\sec^2 x - 1} + | c = [[Tangent in terms of Secant]] +}} +{{eqn | ll= \leadsto + | l = \frac {\sin x} {\cos x} + | r = +\sqrt {\sec^2 x - 1} + | c = [[Tangent is Sine divided by Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin x \sec x + | r = +\sqrt {\sec^2 x - 1} + | c = [[Secant is Reciprocal of Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin x + | r = +\frac {\sqrt {\sec^2 x - 1} } {\sec x} +}} +{{end-eqn}} +For the second part, if there exists [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$: +{{begin-eqn}} +{{eqn | l = \tan x + | r = -\sqrt {\sec^2 x - 1} + | c = [[Tangent in terms of Secant]] +}} +{{eqn | ll= \leadsto + | l = \frac {\sin x} {\cos x} + | r = -\sqrt {\sec^2 x - 1} + | c = [[Tangent is Sine divided by Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin x \sec x + | r = -\sqrt {\sec^2 x - 1} + | c = [[Secant is Reciprocal of Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin x + | r = -\frac {\sqrt {\sec^2 x - 1} } {\sec x} +}} +{{end-eqn}} +When $\cos x = 0$, $\sec x$ is undefined. +{{qed}} +\end{proof}<|endoftext|> +\section{Sign of Sine} +Tags: Sine Function + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. 
+{{begin-eqn}} +{{eqn | l = \sin x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ +}} +{{eqn | l = \sin x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ +}} +{{end-eqn}} +where $\sin$ is the [[Definition:Real Sine Function|real sine function]]. +\end{theorem} + +\begin{proof} +First the case where $n \ge 0$ is addressed. +The proof proceeds by [[Principle of Mathematical Induction|induction]]. +For all $n \in \Z_{\ge 0}$, let $\map P n$ be the [[Definition:Proposition|proposition]]: +:$\forall x \in \R:$ +::$2 n \pi < x < \paren {2 n + 1} \pi \implies \sin x > 0$ +::$\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi \implies \sin x < 0$ +=== Basis for the Induction === +Let $n = 0$. +From the [[Sine and Cosine are Periodic on Reals/Corollary|corollary to Sine and Cosine are Periodic on Reals]]: +:$\sin x$ is [[Definition:Strictly Positive|strictly positive]] on the [[Definition:Open Real Interval|interval]] $\openint 0 \pi$ +and: +:$\sin x$ is [[Definition:Strictly Negative|strictly negative]] on the [[Definition:Open Real Interval|interval]] $\openint \pi {2 \pi}$ +Thus: +:$2 \cdot 0 \cdot \pi < x < \paren {2 \cdot 0 + 1} \pi \implies \sin x > 0$ +:$\paren {2 \cdot 0 \cdot + 1} \pi < x < \paren {2 \cdot 0 + 2} \pi \implies \sin x < 0$ +This is our [[Definition:Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that if $\map P k$ is true, where $k \ge 0$, then it logically follows that $\map P {k + 1}$ is true. +So this is our [[Definition:Induction Hypothesis|induction hypothesis]]: +:$\forall x \in \R:$ +::$2 k \pi < x < \paren {2 k + 1} \pi \implies \sin x > 0$ +::$\paren {2 k + 1} \pi < x <\paren {2 k + 2} \pi \implies \sin x < 0$ +Then we need to show: +:$\forall x \in \R:$ +::$2 \paren {k + 1} \pi < x < \paren {2 \paren {k + 1} + 1} \pi \implies \sin x > 0$ +::$\paren {2 \paren {k + 1} + 1} \pi < x < \paren {2 \paren {k + 1} + 2} \pi \implies \sin x < 0$ +That is: +:$\forall x \in \R:$ +::$\paren {2 k + 2} \pi < x < \paren {2 k + 3} \pi \implies \sin x > 0$ +::$\paren {2 k + 3} \pi < x < \paren {2 k + 4} \pi \implies \sin x < 0$ +=== Induction Step === +This is our [[Definition:Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = 2 k \pi < x < \paren {2 k + 1} \pi + | o = \implies + | r = \sin x > 0 +}} +{{eqn | ll= \leadsto + | l = 2 k \pi < \paren {x - 2 \pi} < \paren {2 k + 1} \pi + | o = \implies + | r = \map \sin {x - 2 \pi} > 0 + | c = replacing $x$ with $x - 2 \pi$ +}} +{{eqn | ll= \leadsto + | l = 2 k \pi < \paren {x - 2 \pi} < \paren {2 k + 1} \pi + | o = \implies + | r = \sin x > 0 + | c = [[Sine and Cosine are Periodic on Reals]] +}} +{{eqn | ll= \leadsto + | l = \paren {2 k + 2} \pi < x < \paren {2 k + 3} \pi + | o = \implies + | r = \sin x > 0 + | c = adding $2 \pi$ to all elements of inequality +}} +{{end-eqn}} +Also: +{{begin-eqn}} +{{eqn | l = \paren {2 k + 1} \pi < x < \paren {2 k + 2} \pi + | o = \implies + | r = \sin x < 0 +}} +{{eqn | ll= \leadsto + | l = \paren {2 k + 1} \pi < \paren {x - 2 \pi} < \paren {2 k + 2} \pi + | o = \implies + | r = \map \sin {x - 2 \pi} < 0 + | c = replacing $x$ with $x - 2 \pi$ +}} +{{eqn | ll= \leadsto + | l = \paren {2 k + 1} \pi < \paren {x - 2 \pi} < \paren {2 k + 2} \pi + | o = \implies + | r = \sin x < 0 + | c = [[Sine and Cosine are Periodic on Reals]] +}} +{{eqn | ll= \leadsto + | l = \paren {2 k 
+ 3} \pi < x < \paren {2 k + 4} \pi + | o = \implies + | r = \sin x < 0 + | c = adding $2 \pi$ to all elements of inequality +}} +{{end-eqn}} +It follows by [[Principle of Mathematical Induction|induction]] that: +:$\forall n \in \Z_{\ge 0}: \forall x \in \R:$ +::$2 n \pi < x < \paren {2 n + 1} \pi \implies \sin x > 0$ +::$\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi \implies \sin x < 0$ +{{qed|lemma}} +=== Negative $n$ === +Let $n \in \Z_{\le 0}$ be a [[Definition:Negative Integer|negative integer]]. +Then, by definition, $-\paren {n + 1}$ is a [[Definition:Strictly Positive Integer|(strictly) positive integer]]. +So: +{{begin-eqn}} +{{eqn | l = 2 \paren {-\paren {n + 1} } \pi < x < \paren {2 \paren {-\paren {n + 1} } + 1} \pi + | o = \implies + | r = \sin x > 0 + | c = Result for [[Definition:Positive Integer|positive $n$]] above +}} +{{eqn | l = -\paren {2 n + 2} \pi < x < -\paren {2 n + 1} \pi + | o = \implies + | r = \sin x > 0 + | c = simplifying +}} +{{eqn | ll= \leadsto + | l = -\paren {2 n + 2} \pi < \paren {-x} < -\paren {2 n + 1} \pi + | o = \implies + | r = \map \sin {-x} > 0 + | c = replacing $x$ with $-x$ +}} +{{eqn | ll= \leadsto + | l = -\paren {2 n + 2} \pi < \paren {-x} < -\paren {2 n + 1} \pi + | o = \implies + | r = -\paren {\sin x} > 0 + | c = [[Sine Function is Odd]] +}} +{{eqn | ll= \leadsto + | l = \paren {2 n + 2} \pi > x > \paren {2 n + 1} \pi + | o = \implies + | r = \sin x < 0 + | c = multiplying throughout by $-1$ +}} +{{eqn | ll= \leadsto + | l = \paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi + | o = \implies + | r = \sin x < 0 + | c = rearranging +}} +{{end-eqn}} +Similarly: +{{begin-eqn}} +{{eqn | l = \paren {2 \paren {-\paren {n + 1} } + 1} \pi < x < \paren {2 \paren {-\paren {n + 1} } + 2} \pi + | o = \implies + | r = \sin x < 0 + | c = Result for [[Definition:Positive Integer|positive $n$]] above +}} +{{eqn | l = -\paren {2 n + 1} \pi < x < -2 n \pi + | o = \implies + | r = \sin x < 0 + | c = simplifying +}} +{{eqn | ll= \leadsto + | l = -\paren {2 n + 1} \pi < \paren {-x} < -2 n \pi + | o = \implies + | r = \map \sin {-x} < 0 + | c = replacing $x$ with $-x$ +}} +{{eqn | ll= \leadsto + | l = -\paren {2 n + 1} \pi < \paren {-x} < -2 n \pi + | o = \implies + | r = -\paren {\sin x} < 0 + | c = [[Sine Function is Odd]] +}} +{{eqn | ll= \leadsto + | l = -\paren {2 n + 1} \pi > x > 2 n \pi + | o = \implies + | r = \sin x > 0 + | c = multiplying throughout by $-1$ +}} +{{eqn | ll= \leadsto + | l = 2 n \pi < x < \paren {2 n + 1} \pi + | o = \implies + | r = \sin x > 0 + | c = simplifying and rearranging +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Second Principle of Mathematical Induction} +Tags: Mathematical Induction, Principle of Mathematical Induction, Proof Techniques, Second Principle of Mathematical Induction + +\begin{theorem} +Let $\map P n$ be a [[Definition:Propositional Function|propositional function]] depending on $n \in \Z$. +Let $n_0 \in \Z$ be given. +Suppose that: +:$(1): \quad \map P {n_0}$ is [[Definition:True|true]] +:$(2): \quad \forall k \in \Z: k \ge n_0: \map P {n_0} \land \map P {n_0 + 1} \land \ldots \land \map P {k - 1} \land \map P k \implies \map P {k + 1}$ +Then: +:$\map P n$ is [[Definition:True|true]] for all $n \ge n_0$. +This process is called '''proof by (mathematical) induction'''. +\end{theorem} + +\begin{proof} +For each $n \ge n_0$, let $\map {P'} n$ be defined as: +:$\map {P'} n := \map P {n_0} \land \dots \land \map P n$ +It suffices to show that $\map {P'} n$ is true for all $n \ge n_0$. 
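+For instance, with $n_0 = 0$ the statement $\map {P'} 3$ is the conjunction $\map P 0 \land \map P 1 \land \map P 2 \land \map P 3$; in general, establishing $\map {P'} n$ for all $n \ge n_0$ in particular establishes $\map P n$ for all $n \ge n_0$.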
+It is immediate from the assumption $\map P {n_0}$ that $\map {P'} {n_0}$ is [[Definition:True|true]]. +Now suppose that $\map {P'} n$ holds. +By $(2)$, this implies that $\map P {n + 1}$ holds as well. +Consequently, $\map {P'} n \land \map P {n + 1} = \map {P'} {n + 1}$ holds. +Thus by the [[Principle of Mathematical Induction]]: +:$\map {P'} n$ holds for all $n \ge n_0$ +as desired. +{{Qed}} +\end{proof}<|endoftext|> +\section{Second Principle of Finite Induction} +Tags: Named Theorems, Principle of Finite Induction, Mathematical Induction, Proof Techniques, Second Principle of Finite Induction + +\begin{theorem} +Let $S \subseteq \Z$ be a [[Definition:Subset|subset]] of the [[Definition:Integer|integers]]. +Let $n_0 \in \Z$ be given. +Suppose that: +:$(1): \quad n_0 \in S$ +:$(2): \quad \forall n \ge n_0: \paren {\forall k: n_0 \le k \le n \implies k \in S} \implies n + 1 \in S$ +Then: +:$\forall n \ge n_0: n \in S$ +\end{theorem} + +\begin{proof} +Define $T$ as: +:$T = \set {n \in \Z : \forall k: n_0 \le k \le n: k \in S}$ +Since $n \le n$, it follows that $T \subseteq S$. +Therefore, it will suffice to show that: +:$\forall n \ge n_0: n \in T$ +Firstly, we have that $n_0 \in T$ {{iff}} the following condition holds: +:$\forall k: n_0 \le k \le n_0 \implies k \in S$ +Since $n_0 \in S$, it thus follows that $n_0 \in T$. +Now suppose that $n \in T$; that is: +:$\forall k: n_0 \le k \le n \implies k \in S$ +By $(2)$, this implies: +:$n + 1 \in S$ +Thus, we have: +:$\forall k: n_0 \le k \le n + 1 \implies k \in S$ +{{MissingLinks|[[Closed Interval of Naturally Ordered Semigroup with Successor equals Union with Successor]] for $\Z$}} +Therefore, $n + 1 \in T$. +Hence, by the [[Principle of Finite Induction]]: +:$\forall n \ge n_0: n \in T$ +as desired. +{{Qed}} +\end{proof}<|endoftext|> +\section{Sign of Cosine} +Tags: Cosine Function + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. +Then: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +where $\cos$ is the [[Definition:Real Cosine Function|real cosine function]]. +\end{theorem} + +\begin{proof} +Proof by [[Principle of Mathematical Induction|induction]]: +=== Base case === +For $n = 0$, it follows from [[Sine and Cosine are Periodic on Reals/Corollary]]. 
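+Explicitly, the $n = 0$ instance of the claim reads: $\cos x > 0$ on the [[Definition:Open Real Interval|interval]] $\openint {-\dfrac \pi 2} {\dfrac \pi 2}$ and $\cos x < 0$ on $\openint {\dfrac \pi 2} {\dfrac {3 \pi} 2}$.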
+=== Induction Hypothesis === +This is our [[Definition:Induction Hypothesis|induction hypothesis]]: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = for $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = for $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +Now we need to show true for $n=k+1$: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = for $\paren {2 n + \dfrac 3 2} \pi < x < \paren {2 n + \dfrac 5 2} \pi$ +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = for $\paren {2 n + \dfrac 5 2} \pi < x < \paren {2 n + \dfrac 7 2} \pi$ +}} +{{end-eqn}} +=== Induction Step === +This is our [[Definition:Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = for $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = for $\paren {2 n + \dfrac 3 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 5 2} \pi$ +}} +{{eqn | l = \map \cos {x + 2 \pi} + | o = > + | r = 0 + | c = for $\paren {2 n + \dfrac 3 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 5 2} \pi$ + | cc = [[Sine and Cosine are Periodic on Reals]] +}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = for $\paren {2 n + \dfrac 3 2} \pi < x < \paren {2 n + \dfrac 5 2} \pi$ + | cc = Replacing $x + 2 \pi$ by $x$ +}} +{{end-eqn}} +Also: +{{begin-eqn}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = for $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = for $\paren {2 n + \dfrac 5 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 7 2} \pi$ +}} +{{eqn | l = \map \cos {x + 2 \pi} + | o = < + | r = 0 + | c = for $\paren {2 n + \dfrac 5 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 7 2} \pi$ + | cc = [[Sine and Cosine are Periodic on Reals]] +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = for $\paren {2 n + \dfrac 5 2} \pi < x < \paren {2 n + \dfrac 7 2} \pi$ + | cc = Replacing $x + 2 \pi$ by $x$ +}} +{{end-eqn}} +The result follows by [[Principle of Mathematical Induction|induction]]. 
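+As a spot check of the $n = 1$ instance: $x = 2 \pi$ lies in $\openint {\dfrac {3 \pi} 2} {\dfrac {5 \pi} 2}$ and $\cos 2 \pi = 1 > 0$, while $x = 3 \pi$ lies in $\openint {\dfrac {5 \pi} 2} {\dfrac {7 \pi} 2}$ and $\cos 3 \pi = -1 < 0$.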
+For negative $n$:
+=== Induction Hypothesis ===
+This is our [[Definition:Induction Hypothesis|induction hypothesis]]:
+{{begin-eqn}}
+{{eqn | l = \cos x
+      | o = >
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$
+}}
+{{eqn | l = \cos x
+      | o = <
+      | r = 0
+      | c = for $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$
+}}
+{{end-eqn}}
+Now we need to show that the statement holds with $n - 1$ in place of $n$:
+{{begin-eqn}}
+{{eqn | l = \cos x
+      | o = >
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 5 2} \pi < x < \paren {2 n - \dfrac 3 2} \pi$
+}}
+{{eqn | l = \cos x
+      | o = <
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 3 2} \pi < x < \paren {2 n - \dfrac 1 2} \pi$
+}}
+{{end-eqn}}
+=== Induction Step ===
+This is our [[Definition:Induction Step|induction step]]:
+{{begin-eqn}}
+{{eqn | l = \cos x
+      | o = >
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$
+}}
+{{eqn | l = \map \cos {x + 2 \pi}
+      | o = >
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 1 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 1 2} \pi$
+      | cc= replacing $x$ by $x + 2 \pi$
+}}
+{{eqn | l = \cos x
+      | o = >
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 1 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 1 2} \pi$
+      | cc= [[Sine and Cosine are Periodic on Reals]]
+}}
+{{eqn | l = \cos x
+      | o = >
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 5 2} \pi < x < \paren {2 n - \dfrac 3 2} \pi$
+}}
+{{end-eqn}}
+Also:
+{{begin-eqn}}
+{{eqn | l = \cos x
+      | o = <
+      | r = 0
+      | c = for $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$
+}}
+{{eqn | l = \map \cos {x + 2 \pi}
+      | o = <
+      | r = 0
+      | c = for $\paren {2 n + \dfrac 1 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 3 2} \pi$
+      | cc= replacing $x$ by $x + 2 \pi$
+}}
+{{eqn | l = \cos x
+      | o = <
+      | r = 0
+      | c = for $\paren {2 n + \dfrac 1 2} \pi < x + 2 \pi < \paren {2 n + \dfrac 3 2} \pi$
+      | cc= [[Sine and Cosine are Periodic on Reals]]
+}}
+{{eqn | l = \cos x
+      | o = <
+      | r = 0
+      | c = for $\paren {2 n - \dfrac 3 2} \pi < x < \paren {2 n - \dfrac 1 2} \pi$
+}}
+{{end-eqn}}
+The result follows by [[Principle of Mathematical Induction|induction]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Sign of Tangent}
+Tags: Tangent Function
+
+\begin{theorem}
+Let $x$ be a [[Definition:Real Number|real number]].
+Then:
+{{begin-eqn}}
+{{eqn | l = \tan x
+      | o = >
+      | r = 0
+      | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$
+}}
+{{eqn | l = \tan x
+      | o = <
+      | r = 0
+      | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$
+}}
+{{end-eqn}}
+where $\tan$ denotes the [[Definition:Tangent Function|tangent function]].
+\end{theorem}
+
+\begin{proof}
+From [[Tangent is Sine divided by Cosine]]:
+:$\tan x = \dfrac {\sin x} {\cos x}$
+Since $n$ is an integer, $n$ is either odd or even.
+=== Case 1 ===
+Let $n$ be [[Definition:Odd Integer|odd]].
+Hence let $m$ be an [[Definition:Integer|integer]] such that $n = 2 m + 1$.
+==== Case 1.1 ====
+:$n \pi < x < \paren {n + \dfrac 1 2} \pi \implies \paren {2 m + 1} \pi < x < \paren {2 m + \dfrac 3 2} \pi$
+From [[Sign of Sine]], $\sin x$ is [[Definition:Negative Real Number|negative]].
+From [[Sign of Cosine]], $\cos x$ is [[Definition:Negative Real Number|negative]].
+Therefore: +:$\tan x = \dfrac {\sin x} {\cos x} > 0$ +==== Case 1.2 ==== +:$\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi \implies \paren {2 m + \dfrac 3 2} \pi < x < \paren {2 m + 2} \pi$ +From [[Sign of Sine]], $\sin x$ is [[Definition:Negative Real Number|negative]]. +From [[Sign of Cosine]], $\cos x$ is [[Definition:Positive Real Number|positive]]. +Therefore: +:$\tan x = \dfrac {\sin x} {\cos x} < 0$ +=== Case 2 === +Let $n$ be [[Definition:Even Integer|even]]. +Hence let $m$ be an [[Definition:Integer|integer]] such that $n = 2 m$. +==== Case 2.1 ==== +:$n \pi < x < \paren {n + \dfrac 1 2} \pi \implies 2 m \pi < x < \paren {2 m + \dfrac 1 2} \pi$ +From [[Sign of Sine]], $\sin x$ is [[Definition:Positive Real Number|positive]]. +From [[Sign of Cosine]], $\cos x$ is [[Definition:Positive Real Number|positive]]. +Therefore: +:$\tan x = \dfrac {\sin x} {\cos x} > 0$ +==== Case 2.2 ==== +:$\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi \implies \paren {2 m + \dfrac 1 2} \pi < x < \paren {2 m + 1} \pi$ +From [[Sign of Sine]], $\sin x$ is [[Definition:Positive Real Number|positive]]. +From [[Sign of Cosine]], $\cos x$ is [[Definition:Negative Real Number|negative]]. +Therefore: +:$\tan x = \dfrac {\sin x} {\cos x} < 0$ +Therefore: +{{begin-eqn}} +{{eqn | l = \tan x + | o = > + | r = 0 + | c = if there exists [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \tan x + | o = < + | r = 0 + | c = if there exists [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Reciprocal of Strictly Negative Real Number is Strictly Negative} +Tags: Real Numbers, Reciprocals + +\begin{theorem} +:$\forall x \in \R: x < 0 \implies \dfrac 1 x < 0$ +\end{theorem} + +\begin{proof} +Let $x < 0$. +{{AimForCont}} $\dfrac 1 x > 0$. +Then: +{{begin-eqn}} +{{eqn | l = x + | o = < + | r = 0 + | c = +}} +{{eqn | ll= \leadsto + | l = x \times \dfrac 1 x + | o = < + | r = 0 \times 0 + | c = [[Real Number Ordering is Compatible with Multiplication/Negative Factor|Real Number Ordering is Compatible with Multiplication: Negative Factor]] +}} +{{eqn | ll= \leadsto + | l = 1 + | o = < + | r = 0 + | c = [[Definition:Real Number Axioms|Real Number Axioms: $\R \text M 4$: Inverse]] +}} +{{end-eqn}} +But from [[Real Zero is Less than Real One]]: +:$1 > 0$ +Therefore by [[Proof by Contradiction]]: +:$\dfrac 1 x < 0$ +{{qed}} +\end{proof}<|endoftext|> +\section{Sign of Cosecant} +Tags: Cosecant Function + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. +Then: +{{begin-eqn}} +{{eqn | l = \csc x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ +}} +{{eqn | l = \csc x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ +}} +{{end-eqn}} +where $\csc$ is the [[Definition:Real Cosecant Function|real cosecant function]]. 
+\end{theorem} + +\begin{proof} +For the first part: +{{begin-eqn}} +{{eqn | l = \sin x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ + | cc= [[Sign of Sine]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\sin x} + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ + | cc= [[Reciprocal of Strictly Positive Real Number is Strictly Positive]] +}} +{{eqn | ll= \leadsto + | l = \csc x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $2 n \pi < x < \paren {2 n + 1} \pi$ + | cc= [[Cosecant is Reciprocal of Sine]] +}} +{{end-eqn}} +For the second part: +{{begin-eqn}} +{{eqn | l = \sin x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ + | cc= [[Sign of Sine]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\sin x} + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ + | cc= [[Reciprocal of Strictly Negative Real Number is Strictly Negative]] +}} +{{eqn | ll= \leadsto + | l = \csc x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + 1} \pi < x < \paren {2 n + 2} \pi$ + | cc= [[Cosecant is Reciprocal of Sine]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Sign of Secant} +Tags: Secant Function + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. +{{begin-eqn}} +{{eqn | l = \sec x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \sec x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +where $\sec$ is the [[Definition:Real Secant Function|real secant function]]. 
+\end{theorem} + +\begin{proof} +For the first part: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ + | cc= [[Sign of Cosine]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\cos x} + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ + | cc= [[Reciprocal of Strictly Positive Real Number is Strictly Positive]] +}} +{{eqn | ll= \leadsto + | l = \sec x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ + | cc= [[Secant is Reciprocal of Cosine]] +}} +{{end-eqn}} +For the second part: +{{begin-eqn}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ + | cc= [[Sign of Cosine]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\cos x} + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ + | cc= [[Reciprocal of Strictly Negative Real Number is Strictly Negative]] +}} +{{eqn | ll= \leadsto + | l = \sec x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ + | cc= [[Secant is Reciprocal of Cosine]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Sign of Cotangent} +Tags: Cotangent Function + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. +Then: +{{begin-eqn}} +{{eqn | l = \cot x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cot x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ +}} +{{end-eqn}} +where $\cot$ is the [[Definition:Real Cotangent Function|real cotangent function]]. 
+\end{theorem} + +\begin{proof} +For the first part: +{{begin-eqn}} +{{eqn | l = \tan x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ + | cc= [[Sign of Tangent]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 \tan x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ + | cc= [[Reciprocal of Strictly Positive Real Number is Strictly Positive]] +}} +{{eqn | ll= \leadsto + | l = \cot x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ + | cc= [[Cotangent is Reciprocal of Tangent]] +}} +{{end-eqn}} +For the second part: +{{begin-eqn}} +{{eqn | l = \tan x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ + | cc= [[Sign of Tangent]] +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\tan x} + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ + | cc= [[Reciprocal of Strictly Negative Real Number is Strictly Negative]] +}} +{{eqn | ll= \leadsto + | l = \cot x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ + | cc= [[Cotangent is Reciprocal of Tangent]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cosine in terms of Tangent} +Tags: Cosine Function, Tangent Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \cos x + | r = +\frac 1 {\sqrt {1 + \tan^2 x} } + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | r = -\frac 1 {\sqrt {1 + \tan^2 x} } + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sec^2 x - \tan^2 x + | r = 1 + | c = [[Difference of Squares of Secant and Tangent]] +}} +{{eqn | ll= \leadsto + | l = \sec^2 x + | r = 1 + \tan ^2 x +}} +{{eqn | ll= \leadsto + | l = \frac 1 {\cos^2 x} + | r = 1 + \tan^2 x + | c = [[Secant is Reciprocal of Cosine]] +}} +{{eqn | ll= \leadsto + | l = \cos^2 x + | r = \frac 1 {1 + \tan^2 x} +}} +{{eqn | ll= \leadsto + | l = \cos x + | r = \pm \frac 1 {\sqrt {1 + \tan^2 x} } +}} +{{end-eqn}} +Then from [[Sign of Cosine]]: +{{begin-eqn}} +{{eqn | l = \cos x + | o = > + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n - \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \cos x + | o = < + | r = 0 + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {2 n + \dfrac 1 2} \pi < x < \paren {2 n + \dfrac 3 2} \pi$ +}} +{{end-eqn}} +When $\cos x = 0$, $\tan x$ is undefined. 
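+As an informal check of the sign convention (illustration only, not part of the proof): for $x = \dfrac \pi 4$, which lies in $\openint {-\dfrac \pi 2} {\dfrac \pi 2}$ (the case $n = 0$ of the first interval), $\tan x = 1$ and $+\dfrac 1 {\sqrt {1 + \tan^2 x} } = \dfrac 1 {\sqrt 2} = \cos \dfrac \pi 4$, while for $x = \pi$, which lies in $\openint {\dfrac \pi 2} {\dfrac {3 \pi} 2}$ (the case $n = 0$ of the second interval), $\tan x = 0$ and $-\dfrac 1 {\sqrt {1 + \tan^2 x} } = -1 = \cos \pi$.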
+{{qed}} +\end{proof}<|endoftext|> +\section{Tangent in terms of Secant} +Tags: Tangent Function, Secant Function + +\begin{theorem} +{{begin-eqn}} +{{eqn | l = \tan x + | r = +\sqrt {\sec^2 x - 1} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$ +}} +{{eqn | l = \tan x + | r = -\sqrt {\sec^2 x - 1} + | c = if there exists an [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$ +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sec^2 x - \tan^2 x + | r = 1 + | c = [[Difference of Squares of Secant and Tangent]] +}} +{{eqn | ll= \leadsto + | l = \tan^2 x + | r = \sec^2 x - 1 +}} +{{eqn | ll= \leadsto + | l = \tan x + | r = \pm \sqrt {\sec^2 x - 1} +}} +{{end-eqn}} +Also, from [[Sign of Tangent]]: +:If there exists [[Definition:Integer|integer]] $n$ such that $n \pi < x < \paren {n + \dfrac 1 2} \pi$, $\tan x > 0$. +:If there exists [[Definition:Integer|integer]] $n$ such that $\paren {n + \dfrac 1 2} \pi < x < \paren {n + 1} \pi$, $\tan x < 0$. +When $\cos x = 0$, $\tan x$ and $\sec x$ is undefined. +{{qed}} +\end{proof}<|endoftext|> +\section{Union of Left-Total Relations is Left-Total} +Tags: Relation Theory + +\begin{theorem} +Let $S_1, S_2, T_1, T_2$ be [[Definition:Set|sets]] or [[Definition:Class (Class Theory)|classes]]. +Let $\mathcal R_1 \subseteq S_1 \times T_1$ and $\mathcal R_2 \subseteq S_2 \times T_2$ be [[Definition:Left-Total Relation|left-total relations]]. +Then $\mathcal R_1 \cup \mathcal R_2$ is [[Definition:Left-Total Relation|left-total]]. +\end{theorem} + +\begin{proof} +Let both $\mathcal R_1$ and $\mathcal R_2$ be [[Definition:Left-Total Relation|left-total]]. +Let $\mathcal R = \mathcal R_1 \cup \mathcal R_2$. +Let $s \in S_1 \cup S_2$. +By the definition of [[Definition:Set Union|union]]: +:$s \in S_1 \lor s \in S_2$ +Thus $s \in S_i$ for $i \in \set {1, 2}$. +By definition of [[Definition:Left-Total Relation|left-total relation]], there is a $t \in T_i$ such that $\tuple {s, t} \in \mathcal R_i$. +We have that $\mathcal R$ is a [[Definition:Superset|superset]] of $\mathcal R_i$. +Hence from [[Union is Smallest Superset]]: +:$\tuple {s, t} \in \mathcal R_i \subseteq \mathcal R \implies \tuple {s, t} \in \mathcal R$ +{{qed}} +\end{proof}<|endoftext|> +\section{Union of Inverse is Inverse of Union} +Tags: Inverse Relations + +\begin{theorem} +Let for $i \in \left\{1,2\right\}$ $\mathcal R_i \subseteq S_i \times T_i$ be a [[Definition:Relation|relation]] on $S_i \times T_i$. +Let $\mathcal R_i^{-1} \subseteq T_i \times S_i$ be the [[Definition:Inverse Relation|inverse]] of $\mathcal R_i$. +Then $\mathcal R_1^{-1} \cup \mathcal R_2^{-1} = \left(\mathcal R_1 \cup \mathcal R_2\right)^{-1}$ +\end{theorem} + +\begin{proof} +Let $\left(t,s\right) \in \mathcal R_1^{-1} \cup \mathcal R_2^{-1}$. +By the definition of [[Definition:Set Union|union]]: +$\left(t,s\right) \in \mathcal R_1^{-1} \vee \left(t,s\right) \in \mathcal R_2^{-1}$. +Assume $\left(t,s\right) \in \mathcal R_i^{-1}$. By the definition of [[Definition:Inverse_Relation|inverse]]: +$\left(s,t\right) \in \mathcal R_i$. +By [[Disjunction Introduction]]: +$\left(s,t\right) \in \mathcal R_1 \vee \left(s,t\right) \in \mathcal R_2 \iff \left(s,t\right) \in \mathcal R_1 \cup \mathcal R_2$. +And by the definition of [[Definition:Inverse_Relation|inverse]]: +$\left(t,s\right) \in \left(\mathcal R_1 \cup \mathcal R_2\right)^{-1}$. 
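+Each of the steps above is reversible, so conversely $\left(t,s\right) \in \left(\mathcal R_1 \cup \mathcal R_2\right)^{-1}$ implies $\left(t,s\right) \in \mathcal R_1^{-1} \cup \mathcal R_2^{-1}$.
+The result follows by definition of set equality.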
+{{qed}} +[[Category:Inverse Relations]] +ggcvvyz2qfw2ocefk8br3poepkyybtb +\end{proof}<|endoftext|> +\section{Condition for Darboux Integrability} +Tags: Integral Calculus + +\begin{theorem} +Let $\closedint a b$ be a [[Definition:Closed Real Interval|closed real interval]]. +Let $f$ be a [[Definition:Bounded Real-Valued Function|bounded]] [[Definition:Real Function|real function]] defined on $\closedint a b$. +Then $f$ is [[Definition:Darboux Integrable Function|Darboux integrable]] {{iff}}: +:for every $\epsilon \in \R_{>0}$, there exists a [[Definition:Finite Subdivision|finite subdivision]] $S$ of $\closedint a b$ such that $\map U S – \map L S < \epsilon$ +where +:$\map U S$ is the [[Definition:Upper Sum|upper sum]] of $f$ on $\closedint a b$ with respect to $S$ +:$\map L S$ is the [[Definition:Lower Sum|lower sum]] of $f$ on $\closedint a b$ with respect to $S$ +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Let $f$ be [[Definition:Darboux Integrable Function|Darboux integrable]]. +Let $\epsilon \in \R_{>0}$ be given. +It is to be proved that a [[Definition:Finite Subdivision|finite subdivision]] $S$ of $\closedint a b$ exists such that: +:$\map U S – \map L S < \epsilon$ +As $f$ is [[Definition:Darboux Integrable Function|Darboux integrable]]: +:$\displaystyle \int_a^b \map f x \rd x$ exists. +By the definition of the [[Definition:Darboux Integral|Darboux integral]]: +:the [[Definition:Lower Integral|lower integral]] $\displaystyle \underline {\int_a^b} \map f x \rd x$ exists. +Thus by the definition of [[Definition:Lower Integral|lower integral]]: +:$\sup_P \map L P$ exists +where: +:$\map L P$ denotes the [[Definition:Lower Sum|lower sum]] of $f$ on $\closedint a b$ with respect to the [[Definition:Finite Subdivision|finite subdivision]] $P$ +:$\sup_P \map L P$ denotes the [[Definition:Supremum of Subset of Real Numbers|supremum]] for $\map L P$. +Therefore by [[Supremum of Subset of Real Numbers is Arbitrarily Close]]: +:a [[Definition:Finite Subdivision|finite subdivision]] $S_1$ of $\closedint a b$ exists, satisfying: +::$\sup_P \map L P - \map L {S_1} < \dfrac \epsilon 2$ +In a similar way: +By the definition of the [[Definition:Darboux Integral|Darboux integral]]: +:the [[Definition:Upper Integral|upper integral]] $\displaystyle \overline {\int_a^b} \map f x \rd x$ exists. +Thus by the definition of [[Definition:Upper Integral|upper integral]]: +:$\inf_P \map U P$ exists +where: +:$\map U P$ denotes the [[Definition:Upper Sum|upper sum]] of $f$ on $\closedint a b$ with respect to the [[Definition:Finite Subdivision|finite subdivision]] $P$ +:$\inf_P \map U P$ denotes the [[Definition:Infimum of Subset of Real Numbers|infimum]] for $\map U P$. +Therefore by [[Infimum of Subset of Real Numbers is Arbitrarily Close]]: +:a [[Definition:Finite Subdivision|finite subdivision]] $S_2$ of $\closedint a b$ exists, satisfying: +::$\map U {S_2} - \inf_P \map U P < \dfrac \epsilon 2$ +Now let $S := S_1 \cup S_2$ be defined. 
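+Note that $S$, being the union of two [[Definition:Finite Subdivision|finite subdivisions]] of $\closedint a b$, is itself a [[Definition:Finite Subdivision|finite subdivision]] of $\closedint a b$, so that $\map U S$ and $\map L S$ are defined.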
+We observe: +:$S$ is either equal to $S_1$ or [[Definition:Finer Subdivision|finer]] than $S_1$ +:$S$ is either equal to $S_2$ or [[Definition:Finer Subdivision|finer]] than $S_2$ +We find: +:$\map L S \ge \map L {S_1}$ by the definition of [[Definition:Lower Sum|lower sum]] and $S$ [[Definition:Finer Subdivision|refining]] $S_1$ +:$\map U S \le \map U {S_2}$ by the definition of [[Definition:Upper Sum|upper sum]] and $S$ [[Definition:Finer Subdivision|refining]] $S_2$ +Recall that by the definition of [[Definition:Darboux Integrable Function|Darboux integrable]]: +:$\displaystyle \overline {\int_a^b} \map f x \rd x = \underline {\int_a^b} \map f x \rd x$ +Hence we have: +{{begin-eqn}} +{{eqn | l = \map U S – \map L S + | o = \le + | r = \map U {S_2} – \map L S + | c = as $\map U S \le \map U {S_2}$ +}} +{{eqn | o = \le + | r = \map U {S_2} – \map L {S_1} + | c = as $\map L S \ge L \map L {S_1}$ +}} +{{eqn | r = \map U {S_2} - \overline {\int_a^b} \map f x \rd x + \overline {\int_a^b} \map f x \rd x – \map L {S_1} +}} +{{eqn | r = \map U {S_2} - \overline{\int_a^b} \map f x \rd x + \underline{\int_a^b} \map f x \rd x – \map L {S_1} + | c = as $\displaystyle \overline {\int_a^b} \map f x \rd x = \underline {\int_a^b} \map f x \rd x$ +}} +{{eqn | r = \map U {S_2} - \inf_P \map U P + \sup_P \map L P – \map L {S_1} + | c = {{Defof|Upper Integral}} and {{Defof|Lower Integral}} +}} +{{eqn | o = < + | r = \frac \epsilon 2 + \sup_P \map L P – \map L {S_1} + | c = as $\map U {S_2} - \inf_P \map U P < \dfrac \epsilon 2$ +}} +{{eqn | o = < + | r = \frac \epsilon 2 + \frac \epsilon 2 + | c = as $\sup_P \map L P - \map L {S_1} < \dfrac \epsilon 2$ +}} +{{eqn | r = \epsilon +}} +{{end-eqn}} +{{qed|lemma}} +=== Sufficient Condition === +Let $\epsilon \in \R_{>0}$ be given. +Let $f$ be such that: +:there exists a [[Definition:Finite Subdivision|finite subdivision]] $S$ of $\closedint a b$ such that $\map U S – \map L S < \epsilon$. +We need to prove that $f$ is [[Definition:Darboux Integrable Function|Darboux integrable]]. +First we show that $\inf_P \map U P$ exists. +Let $T$ be defined as: +:$T := \leftset {\map U P: P}$ is a [[Definition:Finite Subdivision|finite subdivision]] of $\rightset {\closedint a b}$ +By: +:$\map U S – \map L S < \epsilon$ +we know that $\map U S$ exists. +From this we conclude that $T$ is [[Definition:Non-Empty Set|non-empty]]. +Because $f$ is [[Definition:Bounded Real-Valued Function|bounded]], we know by the definition of [[Definition:Upper Sum|upper sum]] that $T$ is [[Definition:Bounded Subset of Real Numbers|bounded]]. +From the [[Continuum Property]] it follows that $\inf_P \map U P$ exists. +Next we show that $\sup_P \map L P$ exists. +We do this similarly to how we showed that $\inf_P \map U P$ exists by focusing on lower sums instead of upper sums: +We find that $\leftset {\map L P: P}$ is a [[Definition:Finite Subdivision|finite subdivision]] of $\rightset {\closedint a b}$ is [[Definition:Non-Empty Set|non-empty]] and [[Definition:Bounded Subset of Real Numbers|bounded]]. +From the [[Continuum Property]] it follows that $\sup_P \map L P$ exists. 
+Observe: +:$\inf_P \map U P \le \map U S$ by the definition of [[Definition:Infimum of Subset of Real Numbers|infimum]] +:$\sup_P \map L P \ge \map L S$ by the definition of [[Definition:Supremum of Subset of Real Numbers|supremum]] +We have: +{{begin-eqn}} +{{eqn | l = \inf_P \map U P - \sup_P \map L P + | o = \le + | r = \map U S - \sup_P \map L P + | c = by $\inf_P \map U P \le \map U S$ +}} +{{eqn | o = \le + | r = \map U S - \map L S + | c = by $\sup_P \map L P \ge \map L S$ +}} +{{eqn | o = < + | r = \epsilon + | c = by $\map U S – \map L S < \epsilon$ +}} +{{end-eqn}} +Also: +{{begin-eqn}} +{{eqn | l = \sup_P \map L P - \inf_P \map U P + | o = \le + | r = \map U S - \inf_P \map U P + | c = [[Supremum of Lower Sums Never Greater than Upper Sum]] +}} +{{eqn | o = \le + | r = \map U S - \map L S + | c = [[Infimum of Upper Sums Never Smaller than Lower Sum]] +}} +{{eqn | o = < + | r = \epsilon + | c = as $\map U S – \map L S < \epsilon$ +}} +{{end-eqn}} +These two results give: +:$\size {\inf_P \map U P - \sup_P \map L P} < \epsilon$ +Since $\epsilon$ can be chosen arbitrarily small ($>0$), this means that: +:$\inf_P \map U P = \sup_P \map L P$ +From this it follows by the definitions of [[Definition:Upper Integral|upper]] and [[Definition:Lower Integral|lower integrals]] that: +:$\displaystyle \overline {\int_a^b} \map f x \rd x = \underline {\int_a^b} \map f x \rd x$ +Hence, by the definition of the [[Definition:Darboux Integral|Darboux integral]], $f$ is [[Definition:Darboux Integrable Function|Darboux integrable]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Open Set minus Closed Set is Open} +Tags: Topology, Open Sets, Closed Sets + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +For $A \subseteq S$ denote by $\complement_S \left({A}\right)$ the [[Definition:Relative Complement|relative complement]] of $A$ in $S$. +Let $U \in \tau$ and $\complement_S \left({V}\right) \in \tau$. +Then: +:$U \setminus V \in \tau$ +and: +:$\complement_S \left({V \setminus U}\right) \in \tau$ +\end{theorem} + +\begin{proof} +From [[Set Difference as Intersection with Relative Complement]]: +:$U \setminus V = U \cap \complement_S \left({V}\right)$ +Since $\tau$ is a [[Definition:Topology|topology]]: +:$U, \complement_S \left({V}\right) \in \tau \implies U \cap \complement_S \left({V}\right) \in \tau \implies U \setminus V \in \tau$ +The other statement follows [[Definition:Mutatis Mutandis|mutatis mutandis]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Union of Right-Total Relations is Right-Total} +Tags: Relation Theory + +\begin{theorem} +Let $S_1, S_2, T_1, T_2$ be [[Definition:Set|sets]] or [[Definition:Class (Class Theory)|classes]]. +Let $\mathcal R_1 \subseteq S_1 \times T_1$ and $\mathcal R_2 \subseteq S_2 \times T_2$ be [[Definition:Right-Total Relation|right-total]] [[Definition:Relation|relations]]. +Then $\mathcal R_1 \cup \mathcal R_2$ is [[Definition:Right-Total Relation|right-total]]. 
+\end{theorem} + +\begin{proof} +Define the [[Definition:Predicate|predicates]] $L$ and $R$ by: +:$\map L X \iff \text {$X$ is left-total}$ +:$\map R X \iff \text {$X$ is right-total}$ +{{begin-eqn}} +{{eqn | l = \map R {\mathcal R_1} \land \map R {\mathcal R_2} + | o = \leadsto + | r = \map L {\mathcal R_1^{-1} } \land \map L {\mathcal R_2^{-1} } + | c = [[Inverse of Right-Total Relation is Left-Total]] +}} +{{eqn | o = \leadsto + | r = \map L {\mathcal R_1^{-1} \cup \mathcal R_2^{-1} } + | c = [[Union of Left-Total Relations is Left-Total]] +}} +{{eqn | o = \leadsto + | r = \map L {\paren {\mathcal R_1 \cup \mathcal R_2}^{-1} } + | c = [[Union of Inverse is Inverse of Union]] +}} +{{eqn | o = \leadsto + | r = \map R {\mathcal R_1 \cup \mathcal R_2} + | c = [[Inverse of Right-Total Relation is Left-Total]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Primitive of x over a x + b squared by p x + q/Corollary} +Tags: Primitives involving a x + b and p x + q + +\begin{theorem} +:$\displaystyle \int \frac {x \rd x} {\paren {a x + b}^2 \paren {p x + q} } = \frac 1 {b p - a q} \paren {\frac q {b p - a q} \ln \size {\frac {a x + b} {p x + q} } + \frac x {a x + b} } + C$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \int \frac {x \rd x} {\paren {a x + b}^2 \paren {p x + q} } + | r = \frac 1 {b p - a q} \paren {\frac q {b p - a q} \ln \size {\frac {a x + b} {p x + q} } - \frac b {a \paren {a x + b} } } + C + | c = [[Primitive of x over a x + b squared by p x + q|Primitive of $\dfrac x {\paren {a x + b}^2 \paren {p x + q} }$]] +}} +{{eqn | r = \frac 1 {b p - a q} \paren {\frac q {b p - a q} \ln \size {\frac {a x + b} {p x + q} } - \frac {a x + b} {a \paren {a x + b} } + \frac {a x} {a \paren {a x + b} } } + C + | c = +}} +{{eqn | r = \frac 1 {b p - a q} \paren {\frac q {b p - a q} \ln \size {\frac {a x + b} {p x + q} } - \frac 1 a + \frac x {a x + b} } + C + | c = +}} +{{eqn | r = \frac 1 {b p - a q} \paren {\frac q {b p - a q} \ln \size {\frac {a x + b} {p x + q} } + \frac x {a x + b} } + \paren {C - \frac 1 {a \paren {b p - a q} } } + | c = +}} +{{eqn | r = \frac 1 {b p - a q} \paren {\frac q {b p - a q} \ln \size {\frac {a x + b} {p x + q} } + \frac x {a x + b} } + C + | c = subsuming constant +}} +{{end-eqn}} +{{qed}} +[[Category:Primitives involving a x + b and p x + q]] +8slaipf6109lrl1iotg7c3y113hiik3 +\end{proof}<|endoftext|> +\section{Arctangent of Imaginary Number} +Tags: Complex Numbers, Tangent Function + +\begin{theorem} +Let $x$ belong to the [[Definition:Open Real Interval|open real interval]] $\openint {-1} 1$. +Then: +:$\map {\tan^{-1} } {i x} = \dfrac i 2 \map \ln {\dfrac {1 + x} {1 - x} }$ +where $\tan$ is the [[Definition:Complex Tangent Function|complex tangent function]], $\ln$ is the [[Definition:Real Natural Logarithm|real natural logarithm]], and $i$ is the [[Definition:Imaginary Unit|imaginary unit]]. +\end{theorem} + +\begin{proof} +Let $y = \map {\tan^{-1} } {i x}$. +Let $x = \tanh \theta$, then $\theta = \tanh^{-1} x$. 
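+Such a $\theta$ exists and is unique, as $x \in \openint {-1} 1$ and the real $\tanh$ function maps $\R$ bijectively onto $\openint {-1} 1$.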
+{{begin-eqn}} +{{eqn | l = \tan y + | r = i x + | c = +}} +{{eqn | l = \tan y + | r = i \tanh \theta + | c = +}} +{{eqn | l = \tan y + | r = \map \tan {i \theta} + | c = [[Hyperbolic Tangent in terms of Tangent]] +}} +{{eqn | ll= \leadsto + | l = y + | r = i \theta + | c = +}} +{{eqn | l = y + | r = i \tanh^{-1} x + | c = +}} +{{eqn | l = y + | r = \frac i 2 \map \ln {\frac {1 + x} {1 - x} } + | c = {{Defof|Real Hyperbolic Arctangent}} +}} +{{end-eqn}} +{{qed}} +[[Category:Complex Numbers]] +[[Category:Tangent Function]] +ltvuizouywxeukypjt6rxq3tg1fc6c2 +\end{proof}<|endoftext|> +\section{Equivalence of Definitions of Real Inverse Hyperbolic Tangent} +Tags: Inverse Hyperbolic Tangent + +\begin{theorem} +Let $S$ denote the [[Definition:Open Real Interval|open real interval]]: +: $S := \left({-1 \,.\,.\, 1}\right)$ +{{TFAE|def = Real Inverse Hyperbolic Tangent}} +\end{theorem} + +\begin{proof} +=== Definition 1 implies Definition 2 === +Let $x = \tanh y$. +Then: +{{begin-eqn}} +{{eqn | l = x + | r = \frac {e^{2 y} - 1} {e^{2 y} + 1} + | c = Definition of [[Definition:Hyperbolic Tangent/Definition 3|Hyperbolic Tangent]] +}} +{{eqn | ll = \implies + | l = x e^{2 y} + x + | r = e^{2 y} - 1 + | c = +}} +{{eqn | ll = \implies + | l = e^{2 y} - x e^{2 y} + | r = 1 + x + | c = +}} +{{eqn | ll = \implies + | l = e^{2 y} + | r = \frac {1 + x} {1 - x} + | c = +}} +{{eqn | ll = \implies + | l = 2 y + | r = \ln \left({ \frac {1 + x} {1 - x} }\right) + | c = +}} +{{eqn | ll = \implies + | l = y + | r = \frac 1 2 \ln \left({ \frac {1 + x} {1 - x} }\right) + | c = +}} +{{end-eqn}} +{{qed|lemma}} +=== Definition 2 implies Definition 1 === +Let $y = \dfrac {1 + x} {1 - x}$. +{{begin-eqn}} +{{eqn | l = \tanh \left({\frac 1 2 \ln \left({ \frac {1 + x} {1 - x} }\right)}\right) + | r = \tanh \left({\frac 1 2 \ln y}\right) + | c = +}} +{{eqn | r = \frac {e^{2 \left({\frac 1 2 \ln y}\right) - 1} } {e^{2 \left({\frac 1 2 \ln y}\right) + 1} } + | c = Definition of [[Definition:Hyperbolic Tangent/Definition 3|Hyperbolic Tangent]] +}} +{{eqn | r = \frac {e^{\ln y} - 1} {e^{\ln y} + 1} + | c = +}} +{{eqn | r = \frac {y - 1} {y + 1} + | c = [[Exponential of Natural Logarithm]] +}} +{{eqn | r = \frac {\frac {1 + x} {1 - x} - 1} {\frac {1 + x} {1 - x} + 1} + | c = +}} +{{eqn | r = \frac {\left({1 + x}\right) - \left({1 - x}\right)} {\left({1 + x}\right) + \left({1 - x}\right)} + | c = +}} +{{eqn | r = \frac {2x} 2 + | c = +}} +{{eqn | r = x + | c = +}} +{{end-eqn}} +{{qed|lemma}} +Therefore: +{{begin-eqn}} +{{eqn | n = 1 + | l = x = \tanh \left({y}\right) + | o = \implies + | r = y = \frac 1 2 \ln \left({ \frac {1 + x} {1 - x} }\right) + | c = [[Equivalence of Definitions of Real Inverse Hyperbolic Tangent#Definition 1 implies Definition 2|Definition 1 implies Definition 2]] +}} +{{eqn | n = 2 + | l = y = \frac 1 2 \ln \left({ \frac {1 + x} {1 - x} }\right) + | o = \implies + | r = x = \tanh \left({y}\right) + | c = [[Equivalence of Definitions of Real Inverse Hyperbolic Tangent#Definition 2 implies Definition 1|Definition 2 implies Definition 1]] +}} +{{eqn | ll = \implies + | l = x = \tanh \left({y}\right) + | o = \iff + | r = y = \frac 1 2 \ln \left({ \frac {1 + x} {1 - x} }\right) + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Square Root is Strictly Increasing} +Tags: Real Numbers, Square Roots + +\begin{theorem} +The [[Definition:Positive Square Root|positive square root function]] is [[Definition:Strictly Increasing Real Function|strictly increasing]], that is: +:$ \forall x,y \in \R_{>0}: x 
< y \implies \sqrt x < \sqrt y$ +\end{theorem} + +\begin{proof} +Let $x$ and $y$ be [[Definition:Positive/Real Number|positive real numbers]] such that $x < y$. +{{AimForCont}} $\sqrt x \ge \sqrt y$. +{{begin-eqn}} +{{eqn | n = 1 + | l = \sqrt x + | o = \ge + | r = \sqrt y + | c = +}} +{{eqn | n = 2 + | l = \sqrt x + | o = \ge + | r = \sqrt y + | c = +}} +{{eqn | l = x + | o = \ge + | r = y + | c = [[Definition:Real Number Axioms|Real Number Axioms: $\R O2$]]: [[Definition:Relation Compatible with Operation|compatibility]] with [[Definition:Real Multiplication|multiplication]], $(1) \times (2)$ +}} +{{end-eqn}} +Thus a [[Proof by Contradiction|contradiction]] is created. +Therefore: +:$\forall x, y \in \R_{>0}: x < y \implies \sqrt x < \sqrt y$ +{{qed}} +[[Category:Real Numbers]] +[[Category:Square Roots]] +fdvd6p7g0m570jrvlsjkqp533pis13w +\end{proof}<|endoftext|> +\section{Minimum of Real Hyperbolic Cosine Function} +Tags: Hyperbolic Cosine Function + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. +Then: +:$\cosh x \ge 1$ +where $\cosh$ denotes the [[Definition:Hyperbolic Cosine|hyperbolic cosine function]]. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cosh^2 x - \sinh^2 x + | r = 1 + | c = [[Difference of Squares of Hyperbolic Cosine and Sine]] +}} +{{eqn | ll= \leadsto + | l = \cosh^2 x + | r = 1 + \sinh^2 x + | c = +}} +{{eqn | o = \ge + | r = 1 + | c = [[Square of Real Number is Non-Negative]] +}} +{{end-eqn}} +Furthermore, $\cosh x = 1$ when $x = 0$, satisfying the equality case. +{{qed}} +[[Category:Hyperbolic Cosine Function]] +1c4szrr57lywuqxqhmd77xy3o014eh0 +\end{proof}<|endoftext|> +\section{Exponential of Real Number is Strictly Positive} +Tags: Exponential Function, Exponential of Real Number is Strictly Positive + +\begin{theorem} +Let $x$ be a [[Definition:Real Number|real number]]. +Let $\exp$ denote the [[Definition:Real Exponential Function|(real) exponential function]]. +Then: +:$\forall x \in \R : \exp x > 0$ +\end{theorem} + +\begin{proof} +This proof assumes the [[Definition:Exponential Function/Real/Sum of Series|series definition of $\exp$]]. +That is, let: +:$\ds \exp x = \sum_{n \mathop = 0}^\infty \dfrac {x^n} {n!}$ +First, suppose $0 < x$. +Then: +{{begin-eqn}} +{{eqn | l = 0 + | o = < + | r = x^n + | c = [[Power Function is Strictly Increasing over Positive Reals/Natural Exponent|Power Function is Strictly Increasing over Positive Reals: Natural Exponent]] +}} +{{eqn | ll= \leadsto + | l = 0 + | o = < + | r = \frac {x^n} {n!} + | c = [[Real Number Ordering is Compatible with Multiplication]] +}} +{{eqn | ll= \leadsto + | l = 0 + | o = < + | r = \sum_{n \mathop = 0}^\infty \frac {x^n} {n!} + | c = [[Ordering of Series of Ordered Sequences]] +}} +{{eqn | ll= \leadsto + | l = 0 + | o = < + | r = \exp x + | c = Definition of $\exp$ +}} +{{end-eqn}} +So $\exp$ is [[Definition:Strictly Positive Real Function|strictly positive]] on $\R_{>0}$. +From [[Exponential of Zero]], $\exp 0 = 1$. +Finally, suppose that $x < 0$. 
+Then: +{{begin-eqn}} +{{eqn | l = 0 + | o = < + | r = -x + | c = [[Order of Real Numbers is Dual of Order of their Negatives]] +}} +{{eqn | ll= \leadsto + | l = 0 + | o = < + | r = \map \exp {-x} + | c = from above +}} +{{eqn | ll= \leadsto + | l = 0 + | o = < + | r = \frac 1 {\exp x} + | c = [[Reciprocal of Real Exponential]] +}} +{{eqn | ll= \leadsto + | l = 0 + | o = < + | r = \exp x + | c = [[Ordering of Reciprocals]] +}} +{{end-eqn}} +So $\exp$ is [[Definition:Strictly Positive Real Function|strictly positive]] on $\R_{<0}$. +Hence the result. +{{qed}} +\end{proof} + +\begin{proof} +This proof assumes the [[Definition:Exponential Function/Real/Limit of Sequence|limit definition of $\exp$]]. +That is, let: +:$\displaystyle \exp x = \lim_{n \mathop \to \infty} \map {f_n} x$ +where $\map {f_n} x = \paren {1 + \dfrac x n}^n$ +First, fix $x \in \R$. +Let $N = \ceiling {\size x}$, where $\ceiling {\, \cdot \,}$ denotes the [[Definition:Ceiling Function|ceiling function]]. +Then: +{{begin-eqn}} +{{eqn | l = \exp x + | r = \lim_{n \mathop \to \infty} \map {f_n} x +}} +{{eqn | r = \lim_{n \mathop \to \infty} \map {f_{n + N} } x + | c = [[Tail of Convergent Sequence]] +}} +{{eqn | o = \ge + | r = \map {f_{n + N} } x + | c = [[Exponential Sequence is Eventually Increasing]] and [[Limit of Bounded Convergent Sequence is Bounded]] +}} +{{eqn | o = > + | r = 0 + | c = [[Exponential Sequence is Eventually Increasing|Corollary to Exponential Sequence is Eventually Increasing]] +}} +{{end-eqn}} +{{MissingLinks|[[Exponential Sequence is Eventually Increasing|Corollary to Exponential Sequence is Eventually Increasing]] does not actually exist. The page it gets sent to does not give that result.}} +{{qed}} +\end{proof} + +\begin{proof} +This proof assumes the [[Definition:Exponential Function/Real/Extension of Rational Exponential|definition of $\exp x$ as the unique continuous extension of $e^x$]]. +Since $e > 0$, the result follows immediately from [[Power of Positive Real Number is Positive/Rational Number|Power of Positive Real Number is Positive over Rationals]]. +{{qed}} +\end{proof} + +\begin{proof} +This proof assumes the [[Definition:Exponential Function/Real/Inverse of Natural Logarithm|definition of $\exp$ as the inverse mapping of extension of $\ln$]], where $\ln$ denotes the [[Definition:Natural Logarithm|natural logarithm]]. +Recall that the [[Definition:Domain of Mapping|domain]] of $\ln$ is $\R_{>0}$. +From the definition of [[Definition:Inverse of Mapping|inverse mapping]], the [[Definition:Image of Mapping|image]] of $\exp$ is the [[Definition:Domain of Mapping|domain]] of $\ln$. +That is, the [[Definition:Image of Mapping|image]] of $\exp$ is $\R_{>0}$. +Hence the result. +{{qed}} +\end{proof} + +\begin{proof} +This proof assumes the [[Definition:Exponential Function/Real/Differential Equation|definition of $\exp$ as the solution to an initial value problem]]. +That is, suppose $\exp$ satisfies: +:$ (1): \quad D_x \exp x = \exp x$ +:$ (2): \quad \map \exp 0 = 1$ +on $\R$. +=== [[Exponential of Real Number is Strictly Positive/Proof 5/Lemma|Lemma]] === +{{:Exponential of Real Number is Strictly Positive/Proof 5/Lemma}}{{qed|lemma}} +{{AimForCont}} that $\exists \alpha \in \R: \exp \alpha < 0$. +Then $0 \in \openint {\exp \alpha} 1$. +From [[Intermediate Value Theorem]]: +:$\exists \zeta \in \openint \alpha 0: \map f \zeta = 0$ +This [[Definition:Contradiction|contradicts]] the [[Exponential of Real Number is Strictly Positive/Proof 5/Lemma|lemma]]. 
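+Hence there is no $\alpha \in \R$ with $\exp \alpha < 0$, and the result follows.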
+{{qed}} +\end{proof}<|endoftext|> +\section{Real Power of Strictly Positive Real Number is Strictly Positive} +Tags: Real Analysis + +\begin{theorem} +Let $x$ be a [[Definition:Strictly Positive Real Number|strictly positive real number]]. +Let $y$ be a [[Definition:Real Number|real number]]. +Then: +:$x^y > 0$ +where $x^y$ denotes $x$ [[Definition:Power to Real Number|raised to the $y$th power]]. +\end{theorem} + +\begin{proof} +From the definition of [[Definition:Power to Real Number|power]]: +:$x^y = \exp \left({y \ln x}\right)$ +From [[Exponential of Real Number is Strictly Positive]]: +:$x^y = \exp \left({y \ln x}\right) > 0$ +{{qed}} +[[Category:Real Analysis]] +oa8bjlxwrg5kas1mcyixm085z7x58vm +\end{proof}<|endoftext|> +\section{Derivative of Power of Function} +Tags: Differential Calculus, Derivative of Power of Function + +\begin{theorem} +Let $\map u x$ be a [[Definition:Differentiable Real Function|differentiable real function]] of $x$. +Let $n$ be a [[Definition:Real Number|real number]] such that $n \ne -1$. +Then: +:$\map {\dfrac \d {\d x} } {\map u x^n} = n \map u x^{n - 1} \map {\dfrac \d {\d x} } {\map u x}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\dfrac \d {\d x} } {\map u x^n} + | r = \lim_{h \mathop \to 0} \frac {\paren {\map u {x + h} }^n - \paren {\map u x}^n} h + | c = +}} +{{eqn | r = \paren {\map u x}^n \lim_{h \mathop \to 0} \frac {\paren {\frac {\map u {x + h} } {\map u x} }^n - 1} h + | c = [[Exponent Combination Laws/Power of Product]] +}} +{{eqn | r = \paren {\map u x}^n \lim_{h \mathop \to 0} \frac {\exp \paren {n \ln \frac {\map u {x + h} } {\map u x} } - 1} h + | c = {{Defof|Power to Real Number}} +}} +{{eqn | r = \paren {\map u x}^n \lim_{h \mathop \to 0} \paren {\frac {\map \exp {n \ln \frac {\map u {x + h} } {\map u x} } - 1} {n \ln \frac {\map u {x + h} } {\map u x} } } \paren {\frac {n \ln \frac {\map u {x + h} } {\map u x} } h} + | c = +}} +{{eqn | r = \paren {\map u x}^n \lim_{h \mathop \to 0} \frac {n \ln \frac {\map u {x + h} } {\map u x} } h + | c = [[Derivative of Exponential at Zero]] +}} +{{eqn | r = n \paren {\map u x}^n \lim_{h \mathop \to 0} \frac {\ln \frac {\map u {x + h} } {\map u x} } h + | c = +}} +{{eqn | r = n \paren {\map u x}^n \lim_{h \mathop \to 0} \frac {\map \ln {1 + \frac {\map u {x + h} - \map u x} {\map u x} } } h + | c = +}} +{{eqn | r = n \paren {\map u x}^n \lim_{h \mathop \to 0} \paren {\frac {\map \ln {1 + \frac {\map u {x + h} - \map u x} {\map u x} } } {\frac {\map u {x + h} - \map u x} {\map u x} } } \paren {\frac {\frac {\map u {x + h} - \map u x} {\map u x} } h } + | c = +}} +{{eqn | r = n \paren {\map u x}^n \lim_{h \mathop \to 0} \frac {\paren {\frac {\map u {x + h} - \map u x} {\map u x} } } h + | c = [[Derivative of Logarithm at One]] +}} +{{eqn | r = n \paren {\map u x}^n \lim_{h \mathop \to 0} \frac 1 {\map u x} \frac {\map u {x + h} - \map u x} h + | c = +}} +{{eqn | r = n \paren {\map u x}^{n - 1} \lim_{h \mathop \to 0} \frac {\map u {x + h} - \map u x} h + | c = [[Exponent Combination Laws/Product of Powers]] +}} +{{eqn | r = n \paren {\map u x}^{n - 1} \map {\dfrac \d {\d x} } {\map u x} + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\frac \d {\d x} } {\map u x^n} + | r = \map {\frac \d {\d u} } {\map u x^n} \map {\frac \d {\d x} } {\map u x} + | c = [[Chain Rule for Derivatives]] +}} +{{eqn | r = n \map u x^{n - 1} \map {\frac {\d u} {\d x} } {\map u x} + | c = [[Derivative of Hyperbolic Sine]] +}} +{{end-eqn}} +{{qed}} 
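+As a concrete instance of this result (for illustration only): with $\map u x = \sin x$ and $n = 2$, it gives $\map {\dfrac \d {\d x} } {\sin^2 x} = 2 \sin x \map {\dfrac \d {\d x} } {\sin x} = 2 \sin x \cos x$.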
+\end{proof}<|endoftext|> +\section{Natural Logarithm of e is 1} +Tags: Examples of Natural Logarithms + +\begin{theorem} +:$\ln e = 1$ +\end{theorem} + +\begin{proof} +The [[Definition:Euler's Number/Base of Logarithm|definition of the Euler's number as the Base of Logarithm]] will be used. +Then the result follows directly. +{{qed}} +\end{proof}<|endoftext|> +\section{Real Inverse Hyperbolic Cosine is Strictly Increasing} +Tags: Inverse Hyperbolic Cosine + +\begin{theorem} +The [[Definition:Real Inverse Hyperbolic Cosine|real inverse hyperbolic cosine]] function is [[Definition:Strictly Increasing Real Function|strictly increasing]], that is: +:$\forall x, y \ge 1 : x < y \implies \cosh^{-1} x < \cosh^{-1} y$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | n = 1 + | l = x + | o = < + | r = y + | c = Assumption +}} +{{eqn | ll= \leadsto + | l = x^2 + | o = < + | r = y^2 + | c = [[Definition:Real Number Axioms|Axiom $\R \text O 2$]]: [[Definition:Usual Ordering|Usual ordering]] is [[Definition:Relation Compatible with Operation|compatible]] with [[Definition:Real Multiplication|multiplication]] +}} +{{eqn | ll= \leadsto + | l = x^2 - 1 + | o = < + | r = y^2 - 1 + | c = +}} +{{eqn | n = 2 + | ll= \leadsto + | l = \sqrt {x^2 - 1} + | o = < + | r = \sqrt {y^2 - 1} + | c = [[Square Root is Strictly Increasing]] +}} +{{eqn | ll= \leadsto + | l = x + \sqrt {x^2 - 1} + | o = < + | r = y + \sqrt {y^2 - 1} + | c = $(1) + (2)$ +}} +{{eqn | ll= \leadsto + | l = \cosh^{-1} x + | o = < + | r = \cosh^{-1} y + | c = {{Defof|Real Inverse Hyperbolic Cosine}} +}} +{{end-eqn}} +{{qed}} +[[Category:Inverse Hyperbolic Cosine]] +8erlqsh2xzkc6eesg1sz2xfizza49i3 +\end{proof}<|endoftext|> +\section{Laplace Transform of Constant Multiple} +Tags: Laplace Transforms + +\begin{theorem} +Let $a \in \C$ or $\R$ be [[Definition:Constant|constant]]. 
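+Let $f$ be a [[Definition:Function|function]] whose [[Definition:Laplace Transform|Laplace transform]] $\map F s = \laptrans {\map f t}$ exists.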
+Then: +:$a \laptrans {\map f {a t} } = \map F {\dfrac s a}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = a \laptrans {\map f {a t} } + | r = a \int_0^{\to + \infty} e^{-s t} \map f {a t} \rd t + | c = {{Defof|Laplace Transform}} +}} +{{eqn | r = a \paren {\frac 1 a} \int_0^{\to + \infty} e^{-s t} \map f {a t} \rd \paren {a t} + | c = [[Primitive of Function of Constant Multiple]] +}} +{{eqn | r = \int_0^{\to + \infty} e^{-u a t} \map f {a t} \rd \paren {a t} + | c = where $u = \dfrac s a$ +}} +{{eqn | r = \int_0^{\to + \infty} e^{-u a t} \map f {a t} \rd \paren {a t} +}} +{{eqn | r = \map F u + | c = {{Defof|Laplace Transform}} +}} +{{eqn | r = \map F {\dfrac s a} +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Laplace Transform of Function of t minus a} +Tags: Laplace Transforms, Laplace Transform of Function of t minus a + +\begin{theorem} +Let $g$ be the [[Definition:Function|function]] defined as: +:$\map g t = \begin{cases} \map f {t - a} & : t > a \\ 0 & : t \le a \end{cases}$ +Then: +:$\laptrans {\map g t} = e^{-a s} \map F s$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \laptrans {\map f {t - a} } + | r = \int_0^{\to + \infty} e^{-s t} \map f {t - a} \rd t + | c = {{Defof|Laplace Transform}} +}} +{{eqn | r = \int_0^{\to + \infty} e^{-s \paren {t - a} } e^{-a s} \map f {t - a} \rd \paren {t - a} + | c = +}} +{{eqn | r = e^{-a s} \int_0^{\to + \infty} e^{-s \paren {t - a} } \map f {t - a} \rd \paren {t - a} + | c = +}} +{{eqn | r = e^{-a s}\map F s + | c = {{Defof|Laplace Transform}} +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \laptrans {\map g t} + | r = \int_0^\infty e^{-s t} \map g t \rd t + | c = {{Defof|Laplace Transform}} +}} +{{eqn | r = \int_0^a e^{-s t} \map g t \rd t + \int_a^\infty e^{-s t} \map g t \rd t + | c = +}} +{{eqn | r = \int_0^a 0 \times e^{-s t} \rd t + \int_a^\infty e^{-s t} \map f {t - a} \rd t + | c = Definition of $\map g t$ +}} +{{eqn | r = \int_a^\infty e^{-s t} \map f {t - a} \rd t + | c = +}} +{{eqn | r = \int_0^\infty e^{-s \paren {u + a} } \map f u \rd u + | c = [[Integration by Substitution]]: $t = u + a$ +}} +{{eqn | r = e^{-a s} \int_0^\infty e^{-s u} \map f u \rd u + | c = +}} +{{eqn | r = e^{-a s} \map F s + | c = {{Defof|Laplace Transform}} +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Seifert-van Kampen Theorem} +Tags: Category Theory + +\begin{theorem} +The functor $\pi_1 : \mathbf{Top_\bullet} \to \mathbf{Grp}$ preserves pushouts of inclusions. +\end{theorem} + +\begin{proof} +Let $\left({X, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $U_1, U_2 \in \tau$ such that: +: $U_1 \cup U_2 = X$ +: $U_1 \cap U_2 \ne \varnothing$ is connected +Let $\ast \in U_1 \cap U_2$. +Let: +: $i_k : U_1 \cap U_2 \hookrightarrow U_k$ +: $j_k : U_k \hookrightarrow U_1 \cup U_2$ +be inclusions. 
+For the sake of simplicity let: +:$\pi_1 \left({X}\right) = \pi_1 \left({X, \ast}\right)$ +It is to be shown that $\pi_1 \left(X\right)$ is the [[Definition:Amalgamated Free Product|amalgamated free product]]: +:$\pi_1 \left({U_1}\right) *_{\pi_1 \left({U_1 \cap U_2}\right)} \pi_1 \left({U_2}\right)$ +{{ProofWanted}} +{{Namedfor|Karl Johannes Herbert Seifert|name2 = Egbert Rudolf van Kampen|cat = Seifert|cat2 = van Kampen}} +[[Category:Category Theory]] +jwn0lbhl81z1yclqkyw9y203rdwherz +\end{proof}<|endoftext|> +\section{Functions of Independent Random Variables are Independent} +Tags: Independent Random Variables + +\begin{theorem} +Let $X$ and $Y$ be [[Definition:Independent Random Variables|independent random variables]] on a [[Definition:Probability Space|probability space]] $\struct {\Omega, \Sigma, \Pr}$. +Let $g$ and $h$ be [[Definition:Real-Valued Function|real-valued functions]] defined on the [[Definition:Codomain of Mapping|codomains]] of $X$ and $Y$ respectively. +Then $\map g X$ and $\map h Y$ are [[Definition:Independent Random Variables|independent random variables]]. +\end{theorem} + +\begin{proof} +Let $A$ and $B$ be [[Definition:Subset|subsets]] of the [[Definition:Real Number|real numbers]] $\R$. +Let $g^{-1} \sqbrk A$ and $h^{-1} \sqbrk B$ denote the [[Definition:Preimage of Subset under Mapping|preimages]] of $A$ and $B$ under $g$ and $h$ respectively. +Applying the definition of [[Definition:Independent Random Variables|independent random variables]]: +{{begin-eqn}} +{{eqn | l = \map \Pr {\map g X \in A, \map h Y \in B} + | r = \map \Pr {X \in g^{-1} \sqbrk A, Y \in h^{-1} \sqbrk B} + | c = {{Defof|Preimage of Subset under Mapping}} +}} +{{eqn | r = \map \Pr {X \in g^{-1} \sqbrk A} \map \Pr {Y \in h^{-1} \sqbrk B} + | c = {{Defof|Independent Random Variables}} +}} +{{eqn | r = \map \Pr {\map g X \in A} \map \Pr {\map h Y \in B} + | c = {{Defof|Preimage of Subset under Mapping}} +}} +{{end-eqn}} +Hence $\map g X$ and $\map h Y$ are [[Definition:Independent Random Variables|independent random variables]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Multiplication Property of Characteristic Functions} +Tags: Probability Theory + +\begin{theorem} +Let $X$ and $Y$ be [[Definition:Independent Random Variables|independent random variables]] on a [[Definition:Probability Space|probability space]] $\struct {\Omega, \Sigma, \Pr}$. +Let $\phi_X$ and $\phi_Y$ denote the [[Definition:Characteristic Function of Random Variable|characteristic functions]] of $X$ and $Y$ respectively. +Then: +:$\phi_{X + Y} = \phi_X \phi_Y$ +\end{theorem} + +\begin{proof} +Let $i = \sqrt{-1}$. +Let $\expect X$ denote the [[Definition:Expectation|expectation]] of $X$. +{{begin-eqn}} +{{eqn | l = \map {\phi_{X + Y} } t + | r = \expect {e^{i t \paren {X + Y} } } + | c = {{Defof|Characteristic Function of Random Variable}} +}} +{{eqn | r = \expect {e^{i t X} e^{i t Y} } + | c = +}} +{{eqn | r = \expect {e^{i t X} } \expect {e^{ i t Y} } + | c = [[Functions of Independent Random Variables are Independent]], [[Expected Value of Product is Product of Expected Value]] +}} +{{eqn | r = \map {\phi_X} t \map {\phi_Y} t + | c = +}} +{{end-eqn}} +Hence: +:$\phi_{X + Y} = \phi_X \phi_Y$ +{{qed}} +\end{proof}<|endoftext|> +\section{Relationship between Limit Inferior and Lower Limit} +Tags: Topology + +\begin{theorem} +Let $\struct {S, \tau}$ be a [[Definition:Topological Space|topological space]]. 
+Let $f: S \to \R \cup \set {-\infty, \infty}$ be an [[Definition:Extended Real-Valued Function|extended real-valued function]]. +Let $\sequence {s_n}_{n \mathop \in \N}$ be a [[Definition:Convergent Sequence|convergent sequence]] in $S$ such that $s_n \to \bar s$. +Then the [[Definition:Lower Limit (Topological Space)|lower limit]] of $f$ at $\bar s$ is [[Definition:Bounded Above Real-Valued Function|bounded above]] by the [[Definition:Limit Inferior|limit inferior]] of $\sequence {\map f {s_n} }$: +:$\displaystyle \liminf_{s \mathop \to \bar s} \map f s \le \liminf_{n \mathop \to \infty} \map f {s_n}$ +\end{theorem} + +\begin{proof} +Let $\NN_{\bar s}$ denote the [[Definition:Neighborhood Filter|neighborhood filter]] of $\bar s$. +By definition of the [[Definition:Lower Limit (Topological Space)|lower limit]], there exists a [[Definition:Sequence|sequence]] of [[Definition:Neighborhood (Topology)|open neighborhoods]] $\sequence {V_k}_{k \mathop \in \N} \in \NN_{\bar s}$ such that: +:$\displaystyle \lim_{k \mathop \to \infty} \set {\inf_{s \mathop \in V_k} \map f s} = \liminf_{s \mathop \to \bar s} \map f s$ +This implies that $\forall \epsilon > 0 \exists k_\epsilon \in \N$ such that: +:$\displaystyle \inf_{s \mathop \in V_{k_\epsilon} } \map f s \ge \liminf_{s \mathop \to \bar s} \map f s - \epsilon$ +By our hypothesis $s_n \to \bar s$ and because $V_{k_\epsilon} \in N_{\bar s}$ there exists $\map N {k_\epsilon} \in \N$ such that: +:$\displaystyle \forall n \ge \map N {k_\epsilon}: s_n \in V_{k_\epsilon}$ +Consequently: +{{begin-eqn}} +{{eqn | o = \le + | l = \inf_{s \mathop \in V_{k_\epsilon} } \map f s + | r = \inf_{n \mathop \ge \map N {k_\epsilon} } \map f {s_n} + | c = because $\set {s_n : n \ge \map N {k_\epsilon} } \subseteq V_{k_\epsilon}$ +}} +{{eqn | o = \le + | r = \sup_{N \mathop \in \N} \set {\inf_{n \mathop \ge N} \map f {s_n} } + | c = {{Defof|Supremum}} +}} +{{eqn | r = \liminf_{n \mathop \to \infty} \map f {s_n} + | c = {{Defof|Limit Inferior}} +}} +{{end-eqn}} +Combining these estimates, it follows that for all $\epsilon > 0$: +:$\displaystyle \liminf_{s \mathop \to \bar s} \map f s \le \liminf_{n \mathop \to \infty} \map f {s_n} + \epsilon$ +Hence the result. +{{qed}} +[[Category:Topology]] +gegmqs4w4gguk4x28m54zv8634zj0k9 +\end{proof}<|endoftext|> +\section{Group is Abelian iff Opposite Group is Itself} +Tags: Abelian Groups, Opposite Groups + +\begin{theorem} +Let $\left({G, \circ}\right)$ be a [[Definition:Group| group]]. +Let $\left({G, *}\right)$ be the [[Definition:Opposite Group|opposite group]] to $({G, \circ})$. +$\left({G, \circ}\right)$ is an [[Definition: Abelian Group|Abelian group]] {{iff}}: +:$\left({G, \circ}\right) = \left({G, *}\right)$ +\end{theorem} + +\begin{proof} +By definition of [[Definition:Opposite Group|opposite group]]: +:$(1): \quad \forall a, b \in G : a \circ b = b * a$ +=== Necessary Condition === +Let $\left({G, \circ}\right)$ be [[Definition: Abelian Group|Abelian]]. +Then: +{{begin-eqn}} +{{eqn | lo= \forall a, b \in G: + | l = a \circ b + | r = b \circ a +}} +{{eqn | ll= \implies + | l = a \circ b + | r = a * b + | c = Definition of [[Definition:Opposite Group|Opposite Group]] +}} +{{eqn | ll= \implies + | l = \left({G, \circ}\right) + | r = \left({G, *}\right) + | c = [[Equality of Algebraic Structures]] +}} +{{end-eqn}} +{{qed}} +=== Sufficient Condition === +Let $\left({G, \circ}\right) = \left({G, *}\right)$. 
+Then: +{{begin-eqn}} +{{eqn | lo= \forall a, b \in G: + | l = a \circ b + | r = a * b + | c = [[Equality of Algebraic Structures]] +}} +{{eqn | ll= \implies + | l = a \circ b + | r = b \circ a + | c = Definition of [[Definition:Opposite Group|Opposite Group]] +}} +{{end-eqn}} +Thus by definition $\left({G, \circ}\right)$ is an [[Definition: Abelian Group|Abelian group]]. +{{qed}} +[[Category:Abelian Groups]] +[[Category:Opposite Groups]] +oryghvkg723af266g8uf3p8bh2rn1a8 +\end{proof}<|endoftext|> +\section{Sequence on Finite Product Space Converges to Point iff Projections Converge to Projections of Point} +Tags: Topology, Convergence, Sequences, Projections + +\begin{theorem} +Let $N \in \N$. +For all $k \in \set {1, \ldots, N}$, let $T_k = \struct {X_k, \tau_k}$ be [[Definition:Topological Space|topological spaces]]. +Let $\displaystyle X = \prod_{k \mathop = 1}^N X_k$ be the [[Definition:Cartesian Product|cartesian product]] of $X_1, \ldots, X_N$. +Let $\tau$ be the [[Definition:Product Topology|product topology]] on $X$. +Denote by $\pr_k : X \to X_k$ the [[Definition:Projection (Mapping Theory)|projection]] from $X$ onto $X_k$. +Let $\sequence {x_n}$ be a [[Definition:Sequence|sequence]] on $X$ and let $x \in X$. +Then $\sequence {x_n}$ [[Definition:Convergent Sequence (Topology)|converges]] to $x$ {{iff}}: +:for all $k \in \set {1, \ldots, N}$ the [[Definition:Sequence|sequence]] $\sequence {\map {\pr_k} {x_n} }$ [[Definition:Convergent Sequence (Topology)|converges]] to $\map {\pr_k} x$. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Let $x_n \to x$. +Let $k \in \set {1, \ldots, N}$. +From [[Projection from Product Topology is Continuous]] it follows that $\pr_k$ is continuous. +By [[Continuous Mapping is Sequentially Continuous]], $\pr_k$ is also [[Definition:Sequential Continuity|sequentially continuous]]. +Hence $\map {\pr_k} {x_n} \to \map {\pr_k} x$. +{{qed|lemma}} +=== Sufficient Condition === +Let $\map {\pr_k} {x_n} \to \map {\pr_k} x$ for all $k \in \set {1, \ldots, N}$. +Let $U \in \tau$ be an [[Definition:Open Neighborhood|open neighborhood]] of $x$. +By definition of the [[Definition:Product Topology|product topology]] and [[Synthetic Basis and Analytic Basis are Compatible]] it follows that: +:$\BB := \set {U_1 \times U_2 \times \cdots \times U_N : \forall k \in \set {1, \ldots, N} : U_k \in \tau_k}$ +is an [[Definition:Analytic_Basis|analytic basis]] for $\tau$. +Hence there exists an [[Definition:Indexed Set|index set]] $I$ such that: +:$\displaystyle U = \bigcup_{i \mathop \in I} \paren {U_{1, i} \times \cdots \times U_{N, i} }$ +where $U_{k, i} \in \tau_k$ for all $i \in I, k \in \set {1, \ldots, N}$. +As $x \in U$ it follows that there exists $i_0 \in I$ such that: +:$\displaystyle x \in U_{1, i_0} \times \cdots \times U_{N, i_0}$ +By our hypothesis $\map {\pr_k} {x_n} \to \map {\pr_k} x$ it follows that: +:$\forall k \in \set{1, \ldots, N}: \exists M_k \in \N : \forall n \ge M_k: \map {\pr_k} {x_n} \in U_{k, i_0}$ +Thus for all $n \ge M := \max \set {M_1, \dotsc, M_N}$ it holds that: +:$ x_n = \tuple {\map {\pr_1} {x_n}, \dotsc, \map {\pr_N} {x_n} } \in U_{1,i_0} \times \cdots \times U_{N, i_0} \subset U$ +Hence the result. 
+{{qed}} +\end{proof}<|endoftext|> +\section{Real Convergent Sequence is Cauchy Sequence} +Tags: Convergence, Real Analysis, Cauchy Sequences, Real Convergent Sequence is Cauchy Sequence + +\begin{theorem} +Every [[Definition:Convergent Real Sequence|convergent real sequence]] in $\R$ is a [[Definition:Real Cauchy Sequence|Cauchy sequence]]. +\end{theorem} + +\begin{proof} +Let $\sequence {x_n}$ be a [[Definition:Sequence|sequence]] in $\R$ that [[Definition:Convergent Real Sequence|converges]] to the [[Definition:Limit of Real Sequence|limit]] $l \in \R$. +Let $\epsilon > 0$. +Then also $\dfrac \epsilon 2 > 0$. +Because $\sequence {x_n}$ [[Definition:Convergent Real Sequence|converges]] to $l$, we have: +:$\exists N: \forall n > N: \size {x_n - l} < \dfrac \epsilon 2$ +So if $m > N$ and $n > N$, then: +{{begin-eqn}} +{{eqn | l = \size {x_n - x_m} + | r = \size {x_n - l + l - x_m} +}} +{{eqn | o = \le + | r = \size {x_n - l} + \size {l - x_m} + | c = [[Triangle Inequality]] +}} +{{eqn | o = < + | r = \frac \epsilon 2 + \frac \epsilon 2 + | c = by choice of $N$ +}} +{{eqn | r = \epsilon + | c = +}} +{{end-eqn}} +Thus $\sequence {x_n}$ is a [[Definition:Real Cauchy Sequence|Cauchy sequence]]. +{{qed}} +\end{proof} + +\begin{proof} +From [[Real Number Line is Complete Metric Space]], $\R$ under the [[Definition:Usual Metric|usual metric]] is a [[Definition:Metric Space|metric space]]. +The result then follows as a special case of [[Convergent Sequence in Metric Space is Cauchy Sequence]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Real Sequence is Cauchy iff Convergent} +Tags: Convergence, Real Analysis, Cauchy Sequences + +\begin{theorem} +Let $\sequence {a_n}$ be a [[Definition:Real Sequence|sequence in $\R$]]. +Then $\sequence {a_n}$ is a [[Definition:Real Cauchy Sequence|Cauchy sequence]] {{iff}} it is [[Definition:Convergent Real Sequence|convergent]]. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +This is demonstrated in [[Cauchy Sequence Converges on Real Number Line]]. +{{qed|lemma}} +=== Sufficient Condition === +This is demonstrated in [[Real Convergent Sequence is Cauchy Sequence]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Complex Sequence is Cauchy iff Convergent} +Tags: Convergent Complex Sequences, Cauchy Sequences, Complex Sequence is Cauchy iff Convergent + +\begin{theorem} +Let $\sequence {z_n}$ be a [[Definition:Complex Sequence|complex sequence]]. +Then $\sequence {z_n}$ is a [[Definition:Cauchy Sequence|Cauchy sequence]] {{iff}} it is [[Definition:Convergent Complex Sequence|convergent]]. 
+\end{theorem} + +\begin{proof} +=== [[Complex Sequence is Cauchy iff Convergent/Lemma 1|Lemma]] === +{{:Complex Sequence is Cauchy iff Convergent/Lemma 1}} +Let $\sequence {x_n}$ be a [[Definition:Real Sequence|real sequence]] where: +:$x_n = \Re \paren {z_n}$ for every $n$ +:$\Re \paren {z_n}$ is the [[Definition:Real Part|real part]] of $z_n$ +Let $\sequence {y_n}$ be a [[Definition:Real Sequence|real sequence]] where +:$y_n = \Im \paren {z_n}$ for every $n$ +:$\Im \paren {z_n}$ is the [[Definition:Imaginary Part|imaginary part]] of $z_n$ +We find: +:$\sequence {z_n}$ is a [[Definition:Complex Cauchy Sequence|Cauchy sequence]] +:$\iff \sequence {x_n}$ and $\sequence {y_n}$ are [[Definition:Real Cauchy Sequence|Cauchy sequences]] by [[Complex Sequence is Cauchy iff Convergent/Lemma 1|Lemma]] +:$\iff \sequence {x_n}$ and $\sequence {y_n}$ are [[Definition:Convergent Real Sequence|convergent]] by [[Real Sequence is Cauchy iff Convergent]] +:$\iff \sequence {z_n}$ is [[Definition:Convergent Complex Sequence|convergent]] by definition of [[Definition:Convergent Complex Sequence|convergent complex sequence]] +{{qed}} +\end{proof} + +\begin{proof} +=== [[Complex Sequence is Cauchy iff Convergent/Lemma 1|Lemma]] === +{{:Complex Sequence is Cauchy iff Convergent/Lemma 1}} +Let $\sequence {x_n}$ be a [[Definition:Real Sequence|real sequence]] where: +:$x_n = \Re \paren {z_n}$ for every $n$ +:$\Re \paren {z_n}$ is the [[Definition:Real Part|real part]] of $z_n$ +Let $\sequence {y_n}$ be a [[Definition:Real Sequence|real sequence]] where: +:$y_n = \Im \paren {z_n}$ for every $n$ +:$\Im \paren {z_n}$ is the [[Definition:Imaginary Part|imaginary part]] of $z_n$ +=== Necessary Condition === +Let $\sequence {z_n}$ be a [[Definition:Cauchy Sequence|Cauchy sequence]]. +We aim to prove that $\sequence {z_n}$ is [[Definition:Convergent Complex Sequence|convergent]]. +We find: +:$\sequence {z_n}$ is a [[Definition:Cauchy Sequence|Cauchy sequence]] +:$\implies \sequence {x_n}$ and $\sequence {y_n}$ are [[Definition:Real Cauchy Sequence|Cauchy sequences]] by [[Complex Sequence is Cauchy iff Convergent/Lemma 1|Lemma]] +:$\implies \sequence {x_n}$ and $\sequence {y_n}$ are [[Definition:Convergent Real Sequence|convergent]] by [[Real Sequence is Cauchy iff Convergent]] +:$\implies \sequence {z_n}$ is [[Definition:Convergent Complex Sequence|convergent]] by definition of [[Definition:Convergent Complex Sequence|convergent complex sequence]]. +{{qed|lemma}} +=== Sufficient Condition === +Let $\sequence {z_n}$ be [[Definition:Convergent Complex Sequence|convergent]]. +We aim to prove that $\sequence {z_n}$ is a [[Definition:Cauchy Sequence|Cauchy sequence]]. +We find: +:$\sequence {z_n}$ is [[Definition:Convergent Complex Sequence|convergent]] +:$\implies \sequence {x_n}$ and $\sequence {y_n} $ are [[Definition:Convergent Real Sequence|convergent]] by definition of [[Definition:Convergent Complex Sequence|convergent complex sequence]] +:$\implies \sequence {x_n}$ and $\sequence {y_n}$ are [[Definition:Real Cauchy Sequence|Cauchy sequences]] by [[Real Sequence is Cauchy iff Convergent]] +:$\implies \sequence {z_n}$ is a [[Definition:Cauchy Sequence|Cauchy sequence]] by [[Complex Sequence is Cauchy iff Convergent/Lemma 1|Lemma]] +{{qed}} +\end{proof}<|endoftext|> +\section{Sum of Arctangents} +Tags: Arctangent Function + +\begin{theorem} +:$\arctan a + \arctan b = \arctan \dfrac {a + b} {1 - a b}$ +where $\arctan$ denotes the [[Definition:Arctangent|arctangent]]. 
+\end{theorem}
+
+\begin{proof}
+Let $x = \arctan a$ and $y = \arctan b$.
+Then:
+{{begin-eqn}}
+{{eqn | n = 1
+ | l = \tan x
+ | r = a
+ | c = 
+}}
+{{eqn | n = 2
+ | l = \tan y
+ | r = b
+ | c = 
+}}
+{{eqn | l = \tan \left({\arctan a + \arctan b}\right)
+ | r = \tan \left({x + y}\right)
+ | c = 
+}}
+{{eqn | r = \frac {\tan x + \tan y} {1 - \tan x \tan y}
+ | c = [[Tangent of Sum]]
+}}
+{{eqn | r = \frac {a + b} {1 - a b}
+ | c = by $(1)$ and $(2)$
+}}
+{{eqn | ll = \implies
+ | l = \arctan a + \arctan b
+ | r = \arctan \frac {a + b} {1 - a b}
+ | c = 
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Difference of Arctangents}
+Tags: Arctangent Function
+
+\begin{theorem}
+:$\arctan a - \arctan b = \arctan \dfrac {a - b} {1 + a b}$
+where $\arctan$ denotes the [[Definition:Arctangent|arctangent]].
+\end{theorem}
+
+\begin{proof}
+Let $x = \arctan a$ and $y = \arctan b$.
+Then:
+{{begin-eqn}}
+{{eqn | n = 1
+ | l = \tan x
+ | r = a
+ | c = 
+}}
+{{eqn | n = 2
+ | l = \tan y
+ | r = b
+ | c = 
+}}
+{{eqn | l = \map \tan {\arctan a - \arctan b}
+ | r = \map \tan {x - y}
+ | c = 
+}}
+{{eqn | r = \frac {\tan x - \tan y} {1 + \tan x \tan y}
+ | c = [[Tangent of Difference]]
+}}
+{{eqn | r = \frac {a - b} {1 + a b}
+ | c = by $(1)$ and $(2)$
+}}
+{{eqn | ll= \leadsto
+ | l = \arctan a - \arctan b
+ | r = \arctan \frac {a - b} {1 + a b}
+ | c = 
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Sum of Arccotangents}
+Tags: Arccotangent Function
+
+\begin{theorem}
+:$\arccot a + \arccot b = \arccot \dfrac {a b - 1} {a + b}$
+where $\arccot$ denotes the [[Definition:Arccotangent|arccotangent]].
+\end{theorem}
+
+\begin{proof}
+Let $x = \arccot a$ and $y = \arccot b$.
+Then:
+{{begin-eqn}}
+{{eqn | n = 1
+ | l = \cot x
+ | r = a
+ | c = 
+}}
+{{eqn | n = 2
+ | l = \cot y
+ | r = b
+ | c = 
+}}
+{{eqn | l = \map \cot {\arccot a + \arccot b}
+ | r = \map \cot {x + y}
+ | c = 
+}}
+{{eqn | r = \frac {\cot x \cot y - 1} {\cot x + \cot y}
+ | c = [[Cotangent of Sum]]
+}}
+{{eqn | r = \frac {a b - 1} {a + b}
+ | c = by $(1)$ and $(2)$
+}}
+{{eqn | ll= \leadsto
+ | l = \arccot a + \arccot b
+ | r = \arccot \frac {a b - 1} {a + b}
+ | c = 
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Difference of Arccotangents}
+Tags: Arccotangent Function
+
+\begin{theorem}
+:$\arccot a - \arccot b = \arccot \dfrac {a b + 1} {b - a}$
+where $\arccot$ denotes the [[Definition:Arccotangent|arccotangent]].
+\end{theorem}
+
+\begin{proof}
+Let $x = \arccot a$ and $y = \arccot b$.
+Then:
+{{begin-eqn}}
+{{eqn | n = 1
+ | l = \cot x
+ | r = a
+ | c = 
+}}
+{{eqn | n = 2
+ | l = \cot y
+ | r = b
+ | c = 
+}}
+{{eqn | l = \map \cot {\arccot a - \arccot b}
+ | r = \map \cot {x - y}
+ | c = 
+}}
+{{eqn | r = \frac {\cot x \cot y + 1} {\cot y - \cot x}
+ | c = [[Cotangent of Difference]]
+}}
+{{eqn | r = \frac {a b + 1} {b - a}
+ | c = by $(1)$ and $(2)$
+}}
+{{eqn | ll= \leadsto
+ | l = \arccot a - \arccot b
+ | r = \arccot \frac {a b + 1} {b - a}
+ | c = 
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Multiple Angle Formula for Tangent}
+Tags: Tangent Function
+
+\begin{theorem}
+:$\displaystyle \tan \left({n \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\left\lfloor{\frac {n - 1} 2}\right\rfloor} \left({-1}\right)^i \binom n {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\left\lfloor{\frac n 2}\right\rfloor} \left({-1}\right)^i \binom n {2 i} \tan^{2 i}\theta}$
+\end{theorem}
+
+\begin{proof}
+Proof by [[Principle of Mathematical Induction|induction]]:
+For all $n \in \N_{\ge0}$, let $P \left({n}\right)$ be the [[Definition:Proposition|proposition]]:
+:$\displaystyle \tan \left({n \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\left\lfloor{\frac {n - 1} 2}\right\rfloor} \left({-1}\right)^i \binom n {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\left\lfloor{\frac n 2}\right\rfloor} \left({-1}\right)^i \binom n {2 i} \tan^{2 i}\theta}$
+=== Basis for the Induction ===
+$P(0)$ is the case:
+{{begin-eqn}}
+{{eqn | o = 
+ | r = \frac {\displaystyle \sum_{i \mathop = 0}^{-1} \left({-1}\right)^i \binom 0 {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^0 \left({-1}\right)^i \binom 0 {2 i} \tan^{2 i}\theta}
+ | c = 
+}}
+{{eqn | r = \frac 0 {\displaystyle \sum_{i \mathop = 0}^0 \left({-1}\right)^i \binom 0 {2 i} \tan^{2 i}\theta}
+ | c = Because the [[Definition:Upper Bound|upper bound]] is smaller than the [[Definition:Lower Bound|lower bound]], this results in a [[Definition:Vacuous Summation|vacuous summation]] and thus is zero.
+}}
+{{eqn | r = \frac 0 {\left({-1}\right)^0 \dbinom 0 0 \tan^0 \theta}
+ | c = 
+}}
+{{eqn | r = \frac 0 1
+ | c = 
+}}
+{{eqn | r = 0
+ | c = 
+}}
+{{eqn | r = \tan \left({0 \theta}\right)
+ | c = 
+}}
+{{end-eqn}}
+and so can be seen to hold.
+$P(1)$ is the case:
+{{begin-eqn}}
+{{eqn | o = 
+ | r = \frac {\displaystyle \sum_{i \mathop = 0}^0 \left({-1}\right)^i \binom 1 {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^0 \left({-1}\right)^i \binom 1 {2 i} \tan^{2 i}\theta}
+ | c = 
+}}
+{{eqn | r = \frac {\left({-1}\right)^0 \dbinom 1 1 \tan^1 \theta} {\left({-1}\right)^0 \dbinom 1 0 \tan^0 \theta}
+ | c = 
+}}
+{{eqn | r = \frac {\tan \theta} 1
+ | c = 
+}}
+{{eqn | r = \tan \theta
+ | c = 
+}}
+{{end-eqn}}
+and so is also seen to hold.
+These two cases together form the [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]].
+=== Induction Hypothesis ===
+Now we need to show that, if $P \left({k-2}\right)$ and $P \left({k-1}\right)$ are true, where $k \ge 2$ is an even number, then it logically follows that $P \left({k}\right)$ and $P \left({k+1}\right)$ are true.
+So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]:
+:$\displaystyle \tan \left({\left({k - 2}\right) \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta}$
+:$\displaystyle \tan \left({\left({k - 1}\right) \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 1} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 1} {2 i} \tan^{2 i}\theta}$
+Then we need to show:
+:$\displaystyle \tan \left({k \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom k {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2} \left({-1}\right)^i \binom k {2 i} \tan^{2 i}\theta}$
+:$\displaystyle \tan \left({\left({k + 1}\right) \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2} \left({-1}\right)^i \binom {k + 1} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2} \left({-1}\right)^i \binom {k + 1} {2 i} \tan^{2 i}\theta}$
+=== Induction Step ===
+This is our [[Principle of Mathematical Induction#Induction Step|induction step]]:
+For the first part:
+{{begin-eqn}}
+{{eqn | l = \tan \left({k \theta}\right)
+ | r = \tan \left({\left({k - 2}\right) \theta + 2 \theta}\right)
+ | c = 
+}}
+{{eqn | r = \frac {\tan \left({\left({k - 2}\right) \theta}\right) + \tan \left({2 \theta}\right)} {1 - \tan \left({\left({k - 2}\right) \theta}\right) \tan \left({2 \theta}\right)}
+ | c = [[Tangent of Sum]]
+}}
+{{eqn | r = \frac {\frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta} + \tan \left({2 \theta}\right)} {1 - \frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta} \tan \left({2 \theta}\right)}
+ | c = By [[Multiple Angle Formula for Tangent#Induction Hypothesis|Induction Hypothesis]]
+}}
+{{eqn | r = \frac {\frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta} + \frac {\displaystyle 2 \tan \theta} {\displaystyle 1 - \tan^2 \theta} } {1 - \frac {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta} \frac {\displaystyle 2 \tan \theta} {\displaystyle 1 - \tan^2 \theta} }
+ | c = By [[Double Angle Formula for Tangent]]
+}}
+{{eqn | r = \frac {\left({\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta}\right) \left({\displaystyle 1 - \tan^2 \theta}\right) + \left({\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1} \left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta}\right) \left({\displaystyle 2 \tan \theta}\right)} {\left({\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 1}
\left({-1}\right)^i \binom {k - 2} {2 i} \tan^{2 i}\theta}\right) \left({\displaystyle 1 - \tan^2 \theta}\right) - \left({\displaystyle \sum_{i \mathop = 0}^{\frac k 2 - 2} \left({-1}\right)^i \binom {k - 2} {2 i + 1} \tan^{2 i + 1}\theta}\right) \left({\displaystyle 2 \tan \theta}\right)} + | c = +}} +{{end-eqn}} +For the second part: +{{finish}} +So $P \left({k-2}\right) \land P \left({k-1}\right) \implies P \left({k}\right) \land P \left({k+1}\right)$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$\displaystyle \forall n \in \N: \tan \left({n \theta}\right) = \frac {\displaystyle \sum_{i \mathop = 0}^{\left\lfloor{\frac {n - 1} 2}\right\rfloor} \left({-1}\right)^i \binom n {2 i + 1} \tan^{2 i + 1}\theta} {\displaystyle \sum_{i \mathop = 0}^{\left\lfloor{\frac n 2}\right\rfloor} \left({-1}\right)^i \binom n {2 i} \tan^{2 i}\theta}$ +{{qed}} +\end{proof}<|endoftext|> +\section{Laplace Transform of Dirac Delta Function by Function} +Tags: Laplace Transforms, Dirac Delta Function + +\begin{theorem} +Let $\map f t: \R \to \R$ or $\R \to \C$ be a [[Definition:Function|function]]. +Let $\map \delta t$ denote the [[Definition:Dirac Delta Function|Dirac delta function]]. +Let $c$ be a [[Definition:Positive Real Number|positive]] [[Definition:Constant|constant]] [[Definition:Real Number|real number]]. +Let $\laptrans {\map f t} = \map F s$ denote the [[Definition:Laplace Transform|Laplace transform]] of $f$. +Then: +:$\laptrans {\map \delta {t - c} \, \map f t} = e^{- s c} \, \map f c$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \laptrans {\map \delta {t - c} \, \map f t} + | r = \int^{\to+\infty}_0 e^{-s t} \map \delta {t - c} \, \map f t \rd t + | c = {{Defof|Laplace Transform}} +}} +{{eqn | r = \int^{c^+}_{c^-} e^{-s t} \map \delta {t - c} \, \map f t \rd t + | c = Integrand elsewhere zero by {{Defof|Dirac Delta Function}} +}} +{{eqn | r = \int^{c^+}_{c^-} e^{-s c} \map \delta {t - c} \, \map f c \rd t + | c = $e^{-s t}$ and $\map f t$ are constant in [[Definition:Closed Real Interval|interval]] $\closedint {c^-} {c^+}$ +}} +{{eqn | r = e^{-s c} \, \map f c \int^{c^+}_{c^-} \map \delta {t - c} \rd t + | c = [[Primitive of Constant Multiple of Function]] +}} +{{eqn | r = e^{-s c} \, \map f c \int^{0^+}_{0^-} \map \delta {t - c} \rd \paren {t - c} + | c = [[Integration by Substitution]] +}} +{{eqn | r = e^{-s c} \, \map f c + | c = {{Defof|Dirac Delta Function}} +}} +{{end-eqn}} +{{qed}} +[[Category:Laplace Transforms]] +[[Category:Dirac Delta Function]] +g5ejsz48akqz5vky3llm1lvp3fvsg3r +\end{proof}<|endoftext|> +\section{Modulus of Complex Number equals its Distance from Origin} +Tags: Complex Analysis + +\begin{theorem} +The [[Definition:Complex Modulus|modulus]] of a [[Definition:Complex Number|complex number]] equals its [[Definition:Distance|distance]] from the [[Definition:Origin|origin]] on the [[Definition:Complex Plane|complex plane]]. +\end{theorem} + +\begin{proof} +Let $z = x + y i$ be a [[Definition:Complex Number|complex number]] and $O = 0 + 0 i$ be the [[Definition:Origin|origin]] on the [[Definition:Complex Plane|complex plane]]. 
+We have its [[Definition:Complex Modulus|modulus]]: +{{begin-eqn}} +{{eqn | l = \left\vert{z}\right\vert + | r = \left\vert{x + y i}\right\vert + | c = +}} +{{eqn | r = \sqrt {x^2 + y^2} + | c = Definition of [[Definition:Complex Modulus|Modulus]] +}} +{{end-eqn}} +and its [[Definition:Distance|distance]] from the [[Definition:Origin|origin]] on the [[Definition:Complex Plane|complex plane]]: +{{begin-eqn}} +{{eqn | l = d \left({z, O}\right) + | r = d \left({\left({x, y}\right), \left({0, 0}\right)}\right) + | c = +}} +{{eqn | r = \sqrt{\left({x - 0}\right)^2 + \left({y - 0}\right)^2} + | c = [[Distance Formula]] +}} +{{eqn | r = \sqrt {x^2 + y^2} + | c = +}} +{{end-eqn}} +The two are seen to be equal. +{{qed}} +{{link wanted|There may be a more directly relevant link to the distance formula based on the fact (which we've proved somewhere) that the complex plane is a metric space, and that the distance formula is that metric.}} +[[Category:Complex Analysis]] +qonf9e72mw078j2ed6wty07774jop2t +\end{proof}<|endoftext|> +\section{Empty Set is Countable} +Tags: Set Theory, Empty Set, Countable Sets + +\begin{theorem} +The [[Definition:Empty Set|empty set]] $\O$ is [[Definition:Countable Set|countable]]. +\end{theorem} + +\begin{proof} +By [[Axiom:Peano's Axioms|Peano's Axioms]], $\N_0 \sim \O$, where $\N_n$ denotes the [[Definition:Initial Segment of Natural Numbers|initial segment of natural number $n$]]. +By definition, $\O$ is [[Definition:Finite Set|finite]]. +By definition, $\O$ is a [[Definition:Countable Set/Definition 2|countable set]]. +{{qed}} +[[Category:Set Theory]] +[[Category:Empty Set]] +[[Category:Countable Sets]] +ly0tg8512aadn5cgyxnqkjdd9niomfe +\end{proof}<|endoftext|> +\section{Lindelöf's Lemma} +Tags: Real Analysis + +\begin{theorem} +Let $C$ be a [[Definition:Set|set]] of [[Definition:Open Set of Real Numbers|open real sets]]. +Let $S$ be a [[Definition:Real Number|real set]] that is [[Definition:Cover of Set|covered]] by $C$. +Then there exists a [[Definition:Countable Set|countable]] [[Definition:Subset|subset]] of $C$ that [[Definition:Cover of Set|covers]] $S$. +\end{theorem} + +\begin{proof} +=== [[Lindelöf's Lemma/Lemma|Lemma]] === +{{:Lindelöf's Lemma/Lemma|Lemma}} {{qed|lemma}} +We have that $S$ is [[Definition:Cover of Set|covered]] by $C$. +This means that $S$ is a [[Definition:Subset|subset]] of $\displaystyle \bigcup_{O \mathop \in C} O$. +From the [[Lindelöf's Lemma/Lemma|lemma]]: +:$\displaystyle \bigcup_{O \mathop \in D} O = \bigcup_{O \mathop \in C} O$ +where $D$ is a [[Definition:Countable Set|countable]] [[Definition:Subset|subset]] of $C$. +Hence $S$ is also a [[Definition:Subset|subset]] of $\displaystyle \bigcup_{O \mathop \in D} O$. +In other words, $S$ is [[Definition:Cover of Set|covered]] by $D$. +That is, $S$ is [[Definition:Cover of Set|covered]] by a [[Definition:Countable Set|countable]] [[Definition:Subset|subset]] of $C$. +This finishes the proof of the theorem. +{{qed}} +{{Namedfor|Ernst Leonard Lindelöf|cat = Lindelöf}} +[[Category:Real Analysis]] +ikufg8dfndbd59rgeksn2inb9wt94g5 +\end{proof}<|endoftext|> +\section{Area of Isosceles Triangle in terms of Sides} +Tags: Areas of Triangles, Isosceles Triangles + +\begin{theorem} +Let $\triangle ABC$ be an [[Definition:Isosceles Triangle|isosceles triangle]] whose [[Definition:Apex of Isosceles Triangle|apex]] is $A$. +Let $r$ be the [[Definition:Length (Linear Measure)|length]] of a [[Definition:Legs of Isosceles Triangle|leg]] of $\triangle ABC$. 
+Let $b$ be the [[Definition:Length (Linear Measure)|length]] of the [[Definition:Base of Isosceles Triangle|base]] of $\triangle ABC$. +Then the [[Definition:Area|area]] $\mathcal A$ of $\triangle ABC$ is given by: +:$\mathcal A = \dfrac b 4 \sqrt{4 r^2 - b^2}$ +\end{theorem} + +\begin{proof} +:[[File:IsoscelesTriangleArea.png|300px]] +Let $h$ be the [[Definition:Height of Triangle|height]] of $\triangle ABC$. +{{begin-eqn}} +{{eqn | l = \mathcal A + | r = \frac 1 2 b h + | c = [[Area of Triangle in Terms of Side and Altitude]] +}} +{{eqn | r = \frac b 2 \sqrt {r^2 - \left({\frac b 2}\right)^2} + | c = [[Pythagoras's Theorem]] +}} +{{eqn | r = \frac b 2 \sqrt {\frac {4 r^2 - b^2} 4} + | c = simplification +}} +{{eqn | r = \frac b 4 \sqrt {4 r^2 - b^2} + | c = simplification +}} +{{end-eqn}} +{{qed}} +[[Category:Areas of Triangles]] +[[Category:Isosceles Triangles]] +reh3cdnhcur5jglo3ano21wpmtummtc +\end{proof}<|endoftext|> +\section{Even Function Times Even Function is Even} +Tags: Even Functions + +\begin{theorem} +Let $X \subset \R$ be a [[Definition:Symmetric Set of Real Numbers|symmetric set of real numbers]]: +:$\forall x \in X: -x \in X$ +Let $f, g: X \to \R$ be two [[Definition:Even Function|even functions]]. +Let $f \cdot g$ denote the [[Definition:Pointwise Multiplication of Real-Valued Functions|pointwise product]] of $f$ and $g$. +Then $\paren {f \cdot g}: X \to \R$ is also an [[Definition:Even Function|even function]]. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\paren {f \cdot g} } {-x} + | r = \map f {-x} \cdot \map g {-x} + | c = {{Defof|Pointwise Multiplication of Real-Valued Functions}} +}} +{{eqn | r = \map f x \cdot \map g x + | c = {{Defof|Even Function}} +}} +{{eqn | r = \map {\paren {f \cdot g} } x + | c = {{Defof|Pointwise Multiplication of Real-Valued Functions}} +}} +{{end-eqn}} +Thus, by definition, $\paren {f \cdot g}$ is an [[Definition:Even Function|even function]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Odd Function Times Even Function is Odd} +Tags: Even Functions, Odd Functions + +\begin{theorem} +Let $X \subset \R$ be a [[Definition:Symmetric Set of Real Numbers|symmetric set of real numbers]]: +:$\forall x \in X: -x \in X$ +Let $f: X \to \R$ be an [[Definition:Odd Function|odd function]]. +Let $g: X \to \R$ be an [[Definition:Even Function|even function]]. +Let $f \cdot g$ denote the [[Definition:Pointwise Multiplication of Real-Valued Functions|pointwise product]] of $f$ and $g$. +Then $\paren {f \cdot g}: X \to \R$ is an [[Definition:Odd Function|odd function]]. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\paren {f \cdot g} } {-x} + | r = \map f {-x} \cdot \map g {-x} + | c = {{Defof|Pointwise Multiplication of Real-Valued Functions}} +}} +{{eqn | r = \paren {-\map f x} \cdot \map g x + | c = {{Defof|Odd Function}} and {{Defof|Even Function}} +}} +{{eqn | r = -\map f x \cdot \map g x + | c = +}} +{{eqn | r = -\map {\paren {f \cdot g} } x + | c = {{Defof|Pointwise Multiplication of Real-Valued Functions}} +}} +{{end-eqn}} +Thus, by definition, $\paren {f \cdot g}$ is an [[Definition:Odd Function|odd function]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Odd Function Times Odd Function is Even} +Tags: Even Functions, Odd Functions + +\begin{theorem} +Let $S \subset \R$ be a [[Definition:Symmetric Set of Real Numbers|symmetric set of real numbers]]: +:$\forall x \in S: -x \in X$ +Let $f, g: X \to \R$ be two [[Definition:Odd Function|odd functions]]. 
+Let $f \cdot g$ denote the [[Definition:Pointwise Multiplication of Real-Valued Functions|pointwise product]] of $f$ and $g$. +Then $\paren {f \cdot g}: S \to \R$ is an [[Definition:Even Function|even function]]. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\paren {f \cdot g} } {-x} + | r = \map f {-x} \cdot \map g {-x} + | c = {{Defof|Pointwise Multiplication of Real-Valued Functions}} +}} +{{eqn | r = \paren {-\map f x} \cdot \paren {-\map g x} + | c = {{Defof|Odd Function}} +}} +{{eqn | r = \map f x \cdot \map g x + | c = +}} +{{eqn | r = \map {\paren {f \cdot g} } x + | c = {{Defof|Pointwise Multiplication of Real-Valued Functions}} +}} +{{end-eqn}} +Thus, by definition, $\paren {f \cdot g}$ is an [[Definition:Even Function|even function]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Mellin Transform of Exponential} +Tags: Mellin Transforms + +\begin{theorem} +Let $a$ be a [[Definition:Complex Number|complex]] [[Definition:Constant|constant]] and $e^t$ be the [[Definition:Complex Exponential Function|complex exponential]]. +Let $\MM$ be the [[Definition:Mellin Transform|Mellin transform]]. +Then: +:$\map {\MM \set {e^{-a t} } } s = a^{-s} \, \map \Gamma s$ +where $\map \Re a, \map \Re s > 0$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\MM \set {e^{-a t} } } s + | r = \int_0^{\to +\infty} t^{s - 1} e^{-a t} \rd t + | c = {{Defof|Mellin Transform}} +}} +{{eqn | r = \int_0^{\to +\infty} \paren {\dfrac t a}^{s - 1} e^{-a \paren {\frac t a} } \frac {\d t} a + | c = [[Integration by Substitution]], $t \mapsto \dfrac t a$, $\d t \mapsto \dfrac {\d t} a$ +}} +{{eqn | r = a^{-s} \int_0^{\to +\infty} t^{s - 1} e^{-t} \rd t + | c = [[Primitive of Constant Multiple of Function]] +}} +{{eqn | r = a^{-s} \, \map \Gamma s + | c = {{Defof|Integral Form of Gamma Function|Gamma Function}} +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Mellin Transform of Dirac Delta Function} +Tags: Mellin Transforms, Dirac Delta Function + +\begin{theorem} +Let $\delta \left({t}\right)$ be the [[Definition:Dirac Delta Function|Dirac delta function]]. +Let $c$ be a [[Definition:Positive Real Number|positive real number]]. +Let $\mathcal M$ be the [[Definition:Mellin Transform|Mellin transform]]. +Then: +:$\mathcal M \left\{ { \delta_c \left({t}\right)} \right\} \left({s}\right) = c^{s-1}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \mathcal M \left\{ { \delta_c \left({t}\right)} \right\} \left({s}\right) + | r = \int_0^{\to +\infty} t^{s-1} \delta_c \left({t}\right) \rd t + | c = Definition of [[Definition:Mellin Transform|Mellin Transform]] +}} +{{eqn | r = \int_{c^-}^{c^+} t^{s-1} \delta_c \left({t}\right) \rd t + | c = Definition of [[Definition:Dirac Delta Function|Dirac delta function]]: [[Definition:Integrand|integrand]] is elsewhere zero +}} +{{eqn | r = \int_{c^-}^{c^+} c^{s-1} \delta_c \left({t}\right) \rd t + | c = $t$ is [[Definition:Constant|constant]] in [[Definition:Closed Interval|interval]] $\left[{c^- \,.\,.\, c^+}\right]$ +}} +{{eqn | r = c^{s-1} \int_{c^-}^{c^+} \delta_c \left({t}\right) \rd t + | c = [[Primitive of Constant Multiple of Function]] +}} +{{eqn | r = c^{s-1} + | c = Definition of [[Definition:Dirac Delta Function|Dirac Delta Function]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Mellin Transform of Heaviside Step Function} +Tags: Mellin Transforms, Heaviside Step Function + +\begin{theorem} +Let $c$ be a [[Definition:Constant|constant]] [[Definition:Real Number|real number]]. 
+Let $u_c \left({t}\right)$ be the [[Definition:Heaviside Step Function|Heaviside step function]]. +Let $\mathcal M$ be the [[Definition:Mellin Transform|Mellin transform]]. +Then: +:$\mathcal M \left\{ {u_c \left({t}\right)}\right\} \left({s}\right) = - \dfrac {c^s} s$ +for $c > 0, \Re \left({s}\right) < 0$. +\end{theorem} + +\begin{proof} +=== [[Mellin Transform of Heaviside Step Function/Lemma|Lemma]] === +{{:Mellin Transform of Heaviside Step Function/Lemma}} +{{begin-eqn}} +{{eqn | l = \mathcal M \left\{ {u_c \left({t}\right)}\right\} \left({s}\right) + | r = \int_0^{\to +\infty} t^{s-1} u_c \left({t}\right) \ \mathrm d t + | c = Definition of [[Definition:Mellin Transform|Mellin Transform]] +}} +{{eqn | r = \int_c^{\to +\infty} t^{s-1} \ \mathrm d t + | c = Definition of [[Definition:Heaviside Step Function|Heaviside step function]]: [[Definition:Integrand|integrand]] is elsewhere zero +}} +{{eqn | r = \left.{\dfrac {t^s} s}\right \vert_c^{+\infty} + | c = [[Primitive of Power]] +}} +{{eqn | r = 0 - \dfrac {c^s} s + | c = By Lemma +}} +{{eqn | r = - \dfrac {c^s} s + | c = +}} +{{end-eqn}} +{{qed}} +[[Category:Mellin Transforms]] +[[Category:Heaviside Step Function]] +fo1by4t7vnl09obqnc6vzy2309b6z6q +\end{proof}<|endoftext|> +\section{Mellin Transform of Heaviside Step Function/Corollary} +Tags: Mellin Transforms, Heaviside Step Function + +\begin{theorem} +:$\mathcal M \left\{ {u\left({c - t}\right)}\right\} \left({s}\right) = \dfrac {c^s} s$ +for $c > 0, \Re \left({s}\right) > 0$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \mathcal M \left\{ {u \left({c-t}\right)}\right\} \left({s}\right) + | r = \int_0^{\to +\infty} t^{s - 1} u \left({c - t}\right) \mathrm d t + | c = Definition of [[Definition:Mellin Transform|Mellin transform]] +}} +{{eqn | r = \int_0^c t^{s-1} \mathrm d t + | c = Definition of [[Definition:Heaviside Step Function|Heaviside step function]]: [[Definition:Integrand|integrand]] is elsewhere zero +}} +{{eqn | r = \left.{\dfrac {t^s} s}\right \vert_0^c + | c = [[Primitive of Power]] +}} +{{eqn | r = \dfrac {c^s} s - 0 + | c = +}} +{{eqn | r = \dfrac {c^s} s + | c = +}} +{{end-eqn}} +{{qed}} +[[Category:Mellin Transforms]] +[[Category:Heaviside Step Function]] +08zk1cdjldattlrkjqqbuxlsdtu0tdh +\end{proof}<|endoftext|> +\section{Mellin Transform of Power Times Function} +Tags: Mellin Transforms + +\begin{theorem} +Let $t^n: \R \to \R$ be [[Definition:Integer Power|$t$ to the $n$th power]] for some $n \in \N_{\ge 0}$. +Let $\mathcal M$ be the [[Definition:Mellin Transform|Mellin transform]]. +Then: +:$\mathcal M \left\{ {t^n f \left({t}\right)} \right\} \left({s}\right) = \mathcal M \left\{ {f \left({t}\right)}\right\} \left({s + n}\right)$ +given that both transforms exist. 
+\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \mathcal M \left\{ {t^n f \left({t}\right)}\right\} \left({s}\right) + | r = \int_0^{\to +\infty} t^{s - 1} t^n f \left({t}\right) \ \mathrm d t + | c = Definition of [[Definition:Mellin Transform|Mellin Transform]] +}} +{{eqn | r = \int_0^{\to +\infty} t^{\left({s + n}\right)-1} f \left({t}\right) \ \mathrm d t + | c = [[Exponent Combination Laws]] +}} +{{eqn | r = \mathcal M \left\{ {f \left({t}\right)} \right\} \left({s + n}\right) + | c = Definition of [[Definition:Mellin Transform|Mellin Transform]] +}} +{{end-eqn}} +{{qed}} +[[Category:Mellin Transforms]] +p05uwj706t6ykmc70j1u6xvni60pgpn +\end{proof}<|endoftext|> +\section{Mellin Transform of Dirac Delta Function by Function} +Tags: Mellin Transforms, Dirac Delta Function + +\begin{theorem} +Let $f: \R \to \R$ be a [[Definition:Function|function]]. +Let $c \in \R_{>0}$ be a [[Definition:Positive Real Number|positive]] [[Definition:Constant|constant]] [[Definition:Real Number|real number]]. +Let $\delta_c \left({t}\right)$ be the [[Definition:Dirac Delta Function|Dirac delta function]]. +Let $\mathcal M$ be the [[Definition:Mellin Transform|Mellin transform]]. +Then: +:$\mathcal M \left\{ {\delta_c \left({t}\right) f \left({t}\right)} \right\} \left({s}\right) = c^{s - 1} f \left({c}\right)$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \mathcal M \left\{ {\delta_c \left({t}\right) f \left({t}\right)} \right\} \left({s}\right) + | r = \int_0^{\to +\infty} t^{s - 1} \delta_c \left({t}\right) f \left({t}\right) \ \mathrm d t + | c = {{Defof|Mellin Transform}} +}} +{{eqn | r = \int_{c^-}^{c^+} t^{s - 1} \delta_c \left({t}\right) f \left({t}\right) \ \mathrm d t + | c = {{Defof|Dirac Delta Function}}: [[Definition:Integrand|integrand]] is elsewhere zero +}} +{{eqn | r = \int_{c^-}^{c^+} c^{s - 1} \delta_c \left({t}\right) f \left({c}\right) \ \mathrm d t + | c = $t$ is [[Definition:Constant|constant]] in [[Definition:Closed Interval|interval]] $\left[{c^-\,.\,.\,c^+}\right]$ +}} +{{eqn | r = c^{s - 1} f \left({c}\right) \int_{c^-}^{c^+} \delta_c \left({t}\right) \ \mathrm d t + | c = [[Primitive of Constant Multiple of Function]] +}} +{{eqn | r = c^{s - 1} f \left({c}\right) + | c = {{Defof|Dirac Delta Function}} +}} +{{end-eqn}} +{{qed}} +[[Category:Mellin Transforms]] +[[Category:Dirac Delta Function]] +9xmrwrtkjvm7an8dlpo9l4778l3rskm +\end{proof}<|endoftext|> +\section{Unity of Ring is Idempotent} +Tags: Ring Theory + +\begin{theorem} +Let $\left({R, +, \circ}\right)$ be a [[Definition:Ring with Unity|ring with unity]] whose [[Definition:Unity of Ring|unity]] is $1_R$. +Then $1_R$ is an [[Definition:Idempotent Element|idempotent element]] of $R$ under the [[Definition:Ring Product|ring product]] $\circ$: +:$1_R \circ 1_R = 1_R$ +\end{theorem} + +\begin{proof} +By definition of [[Definition:Ring with Unity|ring with unity]], $\left({R, \circ}\right)$ is a [[Definition:Monoid|monoid]] whose [[Definition:Identity Element|identity element]] is $1_R$. +From [[Identity Element is Idempotent]] (applied to $1_R$): +:$1_R \circ 1_R = 1_R$ +which was to be proven. +{{qed}} +[[Category:Ring Theory]] +sykxawsy95ekov7hb6d6eq70yxiv8oq +\end{proof}<|endoftext|> +\section{Mellin Transform of Higher Order Exponential} +Tags: Mellin Transforms + +\begin{theorem} +Let $a$ be a [[Definition:Complex Number|complex]] [[Definition:Constant|constant]]. +Let $n$ be a [[Definition:Natural Number|natural number]]. 
+Let $e^t$ be the [[Definition:Complex Exponential Function|complex exponential of $t$]]. +Let $\MM$ be the [[Definition:Mellin Transform|Mellin transform]]. +Then: +:$\map {\MM \set {e^{-a t^n} } } s = \dfrac {a^{-s/n} } n \map \Gamma {\dfrac s n}$ +where $\map \Gamma z$ is the [[Definition:Gamma Function|Gamma function]] and $\map \Re a$, $\map \Re s > 0$. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {\MM \set {e^{-a t^n} } } s + | r = \int_0^{\to +\infty} t^{s-1} e^{-a t^n} \rd t + | c = {{Defof|Mellin Transform}} +}} +{{eqn | r = \int_0^{\to +\infty} \paren {\dfrac t {\sqrt[n] a} }^{s - 1} e^{-a \paren {\dfrac t {\sqrt[n] {a} } }^n} \dfrac {\d t} {\sqrt[n] a} + | c = [[Integration by Substitution]], $t \mapsto \dfrac t {\sqrt[n] a}$, $\d t \mapsto \dfrac {\d t} {\sqrt[n] {a} }$ +}} +{{eqn | r = a^{-s/n} \int_0^{\to +\infty} t^{s - 1} e^{-t^n} \rd t + | c = [[Primitive of Constant Multiple of Function]] +}} +{{eqn | r = a^{-s/n} \int_0^{\to +\infty} t^{\paren {s - 1}/n} e^{-t} \dfrac {t^{1 / n - 1} \rd t} n + | c = [[Integration by Substitution]], $t \mapsto \sqrt[n] t$, $\d t \mapsto \dfrac {t^{1 / n - 1} \rd t} n$ +}} +{{eqn | r = \dfrac{a^{-s/n} } n \int_0^{\to +\infty} t^{s/n-1} e^{-t} \rd t + | c = [[Primitive of Constant Multiple of Function]], [[Exponent Combination Laws]] +}} +{{eqn | r = \dfrac{a^{-s/n} } n \map \Gamma {\dfrac s n} + | c = {{Defof|Gamma Function}} +}} +{{end-eqn}} +{{qed}} +[[Category:Mellin Transforms]] +2yxilpge4j3790l4fznsrij0k2le73c +\end{proof}<|endoftext|> +\section{Linear Combination of Mellin Transforms} +Tags: Mellin Transforms + +\begin{theorem} +Let $\mathcal M$ be the [[Definition:Mellin Transform|Mellin transform]]. +Let $f \left({t}\right)$, $g \left({t}\right)$ be [[Definition:Function|functions]] such that $\mathcal M \left\{ {f \left({t}\right)}\right\} \left({s}\right)$ and $\mathcal M \left\{ {f \left({t}\right)}\right\} \left({s}\right)$ exist. +Let $\lambda \in \C$ be a [[Definition:Constant|constant]]. +Then: +:$\mathcal M \left\{ {\lambda f \left({t}\right) + g \left({t}\right)}\right\} \left({s}\right) = \lambda \mathcal M \left\{ {f \left({t}\right)}\right\}\left({s}\right) + \mathcal M \left\{ {g \left({t}\right)}\right\} \left({s}\right)$ +everywhere all the above expressions are defined. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \mathcal M \left\{ {\lambda f \left({t}\right) + g \left({t}\right) }\right\} \left({s}\right) + | r = \int_0^{\to +\infty} t^{s - 1} \left({\lambda f \left({t}\right) + g \left({t}\right)}\right) \mathrm d t + | c = Definition of [[Definition:Mellin Transform|Mellin Transform]] +}} +{{eqn | r = \lambda \int_0^{\to +\infty} t^{s - 1} f \left({t}\right) \mathrm d t + \int_0^{\to +\infty} t^{s - 1} g \left({t}\right) \mathrm d t + | c = distributing $t^{s - 1}$, [[Linear Combination of Complex Integrals]] +}} +{{eqn | r = \lambda \mathcal M \left\{ {f \left({t}\right)}\right\} \left({s}\right) + \mathcal M \left\{ {g \left({t}\right)}\right\} \left({s}\right) + | c = Definition of [[Definition:Mellin Transform|Mellin Transform]] +}} +{{end-eqn}} +{{qed}} +[[Category:Mellin Transforms]] +9apkv5zjrkq9lazbsm2tvhhcab6gxin +\end{proof}<|endoftext|> +\section{Complex Numbers as External Direct Product} +Tags: Complex Numbers, External Direct Products + +\begin{theorem} +Let $\struct {\C_{\ne 0}, \times}$ be the [[Definition:Group|group]] of [[Definition:Zero|non-zero]] [[Definition:Complex Number|complex numbers]] under [[Definition:Complex Multiplication|multiplication]]. 
+Let $\struct {\R_{> 0}, \times}$ be the [[Definition:Group|group]] of [[Definition:Positive Real Number|positive real numbers]] under [[Definition:Real Multiplication|multiplication]]. +Let $\struct {K, \times}$ be the [[Definition:Circle Group|circle group]]. +Then: +:$\struct {\C_{\ne 0}, \times} \cong \struct {\R_{> 0}, \times} \times \struct {K, \times}$ +{{explain|It is apparent that the second $\times$ is Cartesian product, but this is not obvious.}} +\end{theorem} + +\begin{proof} +Let $\phi: \C_{\ne 0} \to \R_{> 0} \times K$ be the [[Definition:Mapping|mapping]]: +:$\map \phi {r e^{i \theta} } = \paren {r, e^{i \theta} }$ +$\forall \tuple {a, b} \in \R_{> 0} \times K:\exists z = a \times b \in \C$ such that: +:$\map \phi z = \tuple {a, b}$ +by [[Complex Multiplication is Closed]] and $\R \subset \C$. +So $\phi$ is [[Definition:Surjective|surjective]]. +To prove $\phi$ is [[Definition:Injective|injective]], let $\map \phi {r_1 e^{i \theta_1} } = \map \phi {r_2 e^{i \theta_2} }$. +{{begin-eqn}} +{{eqn | l = \map \phi {r_1 e^{i \theta_1} } + | r = \map \phi {r_2 e^{i \theta_2} } + | c = +}} +{{eqn | ll= \leadsto + | l = \paren {r_1, e^{i \theta_1} } + | r = \paren {r_2, e^{i \theta_2} } + | c = Definition of $\phi$ +}} +{{eqn | ll= \leadsto + | l = r_1 = r_2 + | o = \land + | r = e^{i \theta_1} = e^{i \theta_2} + | c = +}} +{{eqn | ll= \leadsto + | l = r_1 e^{i \theta_1} + | r = r_2 e^{i \theta_2} + | c = +}} +{{end-eqn}} +So $\phi$ is [[Definition:Injective|injective]], thus [[Definition:Bijective|bijective]]. +Also: +{{begin-eqn}} +{{eqn | l = \map \phi {r_1 e^{i \theta_1} \times r_2 e^{i \theta_2} } + | r = \map \phi {r_1 r_2 e^{i \theta_1 + i \theta_2} } + | c = [[Product of Complex Numbers in Exponential Form]] +}} +{{eqn | r = \paren {r_1 r_2, e^{i \theta_1 + i \theta_2} } + | c = Definition of $\phi$ +}} +{{eqn | r = \paren {r_1 \times r_2, e^{i \theta_1} \times e^{i \theta_2} } + | c = [[Exponential of Sum]] +}} +{{eqn | r = \paren {r_1, e^{i\theta_1} } \times \paren {r_2, e^{i\theta_2} } + | c = {{Defof|Operation Induced by Direct Product|subdef = General Definition}} +}} +{{eqn | r = \map \phi {r_1 e^{i \theta_1} } \times \map \phi {r_2 e^{i \theta_2} } + | c = Definition of $\phi$ +}} +{{end-eqn}} +{{explain|It is not clear how $\paren {r_1 \times r_2, e^{i \theta_1} \times e^{i \theta_2} } {{=}} \paren {r_1, e^{\theta_1} } \times \paren {r_2, e^{\theta_2} }$. The first $\times$ is the times of multiplication, real and complex, and the second appears to be the $\times$ of cartesian product. In any case, it needs to be established that $paren {a \times b, c \times d} {{=}} \tuple {a, c} \times \tuple {b, d}$ and in what context this holds -- and indeed, what it actually means.}} +So $\phi$ is a [[Definition:Group Homomorphism|group homomorphism]]. +Since it is [[Definition:Bijection|bijective]], it is a [[Definition:Group Isomorphism|group isomorphism]]. +{{qed}} +[[Category:Complex Numbers]] +[[Category:External Direct Products]] +3gg7vhxarvgm66j64kqwotvpxn57vdq +\end{proof}<|endoftext|> +\section{Complex Numbers as Quotient Ring of Real Polynomial} +Tags: Complex Numbers, Quotient Rings + +\begin{theorem} +Let $\C$ be the [[Definition:Complex Number|set of complex numbers]]. +Let $P \left[{x}\right]$ be the set of [[Definition:Polynomial over Real Numbers|polynomials over real numbers]], where the [[Definition:Polynomial Coefficient|coefficients]] of the [[Definition:Polynomial over Real Numbers|polynomials]] are [[Definition:Real Number|real]]. 
+Let $\left\langle{x^2 + 1}\right\rangle = \left\{ {Q \left({x}\right) \left({x^2 + 1}\right): Q \left({x}\right) \in P \left[{x}\right]}\right\}$ be the [[Definition:Ideal of Ring|ideal]] [[Definition:Generator of Ideal|generated]] by $x^2 + 1$ in $P \left[{x}\right]$. +Let $D = P \left[{x}\right] / \left\langle{x^2 + 1}\right\rangle$ be the [[Definition:Quotient Ring|quotient]] of $P \left[{x}\right]$ modulo $\left\langle{x^2 + 1}\right\rangle$. +Then: +:$\left({\C, +, \times}\right) \cong \left({D, +, \times}\right)$ +\end{theorem} + +\begin{proof} +By [[Division Algorithm of Polynomial]], any set in $D$ has an [[Definition:Element|element]] in the form $a + b x$. +Define $\phi: D \to \C$ as a [[Definition:Mapping|mapping]]: +:$\phi \left({\left[\!\left[{a + b x}\right]\!\right]}\right) = a + b i$ +We have that: +:$\forall z = a + b i \in \C : \exists \left[\!\left[{a + b x}\right]\!\right] \in D$ +such that: +:$\phi \left({\left[\!\left[{a + b x}\right]\!\right]}\right) = a + b i = z$ +So $\phi$ is a [[Definition:Surjection|surjection]]. +To prove that it is a [[Definition:injection|injection]], we let: +:$\phi \left({\left[\!\left[{a + b x}\right]\!\right]}\right) = \phi \left({\left[\!\left[{c + d x}\right]\!\right]}\right)$ +So: +{{begin-eqn}} +{{eqn | l = \phi \left({\left[\!\left[{a + b x}\right]\!\right]}\right) + | r = \phi \left({\left[\!\left[{c + d x}\right]\!\right]}\right) + | c = +}} +{{eqn | ll= \iff + | l = a + b i + | r = c + d i + | c = Definition of $\phi$ +}} +{{eqn | ll= \iff + | l = a = c + | o = \land + | r = b = d + | c = Equating both [[Definition:Real Part|real part]] and [[Definition:Imaginary Part|imaginary part]] +}} +{{eqn | ll= \iff + | l = a + b x + | r = c + d x + | c = +}} +{{eqn | ll= \iff + | l = \left[\!\left[{a + b x}\right]\!\right] + | r = \left[\!\left[{c + d x}\right]\!\right] + | c = +}} +{{end-eqn}} +So $\phi$ is an [[Definition:Injection|injection]] and thus a [[Definition:Bijection|bijection]]. +It remains to show that $\phi$ is a [[Definition:Homomorphism|homomorphism]] for the operation $+$ and $\times$. 
+{{begin-eqn}} +{{eqn | l = \phi \left({\left[\!\left[{a + b x}\right]\!\right] + \left[\!\left[{c + d x}\right]\!\right]}\right) + | r = \phi \left({\left[\!\left[{\left({a + c}\right) + \left({b + d}\right) x}\right]\!\right]}\right) + | c = +}} +{{eqn | r = \left({a + c}\right) + \left({b + d}\right) i + | c = Definition of $\phi$ +}} +{{eqn | r = \left({a + b i}\right) + \left({c + d i}\right) + | c = {{Defof|Complex Addition}} +}} +{{eqn | r = \phi \left({\left[\!\left[{a + b x}\right]\!\right]}\right) + \phi \left({\left[\!\left[{c + d x}\right]\!\right]}\right) + | c = +}} +{{end-eqn}} +{{begin-eqn}} +{{eqn | l = \phi \left({\left[\!\left[{a + b x}\right]\!\right] \times \left[\!\left[{c + d x}\right]\!\right]}\right) + | r = \phi \left({\left[\!\left[{\left({a + b x}\right) \times \left({c + d x}\right)}\right]\!\right]}\right) + | c = +}} +{{eqn | r = \phi \left({\left[\!\left[{a \times c + \left({a \times d + b \times c}\right) x + b \times d \, x^2}\right]\!\right]}\right) + | c = +}} +{{eqn | r = \phi \left({\left[\!\left[{a \times c + \left({a \times d + b \times c}\right) x + b \times d \, x^2 - b \times d \left({x^2 + 1}\right)}\right]\!\right]}\right) + | c = Definition of $D$ as a [[Definition:Quotient Ring|quotient ring]] modulo $\left\langle{x^2 + 1}\right\rangle$ +}} +{{eqn | r = \phi \left({\left[\!\left[{\left({a \times c - b \times d}\right) + \left({a \times d + b \times c}\right) x}\right]\!\right]}\right) + | c = +}} +{{eqn | r = \left({a \times c - b \times d}\right) + \left({a \times d + b \times c}\right) i + | c = Definition of $\phi$ +}} +{{eqn | r = \left({a + b i}\right) \times \left({c + d i}\right) + | c = {{Defof|Complex Multiplication}} +}} +{{eqn | r = \phi \left({\left[\!\left[{a + b x}\right]\!\right]}\right) \times \phi \left({\left[\!\left[{c + d x}\right]\!\right]}\right) + | c = Definition of $\phi$ +}} +{{end-eqn}} +Thus $\phi$ has been demonstrated to be a [[Definition:Bijection|bijective]] [[Definition:Ring Homomorphism|ring homomorphism]] and thus by definition a [[Definition:Ring Isomorphism|ring isomorphism]]. +{{qed}} +[[Category:Complex Numbers]] +[[Category:Quotient Rings]] +ly358o0xbn7fa4eepsu7ghe353n3763 +\end{proof}<|endoftext|> +\section{Quaternion Modulus in Terms of Conjugate} +Tags: Quaternion Modulus + +\begin{theorem} +Let $\mathbf x = a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k$ be a [[Definition:Quaternion|quaternion]]. +Let $\left\vert{\mathbf x}\right\vert$ be the [[Definition:Quaternion Modulus|modulus]] of $\mathbf x$. +Let $\overline{\mathbf x}$ be the [[Definition:Conjugate Quaternion|conjugate]] of $\mathbf x$. +Then: +: $\left\vert{\mathbf x}\right\vert^2 \mathbf 1 = \mathbf x \overline{\mathbf x}$ +\end{theorem} + +\begin{proof} +Let $\mathbf x = a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k$. +Then: +{{begin-eqn}} +{{eqn | l = \mathbf x \overline{\mathbf x} + | r = \left({a^2+b^2+c^2+d^2}\right)\mathbf 1 + | c = [[Product of Quaternion with Conjugate]] +}} +{{eqn | r = \left\vert \mathbf x \right\vert ^2 \mathbf 1 + | c = Definition of [[Definition:Quaternion Modulus|Quaternion Modulus]] +}} +{{end-eqn}} +{{qed}} +[[Category:Quaternion Modulus]] +hjk6fftadnyj046lmgg4czh6x3agskf +\end{proof}<|endoftext|> +\section{Sufficient Condition for Quaternion Multiplication to Commute} +Tags: Quaternions + +\begin{theorem} +In general, [[Definition:Quaternion/Multiplication|quaternion multiplication]] does not [[Definition:Commutative Operation|commute]]. 
+But, for $\mathbf x,\mathbf y \in \H$, $\mathbf x \times \mathbf y = \mathbf y \times \mathbf x$ if any one of the following conditions hold: +{{begin-eqn}} +{{eqn | n = 1a + | l = \mathbf x, \mathbf y + | o = \in + | r = \set {a \mathbf 1 + b \mathbf i + 0 \mathbf j + 0 \mathbf k: a, b \in \R} +}} +{{eqn | n = 1b + | l = \mathbf x, \mathbf y + | o = \in + | r = \set {a \mathbf 1 + 0 \mathbf i + c \mathbf j + 0 \mathbf k: a, c \in \R} +}} +{{eqn | n = 1c + | l = \mathbf x, \mathbf y + | o = \in + | r = \set {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + d \mathbf k: a, d \in \R} +}} +{{eqn | n = 2a + | l = \mathbf x + | o = \in + | r = \set {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k: a \in \R} +}} +{{eqn | n = 2b + | l = \mathbf y + | o = \in + | r = \set {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k: a \in \R} +}} +{{eqn | n = 3 + | l = \mathbf x + | r = \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \overline {\mathbf y}: a \in \R +}} +{{end-eqn}} +\end{theorem} + +\begin{proof} +=== Proof of $\paren 1$ === +It follows directly from [[Complex Numbers form Subfield of Quaternions]] and [[Complex Multiplication is Commutative]]. +{{qed|lemma}} +=== Proof of $\paren 2$ === +Let $\mathbf x \in \set {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k: a\in \R}$. +Let $\mathbf y = e \mathbf 1 + f \mathbf i + g \mathbf j + h \mathbf k: e, f, g, h \in \R$. +Then: +{{begin-eqn}} +{{eqn | l = \mathbf x \times \mathbf y + | r = a e \mathbf 1 + a f \mathbf i + a g \mathbf j + a h \mathbf k + | c = {{Defof|Quaternion Multiplication}}; the other terms are $0$ +}} +{{eqn | r = \mathbf y \times \mathbf x + | c = {{Defof|Quaternion Multiplication}} +}} +{{end-eqn}} +The above is the proof of $\paren {2a}$, and the proof of $\paren {2b}$ is similar. +{{qed|lemma}} +=== Proof of $\paren 3$ === +{{begin-eqn}} +{{eqn | l = \mathbf x \times \mathbf y + | r = \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \overline {\mathbf y} \times \mathbf y + | c = +}} +{{eqn | r = \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \overline {\mathbf y} \times \overline {\paren {\overline {\mathbf y} } } + | c = [[Quaternion Conjugation is Involution]] +}} +{{eqn | r = \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \cmod {\overline{\mathbf y} }^2 \mathbf 1 + | c = [[Quaternion Modulus in Terms of Conjugate]] +}} +{{eqn | r = \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \cmod {\mathbf y}^2 \mathbf 1 + | c = [[Quaternion Modulus of Conjugate]] +}} +{{eqn | r = \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \mathbf y \times \overline {\mathbf y} + | c = [[Quaternion Modulus in Terms of Conjugate]] +}} +{{eqn | r = \mathbf y \times \paren {a \mathbf 1 + 0 \mathbf i + 0 \mathbf j + 0 \mathbf k} \times \overline {\mathbf y} + | c = from $\paren 2$ +}} +{{eqn | r = \mathbf y \times \mathbf x + | c = +}} +{{end-eqn}} +{{qed}} +[[Category:Quaternions]] +bcp2yc29muagbt0j5sjmkgnju4nqbpm +\end{proof}<|endoftext|> +\section{Complex Conjugation is Involution} +Tags: Complex Conjugates, Involutions + +\begin{theorem} +Let $z = x + i y$ be a [[Definition:Complex Number|complex number]]. +Let $\overline z$ denote the [[Definition:Complex Conjugate|complex conjugate]] of $z$. 
+Then the [[Definition:Unary Operation|operation]] of [[Definition:Complex Conjugation|complex conjugation]] is an [[Definition:Involution (Mapping)|involution]]:
+:$\overline {\paren {\overline z} } = z$
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = \overline {\paren {\overline z} }
+ | r = \overline {\paren {\overline {x + i y} } }
+ | c = Definition of $z$
+}}
+{{eqn | r = \overline {x - i y}
+ | c = {{Defof|Complex Conjugate}}
+}}
+{{eqn | r = x + i y
+ | c = {{Defof|Complex Conjugate}}
+}}
+{{eqn | r = z
+ | c = Definition of $z$
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Quaternion Modulus of Conjugate}
+Tags: Quaternion Modulus
+
+\begin{theorem}
+Let $z = a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k$ be a [[Definition:Quaternion|quaternion]].
+Let $\overline z$ be the [[Definition:Quaternion Conjugate|conjugate]] of $z$.
+Let $\cmod z$ be the [[Definition:Quaternion Modulus|quaternion modulus]] of $z$.
+Then:
+:$\cmod {\overline z} = \cmod z$
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = \cmod z
+ | r = \sqrt {a^2 + b^2 + c^2 + d^2}
+ | c = {{Defof|Quaternion Modulus}}
+}}
+{{eqn | l = \cmod {\overline z}
+ | r = \cmod {a \mathbf 1 - b \mathbf i - c \mathbf j - d \mathbf k}
+ | c = {{Defof|Quaternion Conjugate}}
+}}
+{{eqn | r = \sqrt {a^2 + \paren {-b}^2 + \paren {-c}^2 + \paren {-d}^2}
+ | c = {{Defof|Quaternion Modulus}}
+}}
+{{eqn | r = \sqrt {a^2 + b^2 + c^2 + d^2}
+ | c = 
+}}
+{{end-eqn}}
+{{qed}}
+[[Category:Quaternion Modulus]]
+dwgpvr41a8er7xqgy28nekav2mi7kdt
+\end{proof}<|endoftext|>
+\section{Quaternion Conjugation is Involution}
+Tags: Complex Conjugates, Involutions
+
+\begin{theorem}
+Let $\mathbf x = a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k$ be a [[Definition:Quaternion|quaternion]].
+Let $\overline {\mathbf x}$ denote the [[Definition:Quaternion Conjugate|quaternion conjugate]] of $\mathbf x$.
+Then the [[Definition:Unary Operation|operation]] of [[Definition:Quaternion Conjugate/Quaternion Conjugation|quaternion conjugation]] is an [[Definition:Involution (Mapping)|involution]]:
+:$\overline {\paren {\overline {\mathbf x} } } = \mathbf x$
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = \overline {\paren {\overline {\mathbf x} } }
+ | r = \overline {\paren {\overline {a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k} } }
+ | c = Definition of $\mathbf x$
+}}
+{{eqn | r = \overline {a \mathbf 1 - b \mathbf i - c \mathbf j - d \mathbf k}
+ | c = {{Defof|Quaternion Conjugate}}
+}}
+{{eqn | r = a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k
+ | c = {{Defof|Quaternion Conjugate}}
+}}
+{{eqn | r = \mathbf x
+ | c = Definition of $\mathbf x$
+}}
+{{end-eqn}}
+{{qed}}
+[[Category:Complex Conjugates]]
+[[Category:Involutions]]
+ntszc6votk2rt9orjp6f2atx26i52qa
+\end{proof}<|endoftext|>
+\section{Sum of Quaternion Conjugates}
+Tags: Quaternions
+
+\begin{theorem}
+Let $\mathbf x, \mathbf y \in \mathbb H$ be [[Definition:Quaternion|quaternions]].
+Let $\overline {\mathbf x}$ be the [[Definition:Quaternion Conjugate|conjugate]] of $\mathbf x$.
+Then:
+:$\overline {\mathbf x + \mathbf y} = \overline {\mathbf x} + \overline {\mathbf y}$
+\end{theorem}
+
+\begin{proof}
+Let:
+:$\mathbf x = a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k$
+:$\mathbf y = e \mathbf 1 + f \mathbf i + g \mathbf j + h \mathbf k$
+Then:
+{{begin-eqn}}
+{{eqn | l = \overline {\mathbf x + \mathbf y}
+ | r = \overline {\paren {a \mathbf 1 + b \mathbf i + c \mathbf j + d \mathbf k} + \paren {e \mathbf 1 + f \mathbf i + g \mathbf j + h \mathbf k} }
+ | c = 
+}}
+{{eqn | r = \overline {\paren {a + e} \mathbf 1 + \paren {b + f} \mathbf i + \paren {c + g} \mathbf j + \paren {d + h} \mathbf k}
+ | c = {{Defof|Quaternion Addition}}
+}}
+{{eqn | r = \paren {a + e} \mathbf 1 - \paren {b + f} \mathbf i - \paren {c + g} \mathbf j - \paren {d + h} \mathbf k
+ | c = {{Defof|Quaternion Conjugate}}
+}}
+{{eqn | r = \paren {a \mathbf 1 - b \mathbf i - c \mathbf j - d \mathbf k} + \paren {e \mathbf 1 - f \mathbf i - g \mathbf j - h \mathbf k}
+ | c = {{Defof|Quaternion Addition}}
+}}
+{{eqn | r = \overline {\mathbf x} + \overline {\mathbf y}
+ | c = {{Defof|Quaternion Conjugate}}
+}}
+{{end-eqn}}
+{{qed}}
+[[Category:Quaternions]]
+osfjph590poncxwphvgne964u7ovsaz
+\end{proof}<|endoftext|>
+\section{Principle of Recursive Definition for Peano Structure}
+Tags: Principle of Recursive Definition, Mapping Theory, Peano's Axioms
+
+\begin{theorem}
+Let $\struct {P, 0, s}$ be a [[Definition:Peano Structure|Peano structure]].
+Let $T$ be a [[Definition:Set|set]].
+Let $a \in T$.
+Let $g: T \to T$ be a [[Definition:Mapping|mapping]].
+Then there exists exactly one [[Definition:Mapping|mapping]] $f: P \to T$ such that:
+:$\forall x \in P: \map f x = \begin{cases}
+a & : x = 0 \\
+\map g {\map f n} & : x = \map s n
+\end{cases}$
+\end{theorem}

+\begin{proof}
+For each $n \in P$, define $\map A n$ as:
+:$\map A n = \set {h: P \to T \mid \map h 0 = a \land \forall m < n: \map h {\map s m} = \map g {\map h m} }$
+{{MissingLinks|Ordering $<$ on Peano Structure}}
+First, we prove for all $n \in P$ that $\map A n$ is not [[Definition:Empty Set|empty]].
+More formally, we prove that $A = \set {n \in P: \map A n \ne \O} = P$.
+For $n = 0$, there are no $m < n$, so any [[Definition:Mapping|mapping]] $h: P \to T$ with $\map h 0 = a$ is an [[Definition:Element|element]] of $\map A 0$.
+Since the [[Definition:Constant Mapping|constant mapping]] $\map h n = a$ satisfies this condition, it follows that $0 \in A$.
+Now suppose that $n \in A$, and let $h \in \map A n$.
+Define $h': P \to T$ by, for all $n' \in P$:
+:$\map {h'} {n'} = \begin {cases}
+\map h {n'} & : n' \le n \\
+\map g {\map h n} & : n' > n
+\end {cases}$
+To check that $h' \in \map A {\map s n}$, we have to verify that:
+:$\forall m < \map s n: \map {h'} {\map s m} = \map g {\map {h'} m}$
+Since $h \in \map A n$, only the case $m = n$ needs to be verified:
+{{begin-eqn}}
+{{eqn | l = \map {h'} {\map s n}
+ | r = \map g {\map h n}
+ | c = as $\map s n > n$
+}}
+{{eqn | r = \map g {\map {h'} n}
+ | c = Definition of $h'$
+}}
+{{end-eqn}}
+Therefore $h' \in \map A {\map s n}$, and so $\map A {\map s n} \ne \O$.
+That means $\map s n \in A$, and by [[Axiom:Peano's Axioms|Axiom $(\text P 5)$]], we conclude $A = P$.
+Now let $A' = \set {n \in P: \forall h, h' \in \map A n: \map h n = \map {h'} n}$.
+Then by definition of $\map A 0$, it follows that $0 \in A'$.
+Suppose now that $n \in A'$, and let $h, h' \in \map A {\map s n}$.
+Then:
+{{begin-eqn}}
+{{eqn | l = \map h {\map s n}
+ | r = \map g {\map h n}
+ | c = as $h \in \map A {\map s n}$
+}}
+{{eqn | r = \map g {\map {h'} n}
+ | c = as $n \in A'$ and $h, h' \in \map A n$
+}}
+{{eqn | r = \map {h'} {\map s n}
+ | c = as $h' \in \map A {\map s n}$
+}}
+{{end-eqn}}
+Hence $\map s n \in A'$, and by [[Axiom:Peano's Axioms|Axiom $(\text P 5)$]], we conclude $A' = P$.
+Because any $f: P \to T$ as in the theorem statement needs to be in all $\map A n$, it follows that such an $f$ is necessarily unique.
+Finally, we can define $f: P \to T$ by:
+:$\map f n = \map {h_n} n$
+where $h_n \in \map A n$.
+It is immediate from the definition of the $\map A n$ that:
+:$\forall m, n \in P: m < n \implies \map A n \subseteq \map A m$
+Hence, since $A' = P$, for every $m, n$ such that $m < n$:
+:$\map {h_m} m = \map {h_n} m$
+Thus, for all $m < n$:
+:$\map f m = \map {h_n} m$
+Since $h_n \in \map A n$, it follows that also:
+:$f \in \map A n$
+Thus, since $n$ was arbitrary, it follows that for all $n \in P$:
+:$\map f {\map s n} = \map g {\map f n}$
+as desired.
+{{qed}}
+[[Category:Principle of Recursive Definition]]
+[[Category:Mapping Theory]]
+[[Category:Peano's Axioms]]
+08ideooijeeu89e0f7hx1sewzyu595e
+\end{proof}<|endoftext|>
+\section{Principle of Recursive Definition for Minimal Infinite Successor Set}
+Tags: Mapping Theory, Minimal Infinite Successor Set, Principle of Recursive Definition
+
+\begin{theorem}
+Let $\omega$ be the [[Definition:Minimal Infinite Successor Set|minimal infinite successor set]].
+Let $T$ be a [[Definition:Set|set]].
+Let $a \in T$.
+Let $g: T \to T$ be a [[Definition:Mapping|mapping]].
+Then there exists exactly one [[Definition:Mapping|mapping]] $f: \omega \to T$ such that:
+:$\forall x \in \omega: \map f x = \begin {cases}
+a & : x = \O \\
+\map g {\map f n} & : x = n^+
+\end {cases}$
+where $n^+$ is the [[Definition:Successor Set|successor set]] of $n$.
+\end{theorem}
+
+\begin{proof}
+Take the function $F$ generated in [[Transfinite Recursion/Theorem 2|the second principle of transfinite recursion]].
+Set $f = F {\restriction_\omega}$.
+{{begin-eqn}}
+{{eqn | l = \map f \O
+ | r = \map F \O
+ | c = $\O \in \omega$
+}}
+{{eqn | l = \map f {n^+}
+ | r = \map F {n^+}
+ | c = $n^+ \in \omega$
+}}
+{{eqn | r = \map g {\map F n}
+ | c = [[Transfinite Recursion/Theorem 2|Second Principle of Transfinite Recursion]]
+}}
+{{eqn | r = \map g {\map f n}
+ | c = $n \in \omega$ and the definition of $f$
+}}
+{{end-eqn}}
+Therefore, such a function exists.
+Now, suppose $f$ and $f'$ are two [[Definition:Mapping|mappings]] that both satisfy the conditions of the theorem.
+Then immediately $\map f \O = a = \map {f'} \O$.
+Suppose further that $\map f n = \map {f'} n$ for some $n \in \omega$.
+Then:
+{{begin-eqn}}
+{{eqn | l = \map f {n^+}
+ | r = \map g {\map f n}
+ | c = [[Definition:By Hypothesis|By Hypothesis]]
+}}
+{{eqn | r = \map g {\map {f'} n}
+ | c = Inductive Hypothesis
+}}
+{{eqn | r = \map {f'} {n^+}
+ | c = [[Definition:By Hypothesis|By Hypothesis]]
+}}
+{{end-eqn}}
+So by [[Principle of Mathematical Induction|induction]], $f = f'$.
+This completes the proof.
+{{qed}}
+
+\end{proof}<|endoftext|>
+\section{Ordering on 1-Based Natural Numbers is Trichotomy}
+Tags: Natural Numbers: 1-Based
+
+\begin{theorem}
+Let $\N_{> 0}$ be the [[Definition:1-Based Natural Numbers|$1$-based natural numbers]].
+Let $<$ be the [[Definition:Ordering on 1-Based Natural Numbers|strict ordering on $\N_{>0}$]].
+Then for all $a, b \in \N_{> 0}$, exactly one of the following is true:
+:$(1): \quad a = b$
+:$(2): \quad a > b$
+:$(3): \quad a < b$
+That is, $<$ is a [[Definition:Trichotomy|trichotomy]] on $\N_{> 0}$.
+\end{theorem} + +\begin{proof} +Using the [[Axiom:Axiomatization of 1-Based Natural Numbers|following axioms]]: +{{:Axiom:Axiomatization of 1-Based Natural Numbers}} +[[Axiom:Axiomatization of 1-Based Natural Numbers|Axiom $E$]] states: +:$\forall a, b \in \N_{>0}$, either: +::$a = b$, in which case $(1)$ holds +::$\exists x \in \N_{> 0}: a = b + x$, in which case, by definition of the [[Definition:Ordering on Natural Numbers|ordering defined]], $a > b$, in which case $(2)$ holds +::$\exists x \in \N_{> 0}: a + x = b$, in which case, by definition of the [[Definition:Ordering on Natural Numbers|ordering defined]], $a < b$, in which case $(3)$ holds. +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Product of Quaternion Conjugates} +Tags: Quaternions + +\begin{theorem} +Let $\mathbf x, \mathbf y \in \mathbb H$ be [[Definition:Quaternion|quaternions]]. +Let $\overline{\mathbf x}$ be the [[Definition:Quaternion Conjugate|conjugate]] of $\mathbf x$. +Then: +:$\overline{\mathbf x \times \mathbf y} = \overline{\mathbf y} \times \overline{\mathbf x}$ +but in general: +:$\overline{\mathbf x \times \mathbf y} \ne \overline{\mathbf x} \times \overline{\mathbf y}$ +\end{theorem} + +\begin{proof} +Consider the [[Matrix Form of Quaternion|matrix form]] of $\mathbf x$ and $\mathbf y$: +:$\mathbf x = \begin{bmatrix} a & b \\ -\overline b & \overline a \end{bmatrix}$ +:$\mathbf y = \begin{bmatrix} c & d \\ -\overline d & \overline c \end{bmatrix}$ +where $a, b, c, d \in \C$. +{{begin-eqn}} +{{eqn | l = \overline{\mathbf x \times \mathbf y} + | r = \overline{\begin{bmatrix} a & b \\ -\overline b & \overline a \end{bmatrix} \begin{bmatrix} c & d \\ -\overline d & \overline c \end{bmatrix} } + | c = +}} +{{eqn | r = \overline{\begin{bmatrix} a c - b \overline d & a d + b \overline c \\ - \overline b c - \overline a \overline d & - \overline b d + \overline a \overline c \end{bmatrix} } + | c = {{Defof|Matrix Product (Conventional)}} +}} +{{eqn | r = \begin{bmatrix} \overline{a c - b \overline d} & -\left({a d + b \overline c}\right) \\ \overline b c + \overline a \overline d & \overline{- \overline b d + \overline a \overline c} \end{bmatrix} + | c = {{Defof|Conjugate Quaternion|subdef = Matrix Form}} +}} +{{eqn | r = \begin{bmatrix} \overline a \overline c - \overline b d & - a d - b \overline c \\ \overline {a d} + \overline b c & a c - b \overline d \end{bmatrix} + | c = [[Complex Conjugation is Automorphism]], [[Complex Conjugation is Involution]] +}} +{{end-eqn}} +{{begin-eqn}} +{{eqn | l = \overline{\mathbf y} \times \overline{\mathbf x} + | r = \overline{\begin{bmatrix} c & d \\ -\overline d & \overline c \end{bmatrix} } \ \overline{\begin{bmatrix} a & b \\ -\overline b & \overline a \end{bmatrix} } + | c = +}} +{{eqn | r = \begin{bmatrix} \overline c & - d \\ \overline d & c \end{bmatrix} \begin{bmatrix} \overline a & - b \\ \overline b & a \end{bmatrix} + | c = {{Defof|Conjugate Quaternion|subdef = Matrix Form}} +}} +{{eqn | r = \begin{bmatrix} \overline a \overline c - \overline b d & - a d - b \overline c \\ \overline {a d} + \overline b c & a c - b \overline d \end{bmatrix} + | c = {{Defof|Matrix Product (Conventional)}} +}} +{{eqn | r = \overline{\mathbf x \times \mathbf y} + | c = from above +}} +{{end-eqn}} +but: +{{begin-eqn}} +{{eqn | l = \overline{\mathbf x} \times \overline{\mathbf y} + | r = \overline{\begin{bmatrix} a & b \\ -\overline b & \overline a \end{bmatrix} } \ \overline{\begin{bmatrix} c & d \\ -\overline d & \overline c \end{bmatrix} } + | c = +}} +{{eqn | r = 
\begin{bmatrix} \overline a & - b \\ \overline b & a \end{bmatrix} \times \begin{bmatrix} \overline c & - d \\ \overline d & c \end{bmatrix} + | c = {{Defof|Conjugate Quaternion|subdef = Matrix Form}} +}} +{{eqn | r = \begin{bmatrix} \overline a \overline c - b \overline d & - \overline a d - b c \\ a \overline d + \overline {b c} & a c - \overline b d \end{bmatrix} + | c = {{Defof|Matrix Product (Conventional)}} +}} +{{eqn | o = \ne + | r = \overline{\mathbf x \times \mathbf y} + | c = from above +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Quaternion Modulus of Product of Quaternions} +Tags: Quaternion Modulus + +\begin{theorem} +Let $\mathbf x, \mathbf y$ be [[Definition:Quaternion|quaternions]]. +Let $\left\vert{\mathbf x}\right\vert$ be the [[Definition:Quaternion Modulus|modulus]] of $\mathbf x$. +Then: +:$\left\vert{\mathbf x \mathbf y}\right\vert = \left\vert{\mathbf x}\right\vert \left\vert{\mathbf y}\right\vert$ +\end{theorem} + +\begin{proof} +Let $\mathbf x, \mathbf y$ be in their [[Matrix Form of Quaternion|matrix form]]. +Then: +{{begin-eqn}} +{{eqn | l = \left\vert{\mathbf x \mathbf y}\right\vert + | r = \sqrt{\det \left({\mathbf x \mathbf y}\right)} + | c = Definition of [[Definition:Quaternion Modulus#Matrix Form|Modulus]] +}} +{{eqn | r = \sqrt{\det \left({\mathbf x}\right) \det \left({\mathbf y}\right)} + | c = [[Determinant of Matrix Product]] +}} +{{eqn | r = \sqrt{\det \left({\mathbf x}\right)} \sqrt{\det \left({\mathbf y}\right)} + | c = [[Exponent Combination Laws]] +}} +{{eqn | r = \left\vert{\mathbf x}\right\vert \left\vert{\mathbf y}\right\vert + | c = Definition of [[Definition:Quaternion Modulus#Matrix Form|Modulus]] +}} +{{end-eqn}} +{{qed}} +[[Category:Quaternion Modulus]] +j6tnwbfmkvn4zri49y8437bsd1g3eya +\end{proof}<|endoftext|> +\section{Octonion Conjugation is Involution} +Tags: Octonions, Involutions + +\begin{theorem} +Let $x = \tuple {a, b}: a, b \in \mathbb H$ be a [[Definition:Octonion|octonion]]. +Let $\overline x$ be the [[Definition:Conjugate of Octonion|conjugate]] of $x$. +Then: +:$\overline \cdot: x \mapsto \overline x$ +is an [[Definition:Involution (Mapping)|involution]]. +That is: +:$\overline {\paren {\overline x} } = x$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \overline {\paren {\overline x} } + | r = \overline {\paren {\overline {\tuple {a, b} } } } + | c = {{Defof|Octonion}} +}} +{{eqn | r = \overline {\tuple {\overline a, -b} } + | c = {{Defof|Conjugate of Octonion}} +}} +{{eqn | r = \tuple {\overline {\paren {\overline a} }, -\paren {-b} } + | c = {{Defof|Conjugate of Octonion}} +}} +{{eqn | r = \tuple {a, b} + | c = [[Quaternion Conjugation is Involution]] +}} +{{eqn | r = x + | c = {{Defof|Octonion}} +}} +{{end-eqn}} +{{qed}} +[[Category:Octonions]] +[[Category:Involutions]] +sr6an7ebrb6i8b07h018s11isrpfhg5 +\end{proof}<|endoftext|> +\section{Ordering on 1-Based Natural Numbers is Compatible with Addition} +Tags: Natural Numbers: 1-Based + +\begin{theorem} +Let $\N_{> 0}$ be the [[Definition:1-Based Natural Numbers|$1$-based natural numbers]]. +Let $+$ denote [[Definition:Addition on 1-Based Natural Numbers|addition]] on $\N_{>0}$. +Let $<$ be the [[Definition:Ordering on 1-Based Natural Numbers|strict ordering on $\N_{>0}$]]. +Then: +:$\forall a, b, n \in \N_{>0}: a < b \implies a + n < b + n$ +That is, $>$ is [[Definition:Relation Compatible with Operation|compatible]] with $+$ on $\N_{>0}$. 
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = a
+ | o = <
+ | r = b
+ | c = 
+}}
+{{eqn | ll= \implies
+ | l = \exists c \in \N_{>0}: b
+ | r = a + c
+ | c = Definition of [[Definition:Ordering on 1-Based Natural Numbers|Ordering on $1$-Based Natural Numbers]]
+}}
+{{eqn | ll= \implies
+ | l = b + n
+ | r = \left({a + c}\right) + n
+ | c = Definition of [[Definition:Binary Operation|binary operation]]
+}}
+{{eqn | r = a + \left({c + n}\right)
+ | c = [[Natural Number Addition is Associative]]
+}}
+{{eqn | r = a + \left({n + c}\right)
+ | c = [[Natural Number Addition is Commutative]]
+}}
+{{eqn | r = \left({a + n}\right) + c
+ | c = [[Natural Number Addition is Associative]]
+}}
+{{eqn | ll= \implies
+ | l = a + n
+ | o = <
+ | r = b + n
+ | c = Definition of [[Definition:Ordering on 1-Based Natural Numbers|Ordering on $1$-Based Natural Numbers]]
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Ordering on 1-Based Natural Numbers is Compatible with Multiplication}
+Tags: Natural Numbers: 1-Based
+
+\begin{theorem}
+Let $\N_{> 0}$ be the [[Definition:1-Based Natural Numbers|$1$-based natural numbers]].
+Let $\times$ denote [[Definition:Multiplication on 1-Based Natural Numbers|multiplication]] on $\N_{>0}$.
+Let $<$ be the [[Definition:Ordering on 1-Based Natural Numbers|strict ordering on $\N_{>0}$]].
+Then:
+:$\forall a, b, n \in \N_{>0}: a < b \implies a \times n < b \times n$
+That is, $<$ is [[Definition:Relation Compatible with Operation|compatible]] with $\times$ on $\N_{>0}$.
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = a
+ | o = <
+ | r = b
+ | c = 
+}}
+{{eqn | ll= \leadsto
+ | l = \exists c \in \N_{>0}: b
+ | r = a + c
+ | c = {{Defof|Ordering on 1-Based Natural Numbers|$<$ on $\N_{>0}$}}
+}}
+{{eqn | ll= \leadsto
+ | l = b \times n
+ | r = \paren {a + c} \times n
+ | c = {{Defof|Binary Operation}}
+}}
+{{eqn | r = \paren {a \times n} + \paren {c \times n}
+ | c = [[Natural Number Multiplication Distributes over Addition]]
+}}
+{{eqn | ll= \leadsto
+ | l = a \times n
+ | o = <
+ | r = b \times n
+ | c = {{Defof|Ordering on 1-Based Natural Numbers|$<$ on $\N_{>0}$}}: $c \times n \in \N_{> 0}$
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Integral between Limits is Independent of Direction}
+Tags: Definite Integrals
+
+\begin{theorem}
+Let $f$ be a [[Definition:Real Function|real function]] which is [[Definition:Integrable Function|integrable]] on the [[Definition:Open Real Interval|interval]] $\openint a b$.
+Then:
+:$\ds \int_a^b \map f x \rd x = \int_a^b \map f {a + b - x} \rd x$
+\end{theorem}
+
+\begin{proof}
+Let $z = a + b - x$.
+Then:
+:$\dfrac {\d z} {\d x} = -1$
+and:
+:$x = a \implies z = a + b - a = b$
+:$x = b \implies z = a + b - b = a$
+So:
+{{begin-eqn}}
+{{eqn | l = \int_a^b \map f {a + b - x} \rd x
+ | r = \int_b^a \map f z \paren {-1} \rd z
+ | c = [[Integration by Substitution]]
+}}
+{{eqn | r = \int_a^b \map f z \rd z
+ | c = [[Reversal of Limits of Definite Integral]]
+}}
+{{eqn | r = \int_a^b \map f x \rd x
+ | c = renaming variables
+}}
+{{end-eqn}}
+{{qed}}
+[[Category:Definite Integrals]]
+\end{proof}<|endoftext|>
+\section{Countable Set equals Range of Sequence}
+Tags: Countable Sets
+
+\begin{theorem}
+Let $S$ be a [[Definition:Set|set]].
+Then $S$ is [[Definition:Countable Set|countable]] {{iff}} there exists a [[Definition:Sequence|sequence]] $\sequence {s_i}_{i \mathop \in N}$ where $N$ is a [[Definition:Subset|subset]] of $\N$ such that $S$ equals the [[Definition:Range of Sequence|range]] of $\sequence {s_i}_{i \mathop \in N}$. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Assume that $S$ is [[Definition:Countable Set/Definition 2|countable]]. +We need to prove that there exists a [[Definition:Sequence|sequence]] $\sequence {s_i}_{i \mathop \in N}$, $N \subseteq \N$, such that $S$ equals the [[Definition:Range of Sequence|range]] of $\sequence {s_i}_{i \mathop \in N}$. +The [[Definition:Range of Sequence|range]] of $\sequence {s_i}_{i \mathop \in N}$ is defined as $\set {s_i: i \in N}$. +;Case 1. $S$ is [[Definition:Empty Set|empty]]. +[[Empty Set is Countable]] confirms that $S$ is [[Definition:Countable Set/Definition 2|countable]]. +Define the [[Definition:Sequence/Empty Sequence|empty sequence]] as $\sequence {s_i}_{i \mathop \in \O}$. +The [[Definition:Range of Sequence|range]] of the empty sequence is $\set {s_i: i \in \O} = \O$. +$S$ equals the range of the empty sequence as $S$ equals $\O$. +This finishes the argument for this case. +;Case $2$. $S$ is [[Definition:Finite Set|finite]] and not [[Definition:Empty Set|empty]]. +Let $n \in \N_{>0}$ be the number of [[Definition:Element|elements]] of $S$. +Define a [[Definition:Sequence|sequence]] $\sequence {s_i}_{1 \mathop \le \mathop i \mathop \le \mathop n}$ by going through every element of S: +:Let $s_1$ be any element of $S$. +:Let $s_2$ be any element of $S \setminus \set {s_1}$. +:... +:Let $s_n$ be the single element of $S \setminus \set {s_i: 1 \le i \le n - 1}$. +From this definition follows that every [[Definition:Element|element]] of $S$ equals some $s_i$, $1 \le i \le n$. +So, $S \subseteq \set {s_i: 1 \le i \le n}$. +Every number $s_i$, $1 \le i \le n$, equals some element of $S$. +So, $\set {s_i: 1 \le i \le n} \subseteq S$. +Accordingly, $S = \set {s_i: 1 \le i \le n}$. +By the definition of [[Definition:Range of Sequence|range]], $\set {s_i: 1 \le i \le n}$ equals the [[Definition:Range of Sequence|range]] of the [[Definition:Sequence|sequence]] $\sequence {s_i}_{1 \mathop \le \mathop i \mathop \le \mathop n}$. +Therefore, $S$ equals the [[Definition:Range of Sequence|range]] of $\sequence {s_i}_{1 \mathop \le \mathop i \mathop \le \mathop n}$. +This finishes the proof for case 2. +;Case $3$. $S$ is [[Definition:Infinite Set|infinite]]. +Since $S$ is [[Definition:Countable Set/Definition 2|countable]] and [[Definition:Infinite Set|infinite]], $S$ is [[Definition:Countably Infinite Set|countably infinite]]. +By the definition of [[Definition:Countably Infinite Set|countably infinite]], $S$ can be written: +:$\set {t_0, t_1, \ldots, t_n, \ldots}$ +where $n$ runs over all the [[Definition:Natural Numbers|natural numbers]], $\N$. +In other words, we have: +:$S = \set {t_i: i \in \N}$ +We intend to produce a [[Definition:Sequence|sequence]] from $S$. +We start with the [[Definition:Sequence/Empty Sequence|empty sequence]]. +Using $\N$, the set to which $S$ is [[Definition:Set Equivalence|equivalent]], we put all the [[Definition:Element|elements]] of $S$ one by one into the sequence. +The result is $\sequence {t_i}_{i \mathop \in \N}$. +The [[Definition:Range of Sequence|range]] of $\sequence {t_i}_{i \mathop \in \N}$ is $\set {t_i: i \in \N}$, which equals $S$. 
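+For instance, if $S = \set {2 i: i \in \N}$, the set of even [[Definition:Natural Numbers|natural numbers]], then the [[Definition:Sequence|sequence]] $\sequence {t_i}_{i \mathop \in \N}$ defined by $t_i = 2 i$ has [[Definition:Range of Sequence|range]] exactly $S$.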
+In other words, $S$ is the [[Definition:Range of Sequence|range]] of the [[Definition:Sequence|sequence]] $\sequence {t_i}_{i \mathop \in \N}$. +This finishes the necessary part of the proof. +{{qed}} +=== Sufficient Condition === +Assume that there exists a [[Definition:Sequence|sequence]] $\sequence {s_i}_{i \mathop \in N}$ where $N$ is a [[Definition:Subset|subset]] of $\N$ such that $S$ equals the [[Definition:Range of Sequence|range]] of that sequence. +We need to prove that $S$ is [[Definition:Countable Set/Definition 2|countable]]. +;Case 1. $S$ is [[Definition:Finite Set|finite]]. +By the definition of [[Definition:Countable Set/Definition 2|countable]] $S$ is countable. +;Case 2. $S$ is [[Definition:Infinite Set|infinite]]. +We know that $S$ equals the [[Definition:Range of Sequence|range]] of the [[Definition:Sequence|sequence]] $\sequence {s_i}_{i \mathop \in N}$ where $N$ is an [[Definition:Infinite Set|infinite]] [[Definition:Subset|subset]] of $\N$. +The [[Definition:Range of Sequence|range]] of $\sequence {s_i}_{i \mathop \in N}$ is defined as $\set {s_i: i \in N}$, so we have +:$S = \set {s_i: i \in N}$ +We need to prove that $S$ is [[Definition:Countably Infinite Set|countably infinite]]. +That a set is [[Definition:Countably Infinite Set|countably infinite]], means that it is of the form: +:$\set {t_0, t_1, \ldots, t_n, \ldots}$ +where $n$ runs over all the [[Definition:Natural Numbers|natural numbers]], $\N$. +Accordingly, we need to show that a set $\set {t_i: i \in \N}$ exists such that: +:$S = \set {t_i: i \in \N}$ +Assume that $N = \N$. +Then $S$ is [[Definition:Countably Infinite Set|countably infinite]] by definition. +The only other alternative is $N \subset \N$. +Define: +:$t_i = s_i$ for every $i$ in $N$ +:$t_i = s_1$ for every $i$ in $\N \setminus N$ +Thus, $t_i$ is defined for every $i$ in $\N$. +We have: +{{begin-eqn}} +{{eqn | l = \set {t_i: i \in \N} + | r = \set {t_i: i \in N} \cup \set {t_i: i \in \N \setminus N} + | c = as $\N = N \cup \paren {\N \setminus N}$ +}} +{{eqn | r = \set {s_i: i \in N} \cup \set {t_i: i \in \N \setminus N} + | c = as $s_i = t_i$ for every $i$ in $N$ +}} +{{eqn | r = S \cup \set {t_i: i \in \N \setminus N} + | c = by the definition of $S$ +}} +{{eqn | r = S \cup \set {s_1} + | c = as $t_i = s_1$ for every $i$ in $\N \setminus N$ +}} +{{eqn | r = S + | c = as $\set {s_1} \subset S$ as $s_1 \in S$ +}} +{{end-eqn}} +All in all, $S = \set {t_i: i \in \N}$. +Therefore, $S$ is [[Definition:Countably Infinite Set|countably infinite]] and thus [[Definition:Countable Set/Definition 2|countable]]. +This finishes the last part of the proof. +{{qed}} +\end{proof}<|endoftext|> +\section{Initial Segment of Natural Numbers determined by Zero is Empty} +Tags: Natural Numbers + +\begin{theorem} +Let $\N_k$ denote the [[Definition:Initial Segment of Natural Numbers|initial segment of the natural numbers]] determined by $k$: +:$\N_k = \set {0, 1, 2, 3, \ldots, k - 1}$ +Then $\N_0 = \O$. +\end{theorem} + +\begin{proof} +From the [[Definition:Initial Segment of Natural Numbers|definition of $\N_0$]]: +:$\N_0 = \set {n \in \N: n < 0}$ +From the definition of [[Definition:Zero (Number)|zero]], $0$ is the [[Definition:Minimal Element|minimal element of $\N$]]. +So there is no [[Definition:Element|element]] $n$ of $\N$ such that $n < 0$. +Thus $\N_0 = \O$. 
+{{qed}} +[[Category:Natural Numbers]] +qbigqp7k4plig58dwsn1gh24fsrjyqb +\end{proof}<|endoftext|> +\section{Initial Segment of One-Based Natural Numbers determined by Zero is Empty} +Tags: Natural Numbers + +\begin{theorem} +Let $\N^*_k$ denote the [[Definition:Initial Segment of One-Based Natural Numbers|initial segment of the one-based natural numbers]] determined by $k$: +:$\N^*_k = \set {1, 2, 3, \ldots, k - 1, k}$ +Then $\N^*_0 = \O$. +\end{theorem} + +\begin{proof} +From the [[Definition:Initial Segment of One-Based Natural Numbers|definition of $\N^*_0$]]: +:$\N^*_0 = \set {n \in \N_{>0}: n \le 0}$ +From the definition of [[Definition:One|one]], the [[Definition:Minimal Element|minimal element]] of $\N_{>0}$ is $1$. +From [[Zero Strictly Precedes One]] we know that $0 < 1$. +So there is no [[Definition:Element|element]] $n$ of $\N_{>0}$ such that $n \le 0$. +Thus $\N^*_0 = \O$. +{{qed}} +\end{proof}<|endoftext|> +\section{Heine-Borel Theorem/Real Line/Closed and Bounded Interval} +Tags: Real Analysis, Direct Proofs + +\begin{theorem} +Let $\left[{a \,.\,.\, b}\right]$, $a < b$, be a [[Definition:Closed Real Interval|closed and bounded real interval]]. +Let $S$ be a [[Definition:Set|set]] of [[Definition:Open Set (Real Analysis)|open real sets]]. +Let $S$ be a [[Definition:Cover of Set|cover]] of $\left[{a \,.\,.\, b}\right]$. +Then there is a [[Definition:Finite Set|finite]] [[Definition:Subset|subset]] of $S$ that [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, b}\right]$. +\end{theorem} + +\begin{proof} +Consider the [[Definition:Set|set]] $T = \left\{ {x \in \left[{a \,.\,.\, b}\right]: \left[{a \,.\,.\, x}\right] \text { is covered by a finite subset of } S}\right\}$. +=== Step 1: $a \in T$ === +It is demonstrated that the [[Definition:Real Number|number]] $a$ is an [[Definition:Element|element]] of $T$. +Consider the [[Definition:Real Interval|interval]] $\left[ {a \,.\,.\, a} \right]$. +$S$ [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, a}\right]$ as it [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, b}\right]$, a [[Definition:Superset|superset]] of $\left[{a \,.\,.\, a}\right]$. +Since $S$ [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, a}\right]$, there is a [[Definition:Set|set]] $O$ in $S$ that [[Definition:Element|contains]] $a$. +We observe: +:$\left\{ {O}\right\}$ is a [[Definition:Subset|subset]] of $S$ +:$\left\{ {O}\right\}$ is [[Definition:Finite Set|finite]] as it contains one [[Definition:Element|element]] +:$\left\{ {O}\right\}$ [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, a}\right]$ +By the definition of $T$, $a$ is an [[Definition:Element|element]] of $T$. +=== Step 2: $T$ has Supremum === +It is demonstrated that $T$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +We know that $a \in T$. +Therefore, $T$ is [[Definition:Non-Empty Set|non-empty]]. +Also, $T$ is [[Definition:Bounded Subset of Real Numbers|bounded]] as $T$ is a [[Definition:Subset|subset]] of $\left[{a \,.\,.\, b}\right]$ which is [[Definition:Bounded Subset of Real Numbers|bounded]]. +From these two properties of $T$, it follows by the [[Continuum Property|continuum property of the real numbers]] that $T$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]], which will be denoted $c$. +=== Step 3: $c \in \left[{a \,.\,.\, b}\right]$ === +It is demonstrated that the [[Definition:Real Number|number]] $c$ is an [[Definition:Element|element]] of $\left[{a \,.\,.\, b}\right]$. 
+Every [[Definition:Element|element]] of $T$ is less than or equal to $b$. +So, $b$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $T$. +Therefore, $c \le b$ as $c$ is the [[Definition:Supremum of Subset of Real Numbers|least upper bound]] of T. +Also, $c \ge a$ because $a \in T$ and $c$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $T$. +Accordingly, $c \in \left[{a \,.\,.\, b}\right]$. +=== Step 4: Finite Cover of $\left[{a \,.\,.\, y}\right]$ === +It is demonstrated that there exists a [[Definition:Finite Subset|finite subset]] of $S$ [[Definition:Cover of Set|covering]] $\left[{a \,.\,.\, y}\right]$ where $y > c$. +We know that $S$ [[Definition:Cover of Set|covers]] $\left[ {a \,.\,.\, b} \right]$. +Also, $c \in \left[{a \,.\,.\, b}\right]$. +From these two facts follows that there is a [[Definition:Set|set]] $O_c$ in $S$ that [[Definition:Element|contains]] $c$. +An $\epsilon \in \R_{>0}$ exists such that $\left({c - \epsilon \,.\,.\, c + \epsilon}\right) \subset O_c$ as $O_c$ is [[Definition:Open Set (Real Analysis)|open]]. +A number $x \in T$ exists in $\left({c - \epsilon \,.\,.\, c} \right)$ as $c$ is the [[Definition:Supremum of Subset of Real Numbers|least upper bound]] of $T$. +By the definition of $T$, a [[Definition:Finite Subset|finite subset]] $S_x$ of $S$ exists such that $S_x$ [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, x}\right]$ as $x \in T$. +Let $y$ be a number in $\left({c \,.\,.\, c + \epsilon}\right)$. +We observe that $y > c$. +We have that $\left\{{O_c}\right\}$ [[Definition:Cover of Set|covers]] $\left({c - \epsilon \,.\,.\, c + \epsilon}\right)$ as $\left({c - \epsilon \,.\,.\, c + \epsilon}\right) \subset O_c$. +Therefore, $\left\{{O_c}\right\}$ [[Definition:Cover of Set|covers]] $\left[{x \,.\,.\, y}\right]$ as $\left[{x \,.\,.\, y}\right] \subset \left({c - \epsilon \,.\,.\, c + \epsilon}\right)$. +We observe the [[Definition:Set|collection]] $S_x \cup \left\{{O_c}\right\}$: +: $S_x \cup \left\{{O_c}\right\}$ is a [[Definition:Subset|subset]] of $S$ +: $S_x \cup \left\{{O_c}\right\}$ is [[Definition:Finite Set|finite]] +: $S_x \cup \left\{{O_c}\right\}$ [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, y}\right]$, the [[Definition:Set Union|union]] of $\left[{a \,.\,.\, x}\right]$ and $\left[{x \,.\,.\, y}\right]$ +In other words, $S_x \cup \left\{ {O_c}\right\}$ is a [[Definition:Finite Subset|finite subset]] of $S$ [[Definition:Cover of Set|covering]] $\left[{a \,.\,.\, y}\right]$. +=== Step 5: $y > b$ === +It is demonstrated that $y > b$. +We know that $y > c$. +Therefore, $y \notin T$ as $c$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $T$. +Let us find out exactly why $y \notin T$. +There are two requirements for $y$ to be in $T$: +: $\left[{a \,.\,.\, y}\right]$ is [[Definition:Cover of Set|covered]] by a [[Definition:Finite Subset|finite subset]] of $S$ +: $y \in \left[{a \,.\,.\, b}\right]$ +The first requirement is satisfied as $S_x \cup \left\{{O_c}\right\}$ is a [[Definition:Finite Subset|finite subset]] of $S$ [[Definition:Cover of Set|covering]] $\left[{a \,.\,.\, y}\right]$. +Therefore, the second requirement is not satisfied as this is the last requirement and the only possibility to satisfy $y \notin T$. +Accordingly, $y \notin \left[{a \,.\,.\, b}\right]$. 
+Furthermore:
+{{begin-eqn}}
+{{eqn | l = y
+ | o = >
+ | r = c
+}}
+{{eqn | ll = \iff
+ | l = y
+ | o = >
+ | r = c \land c \ge a
+ | c = as $c \ge a$ since $c \in \left[{a \,.\,.\, b}\right]$
+}}
+{{eqn | ll = \implies
+ | l = y
+ | o = >
+ | r = a
+}}
+{{eqn | ll = \iff
+ | l = y
+ | o = >
+ | r = a \land y \notin \left[{a \,.\,.\, b}\right]
+ | c = as $y \notin \left[{a \,.\,.\, b}\right]$
+}}
+{{eqn | ll = \iff
+ | l = y
+ | o = >
+ | r = b
+}}
+{{end-eqn}}
+=== Step 6: Finite Cover of $\left[{a \,.\,.\, b}\right]$ ===
+It is demonstrated that there exists a [[Definition:Finite Subset|finite subset]] of $S$ [[Definition:Cover of Set|covering]] $\left[{a \,.\,.\, b}\right]$.
+We know that $S_x \cup \left\{{O_c}\right\}$ is a [[Definition:Finite Subset|finite subset]] of $S$ [[Definition:Cover of Set|covering]] $\left[{a \,.\,.\, y}\right]$.
+Also, $\left[{a \,.\,.\, b}\right]$ is a [[Definition:Subset|subset]] of $\left[{a \,.\,.\, y}\right]$ as $y > b$.
+Therefore, $S_x \cup \left\{{O_c}\right\}$ [[Definition:Cover of Set|covers]] $\left[{a \,.\,.\, b}\right]$.
+So, $\left[{a \,.\,.\, b}\right]$ is [[Definition:Cover of Set|covered]] by a [[Definition:Finite Subset|finite subset]] of $S$.
+{{qed}}
+{{namedfor|Heinrich Eduard Heine|name2 = Émile Borel}}
+\end{proof}<|endoftext|>
+\section{Addition on 1-Based Natural Numbers is Cancellable}
+Tags: Natural Numbers: 1-Based
+
+\begin{theorem}
+Let $\N_{> 0}$ be the [[Axiom:Axiomatization of 1-Based Natural Numbers|$1$-based natural numbers]].
+Let $+$ be [[Definition:Addition on 1-Based Natural Numbers|addition]] on $\N_{>0}$.
+Then:
+:$\forall a, b, c \in \N_{>0}: a + c = b + c \implies a = b$
+:$\forall a, b, c \in \N_{>0}: a + b = a + c \implies b = c$
+That is, $+$ is [[Definition:Cancellable Operation|cancellable]] on $\N_{>0}$.
+\end{theorem}
+
+\begin{proof}
+By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], one and only one of the following holds:
+:$a = b$
+:$a < b$
+:$b < a$
+Suppose $a < b$.
+Then by [[Ordering on 1-Based Natural Numbers is Compatible with Addition|Ordering on $1$-Based Natural Numbers is Compatible with Addition]]:
+:$a + c < b + c$
+By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this contradicts the fact that $a + c = b + c$.
+Similarly, suppose $b < a$.
+Then by [[Ordering on 1-Based Natural Numbers is Compatible with Addition|Ordering on $1$-Based Natural Numbers is Compatible with Addition]]:
+:$b + c < a + c$
+By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this also contradicts the fact that $a + c = b + c$.
+The only other possibility is that $a = b$.
+So
+:$\forall a, b, c \in \N_{>0}: a + c = b + c \implies a = b$
+and so $+$ is [[Definition:Right Cancellable Operation|right cancellable]] on $\N_{>0}$.
+From [[Natural Number Addition is Commutative]] and [[Right Cancellable Commutative Operation is Left Cancellable]]:
+:$\forall a, b, c \in \N_{>0}: a + b = a + c \implies b = c$
+So $+$ is both [[Definition:Right Cancellable Operation|right cancellable]] and [[Definition:Left Cancellable Operation|left cancellable]] on $\N_{>0}$.
+Hence the result.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Addition on 1-Based Natural Numbers is Cancellable for Ordering}
+Tags: Natural Numbers: 1-Based
+
+\begin{theorem}
+Let $\N_{> 0}$ be the [[Definition:1-Based Natural Numbers|$1$-based natural numbers]].
+Let $<$ be the [[Definition:Ordering on 1-Based Natural Numbers|strict ordering on $\N_{>0}$]]. +Let $+$ be [[Definition:Addition on 1-Based Natural Numbers|addition]] on $\N_{>0}$. +Then: +:$\forall a, b, c \in \N_{>0}: a + c < b + c \implies a < b$ +:$\forall a, b, c \in \N_{>0}: a + b < a + c \implies b < c$ +That is, $+$ is [[Definition:Cancellable Operation|cancellable]] on $\N_{>0}$ for $<$. +\end{theorem} + +\begin{proof} +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], one and only one of the following holds: +:$a = b$ +:$a < b$ +:$b < a$ +Let $a + c < b + c$. +Suppose $a = b$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Addition|Ordering on $1$-Based Natural Numbers is Compatible with Addition]]: +:$a + c = b + c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this contradicts the fact that $a + c < b + c$. +Similarly, suppose $b < a$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Addition|Ordering on $1$-Based Natural Numbers is Compatible with Addition]]: +:$b + c < a + c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this also contradicts the fact that $a + c < b + c$. +The only other possibility is that $a < b$. +So +:$\forall a, b, c \in \N_{>0}: a + c = b + c \implies a < b$ +and so $+$ is [[Definition:Right Cancellable Operation|right cancellable]] on $\N_{>0}$ for $<$. +Let $a + b < a + c$. +Suppose $b = c$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Addition|Ordering on $1$-Based Natural Numbers is Compatible with Addition]]: +:$a + b = a + c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this contradicts the fact that $a + b < a + c$. +Similarly, suppose $c < b$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Addition|Ordering on $1$-Based Natural Numbers is Compatible with Addition]]: +:$a + c < a + b$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this also contradicts the fact that $a + b < a + c$. +The only other possibility is that $b < c$. +So +:$\forall a, b, c \in \N_{>0}: a + b < a + c \implies b < c$ +and so $+$ is [[Definition:Left Cancellable Operation|left cancellable]] on $\N_{>0}$ for $<$. +From [[Natural Number Addition is Commutative]] and [[Right Cancellable Commutative Operation is Left Cancellable]]: +:$\forall a, b, c \in \N_{>0}: a + b = a + c \implies b = c$ +So $+$ is both [[Definition:Right Cancellable Operation|right cancellable]] and [[Definition:Left Cancellable Operation|left cancellable]] on $\N_{>0}$. +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Multiplication on 1-Based Natural Numbers is Cancellable} +Tags: Natural Numbers: 1-Based + +\begin{theorem} +Let $\N_{> 0}$ be the [[Definition:1-Based Natural Numbers|$1$-based natural numbers]]. +Let $\times$ be [[Definition:Multiplication on 1-Based Natural Numbers|multiplication]] on $\N_{>0}$. +Then: +:$\forall a, b, c \in \N_{>0}: a \times c = b \times c \implies a = b$ +:$\forall a, b, c \in \N_{>0}: a \times b = a \times c \implies b = c$ +That is, $\times$ is [[Definition:Cancellable Operation|cancellable]] on $\N_{>0}$. 
+\end{theorem} + +\begin{proof} +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], one and only one of the following holds: +:$a = b$ +:$a < b$ +:$b < a$ +Suppose $a < b$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Multiplication|Ordering on $1$-Based Natural Numbers is Compatible with Multiplication]]: +:$a \times c < b \times c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this contradicts the fact that $a \times c = b \times c$. +Similarly, suppose $b > a$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Multiplication|Ordering on $1$-Based Natural Numbers is Compatible with Multiplication]]: +:$b \times c < a \times c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this also contradicts the fact that $a \times c = b \times c$. +The only other possibility is that $a = b$. +So +:$\forall a, b, c \in \N_{>0}: a \times c = b \times c \implies a = b$ +and so $\times$ is [[Definition:Right Cancellable Operation|right cancellable]] on $\N_{>0}$. +From [[Natural Number Multiplication is Commutative]] and [[Right Cancellable Commutative Operation is Left Cancellable]]: +:$\forall , b, c \in \N_{>0}: a \times b = a \times c \implies b = c$ +So $\times$ is both [[Definition:Right Cancellable Operation|right cancellable]] and [[Definition:Left Cancellable Operation|left cancellable]] on $\N_{>0}$. +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Multiplication on 1-Based Natural Numbers is Cancellable for Ordering} +Tags: Natural Numbers: 1-Based + +\begin{theorem} +Let $\N_{> 0}$ be the [[Definition:1-Based Natural Numbers|$1$-based natural numbers]]. +Let $\times$ be [[Definition:Multiplication on 1-Based Natural Numbers|multiplication]] on $\N_{>0}$. +Let $<$ be the [[Definition:Ordering on 1-Based Natural Numbers|strict ordering on $\N_{>0}$]]. +Then: +:$\forall a, b, c \in \N_{>0}: a \times c < b \times c \implies a < b$ +:$\forall a, b, c \in \N_{>0}: a \times b < a \times c \implies b < c$ +That is, $\times$ is [[Definition:Cancellable Operation|cancellable]] on $\N_{>0}$ for $<$. +\end{theorem} + +\begin{proof} +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], one and only one of the following holds: +:$a = b$ +:$a < b$ +:$b < a$ +Let $a \times c < b \times c$. +Suppose $a = b$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Multiplication|Ordering on $1$-Based Natural Numbers is Compatible with Multiplication]]: +:$a \times c = b \times c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this contradicts the fact that $a \times c < b \times c$. +Similarly, suppose $b < a$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Multiplication|Ordering on $1$-Based Natural Numbers is Compatible with Multiplication]]: +:$b \times c < a \times c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this also contradicts the fact that $a \times c < b \times c$. +The only other possibility is that $a < b$. +So +:$\forall a, b, c \in \N_{>0}: a \times c = b \times c \implies a < b$ +and so $\times$ is [[Definition:Right Cancellable Operation|right cancellable]] on $\N_{>0}$ for $<$. +Let $a \times b < a \times c$. +Suppose $b = c$. 
+Then by [[Ordering on 1-Based Natural Numbers is Compatible with Multiplication|Ordering on $1$-Based Natural Numbers is Compatible with Multiplication]]: +:$a \times b = a \times c$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this contradicts the fact that $a \times b < a \times c$. +Similarly, suppose $c < b$. +Then by [[Ordering on 1-Based Natural Numbers is Compatible with Multiplication|Ordering on $1$-Based Natural Numbers is Compatible with Multiplication]]: +:$a \times c < a \times b$ +By [[Ordering on 1-Based Natural Numbers is Trichotomy|Ordering on $1$-Based Natural Numbers is Trichotomy]], this also contradicts the fact that $a \times b < a \times c$. +The only other possibility is that $b < c$. +So +:$\forall a, b, c \in \N_{>0}: a \times b < a \times c \implies b < c$ +and so $\times$ is [[Definition:Left Cancellable Operation|left cancellable]] on $\N_{>0}$ for $<$. +From [[Natural Number Multiplication is Commutative]] and [[Right Cancellable Commutative Operation is Left Cancellable]]: +:$\forall a, b, c \in \N_{>0}: a \times b = a \times c \implies b = c$ +So $+$ is both [[Definition:Right Cancellable Operation|right cancellable]] and [[Definition:Left Cancellable Operation|left cancellable]] on $\N_{>0}$. +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Index of Trivial Subgroup is Cardinality of Group} +Tags: Order of Groups, Index of Subgroups + +\begin{theorem} +Let $G$ be a [[Definition:Group|group]] whose [[Definition:Identity Element|identity element]] is $e$. +Let $\set e$ be the [[Definition:Trivial Subgroup|trivial subgroup]] of $G$. +Then: +:$\index G {\set e} = \order G$ +where: +:$\index G {\set e}$ denotes the [[Definition:Index of Subgroup|index]] of $\set e$ in $G$ +:$\order G$ denotes the [[Definition:Cardinality|cardinality]] of $G$. +\end{theorem} + +\begin{proof} +By definition of [[Definition:Cardinality|cardinality]] and the [[Definition:Trivial Subgroup|trivial subgroup]]: +:$\order {\set e} = 1$ +From [[Lagrange's Theorem (Group Theory)|Lagrange's Theorem]]: +:$\index G {\set e} = \dfrac {\order G} {\order {\set e} } = \dfrac {\order G} 1 = \order G$ +{{qed}} +\end{proof}<|endoftext|> +\section{Index is One iff Subgroup equals Group} +Tags: Order of Groups, Index of Subgroups + +\begin{theorem} +Let $G$ be a [[Definition:Group|group]] whose [[Definition:Identity Element|identity element]] is $e$. +Let $H$ be a [[Definition:Subgroup|subgroup]] of $G$. +Then: +:$\index G H = 1 \iff G = H$ +where $\index G H$ denotes the [[Definition:Index of Subgroup|index]] of $H$ in $G$. +\end{theorem} + +\begin{proof} +For [[Definition:Finite Group|finite groups]], we can apply [[Lagrange's Theorem (Group Theory)|Lagrange's Theorem]]: +:$\index G H = \dfrac {\order G} {\order H}$ +But then: +: $\dfrac {\order G} {\order H} = 1 \iff \order G = \order H$ +Hence the result. +For the general case (including [[Definition:Infinite Group|infinite groups]]) we need to consider the [[Definition:Coset Space/Left Coset Space|(left) coset space]] $G / H$. +Note that we must have $e H = H \in G / H$. +Hence: +:$\index G H = 1 \iff G / H = \set H \iff G = H$ +{{qed}} +\end{proof}<|endoftext|> +\section{Set Equality is Equivalence Relation} +Tags: Set Theory, Equivalence Relations + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]]. 
+[[Definition:Set Equality|Set equality]] is an [[Definition:Equivalence Relation|equivalence relation]] on the [[Definition:Power Set|power set]] $\powerset S$ of $S$. +\end{theorem} + +\begin{proof} +Checking in turn each of the criteria for [[Definition:Equivalence Relation|equivalence]]: +=== Reflexivity === +Let $A \in \powerset S$. +From [[Set Equals Itself]]: +:$A = A$ +So [[Definition:Set Equality|set equality]] has been shown to be [[Definition:Reflexive Relation|reflexive]] on $\powerset S$. +{{qed|lemma}} +=== Symmetry === +Let $A, B \in \powerset S$. +Let $A = B$. +Then by definition of [[Definition:Set Equality|set equality]]: +:$A \subseteq B$ +:$B \subseteq A$ +from which it follows by definition of [[Definition:Set Equality|set equality]] that $B = A$. +So [[Definition:Set Equality|set equality]] has been shown to be [[Definition:Symmetric Relation|symmetric]] on $\powerset S$. +{{qed|lemma}} +=== Transitivity === +Let $A, B, C \in \powerset S$. +Let $A = B$ and $B = C$. +Then by definition of [[Definition:Set Equality|set equality]]: +:$(1): \quad A \subseteq B$ +:$(2): \quad B \subseteq C$ +:$(3): \quad C \subseteq B$ +:$(4): \quad B \subseteq A$ +From $(1)$ and $(2)$ and [[Subset Relation is Transitive]]: +:$A \subseteq C$ +From $(3)$ and $(4)$ and [[Subset Relation is Transitive]]: +:$C \subseteq A$ +from which it follows by definition of [[Definition:Set Equality|set equality]] that $A = C$. +So [[Definition:Set Equality|set equality]] has been shown to be [[Definition:Transitive Relation|transitive]] on $\powerset S$. +{{qed|lemma}} +[[Definition:Set Equality|Set equality]] has been shown to be [[Definition:Reflexive Relation|reflexive]], [[Definition:Symmetric Relation|symmetric]] and [[Definition:Transitive Relation|transitive]] on $\powerset S$. +Hence by definition it is an [[Definition:Equivalence Relation|equivalence relation]] on $\powerset S$. +{{qed}} +\end{proof}<|endoftext|> +\section{Subset Relation is Antisymmetric} +Tags: Subsets + +\begin{theorem} +The [[Definition:Relation|relation]] "is a [[Definition:Subset|subset]] of" is [[Definition:Antisymmetric Relation|antisymmetric]]: +:$\paren {R \subseteq S} \land \paren {S \subseteq R} \iff R = S$ +\end{theorem} + +\begin{proof} +This is a direct statement of the definition of [[Definition:Set Equality/Definition 2|set equality]]: +:$R = S := \paren {R \subseteq S} \land \paren {S \subseteq R}$ +{{qed}} +\end{proof}<|endoftext|> +\section{There Exists No Universal Set} +Tags: Naive Set Theory + +\begin{theorem} +There exists no [[Definition:Set|set]] which is an absolutely [[Definition:Universe (Set Theory)|universal set]]. +That is: +:$\neg \left({\exists \mathcal U: \forall T: T \in \mathcal U}\right)$ +where $T$ is any arbitrary [[Definition:Object|object]] at all. +That is, a [[Definition:Set|set]] that contains ''everything'' cannot exist. +\end{theorem} + +\begin{proof} +Suppose such a $\mathcal U$ exists. +Using the [[Axiom:Axiom of Specification|Axiom of Specification]], we can create the [[Definition:Set|set]]: +:$R = \left\{{x \in \mathcal U: x \notin x}\right\}$ +But from [[Russell's Paradox]], this set cannot exist. +Thus: +:$R \notin \mathcal U$ +and so $\mathcal U$ cannot contain everything. +{{qed}} +\end{proof}<|endoftext|> +\section{Union of Disjoint Singletons is Doubleton} +Tags: Set Union, Singletons, Doubletons + +\begin{theorem} +Let $\set a$ and $\set b$ be [[Definition:Singleton|singletons]] such that $a \ne b$. 
+Then: +:$\set a \cup \set b = \set {a, b}$ +\end{theorem} + +\begin{proof} +Let $x \in \set a \cup \set b$. +Then by the [[Axiom:Axiom of Unions|Axiom of Unions]]: +:$x = a \lor x = b$ +It follows from the [[Axiom:Axiom of Pairing|Axiom of Pairing]] that: +:$x \in \set {a, b}$ +Thus by definition of [[Definition:Subset|subset]]: +:$\set a \cup \set b \subseteq \set {a, b}$ +{{qed|lemma}} +Let $x \in \set {a, b}$. +Then by the [[Axiom:Axiom of Pairing|Axiom of Pairing]]: +:$x = a \lor x = b$ +So by the [[Axiom:Axiom of Unions|Axiom of Unions]]: +:$x \in \set a \cup \set b$ +Thus by definition of [[Definition:Subset|subset]]: +:$\set {a, b} \subseteq \set a \cup \set b$ +{{qed|lemma}} +The result follows by definition of [[Definition:Set Equality/Definition 2|set equality]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Distributive Laws} +Tags: Set Union, Set Intersection + +\begin{theorem} +== [[Intersection Distributes over Union]] == +{{:Intersection Distributes over Union}} +\end{theorem}<|endoftext|> +\section{Intersection of Empty Set/Paradoxical Implications} +Tags: Set Intersection, Veridical Paradoxes + +\begin{theorem} +Although it appears counter-intuitive, the reasoning is sound. +This result is therefore classed as a [[Definition:Veridical Paradox|veridical paradox]]. +{{AuthorRef|Paul R. Halmos}} declares, in Section $5$ of his {{BookLink|Naive Set Theory|Paul R. Halmos}} of $1960$ that: +:''There is no profound problem here; it is merely a nuisance to be forced always to be making qualifications and exceptions just because some set somewhere along some construction might turn out to be empty. There is nothing to be done about this; it is just a fact of life.'' +However, later in that same work (in Section $9$, in the context of [[Definition:Indexed Family of Sets|indexed families of sets]]) he says that: +:''... an empty intersection does not make sense.'' +a sentiment which is repeated in the $2008$ collaboration with {{AuthorRef|Steven Givant}}, {{BookLink|Introduction to Boolean Algebras|Paul Halmos}}, p. $457$. +In {{BookReference|Topology|2000|James R. Munkres|ed = 2nd|edpage = Second Edition}}, the author recognizes the result, but does not adopt it: +:''If one has a given large set $X$ that is specified at the outset of the discussion to be one's "universe of discourse," and one considers only subsets of $X$ throughout, it is reasonable to let $\displaystyle \bigcap_{A \mathop \in \AA} A = X$ when $\AA$ is empty. Not all mathematicians follow this convention, however. To avoid difficulty, we shall not define the intersection when $\AA$ is empty.'' +{{BookReference|Topology: An Introduction with Application to Topological Groups|1967|George McCarty}} accepts this result, but cautiously: +:''A natural mnemonic for these extreme cases is that $\bigcap \SS$ "grows larger" as $\SS$ "grows smaller", and $\bigcup \SS$ grows smaller as $\SS$ grows smaller. No other convention is possible, but the case $\SS = \O$ will often be treated redundantly by itself in definitions and proofs, as a reminder of the null case.'' +{{BookReference|Set Theory|1999|András Hajnal|author2 = Peter Hamburger}} dismiss it casually: +:''As usual, we adopt the convention that in case $A = \O$ the expression $\bigcap A$ is defined only in case we work with the subsets of an underlying set $X$. In this case we put $\bigcap A = X$.'' +and {{BookReference|Programming, Games and Transportation Networks|1965|Claude Berge|author2 = A. 
Ghouila-Houri}} do not even acknowledge that there may be a problem in the first place: +:''In the case where $I = \O$, we have +::$\displaystyle \bigcap_{i \mathop \in I} A_i = X$;'' +:''this moreover is the only case where $X$ plays a role; in fact, if $I$ is not empty, clearly we have: +::$\displaystyle \bigcap_{i \mathop \in I} A_i = \set {x \mid x \in A_i \text { for every } i \in I}$. +\end{theorem}<|endoftext|> +\section{Ordered Pair/Kuratowski Formalization/Motivation} +Tags: Ordered Pairs + +\begin{theorem} +The only reason for the [[Kuratowski Formalization of Ordered Pair|Kuratowski formalization]] of [[Definition:Ordered Pair|ordered pairs]]: +:$\tuple {a, b} = \set {\set a, \set {a, b} }$ +is so their existence can be justified in the strictures of the [[Definition:Axiomatic Set Theory|axiomatic set theory]], in particular [[Definition:Zermelo-Fraenkel Set Theory|Zermelo-Fraenkel set theory (ZF)]]. +Once that has been demonstrated, there is no need to invoke it again. +The fact that this formulation allows that: +:$\tuple {a, b} = \tuple {c, d} \iff a = c, b = d$ +is its stated aim. +The fact that $\set {a, b} \in \tuple {a, b}$ is an unfortunate side-effect brought about by means of the definition. +It would be possible to add another [[Definition:Axiom|axiom]] to [[Definition:Zermelo-Fraenkel Set Theory|ZF]] or [[Definition:ZFC|ZFC]] specifically to allow for [[Definition:Ordered Pair|ordered pairs]] to be defined, and in some systems of [[Definition:Axiomatic Set Theory|axiomatic set theory]] this is what is done. +\end{theorem}<|endoftext|> +\section{Heine-Borel Theorem/Real Line/Closed and Bounded Set} +Tags: Real Analysis + +\begin{theorem} +Let $F$ be a [[Definition:Closed Set (Real Analysis)|closed]] and [[Definition:Bounded Subset of Real Numbers|bounded]] [[Definition:Real Number|real set]]. +Let $C$ be a [[Definition:Set|set]] of [[Definition:Open Set (Real Analysis)|open real sets]]. +Let $C$ be a [[Definition:Cover of Set|cover]] of $F$. +Then there is a [[Definition:Finite Subset|finite subset]] of $C$ that [[Definition:Cover of Set|covers]] $F$. +\end{theorem} + +\begin{proof} +We are given that $C$ is a [[Definition:Set|set]] of [[Definition:Open Set (Real Analysis)|open real sets]] that [[Definition:Cover of Set|covers]] $F$. +In other words, $C$ is an [[Definition:Open Cover|open cover]] of $F$. +We need to show that there is a [[Definition:Finite Subset|finite subset]] of $C$ that [[Definition:Cover of Set|covers]] $F$. +In other words, we need to show that $C$ has a [[Definition:Finite Subcover|finite subcover]]. +Let $F_o$ be the [[Definition:Relative Complement|complement]] of $F$ in $\R$. +By the definition of [[Definition:Closed Set (Real Analysis)|closed real set]], $F_o$ is [[Definition:Open Set (Real Analysis)|open]] as $F$ is [[Definition:Closed Set (Real Analysis)|closed]]. +=== Step 1: $C^*$ is an Open Cover of $\left[{a \,.\,.\, b}\right]$ === +It is demonstrated that $C^*$ is an [[Definition:Open Cover|open cover]] of $\left[{a \,.\,.\, b}\right]$. +Since $F$ is [[Definition:Bounded Subset of Real Numbers|bounded]], $F$ is contained in a [[Definition:Closed Set (Real Analysis)|closed]] and [[Definition:Bounded Subset of Real Numbers|bounded]] [[Definition:Closed Real Interval|interval]] $\left[{a \,.\,.\, b}\right]$ where $a, b \in \R$. +Define $C^* = C \cup \left\{ {F_o}\right\}$. +Like $C$, $C^*$ is a [[Definition:Set|set]] of [[Definition:Open Set (Real Analysis)|open real sets]] as $F_o$ is [[Definition:Open Set (Real Analysis)|open]]. 
+$C^*$ [[Definition:Cover of Set|covers]] $F \cup F_o$ as $C$ [[Definition:Cover of Set|covers]] $F$ and $\left\{ {F_o} \right\}$ [[Definition:Cover of Set|covers]] $F_o$. +$F_o \cup F$ equals $\R$ as $F_o$ is the [[Definition:Relative Complement|complement]] of $F$ in $\R$. +So $C^*$ [[Definition:Cover of Set|covers]] $\R$. +Furthermore, $C^*$ is an [[Definition:Open Cover|open cover]] of $\left[{a \,.\,.\, b}\right]$ as $\left[{a \,.\,.\, b}\right]$ is a [[Definition:Subset|subset]] of $\R$. +=== Step 2: $C^*$ has a Finite Subcover === +It is demonstrated that $C^*$ has a [[Definition:Finite Subcover|finite subcover]] $C^*_f$. +$C^*$ is an [[Definition:Open Cover|open cover]] of the [[Definition:Closed Set (Real Analysis)|closed]] and [[Definition:Bounded Subset of Real Numbers|bounded]] [[Definition:Closed Real Interval|interval]] $\left[{a \,.\,.\, b}\right]$. +Therefore, by [[Open Cover of Closed and Bounded Real Interval has Finite Subcover]], $C^*$ has a [[Definition:Finite Subcover|finite subcover]] $C^*_f$. +=== Step 3: $C$ has a Finite Subcover === +It is demonstrated that $C$ has a [[Definition:Finite Subcover|finite subcover]] $C_f$. +Note that $F_o$ is the only [[Definition:Element|element]] in $C^*$ that is not an [[Definition:Element|element]] of $C$. +Therefore, $F_o$ is the only possible [[Definition:Element|element]] in $C^*_f$ that is not an [[Definition:Element|element]] of $C$ as $C^*_f$ is a [[Definition:Subset|subset]] of $C^*$. +This means that $C^*_f$ \ $\left\{ {F_o} \right\}$ is a [[Definition:Subset|subset]] of $C$. +Define $C_f$ = $C^*_f$ \ $\left\{ {F_o} \right\}$. +According to the reasoning above, $C_f$ is a [[Definition:Subset|subset]] of $C$. +Also, $C_f$ is [[Definition:Finite Set|finite]] as $C^*_f$ is [[Definition:Finite Set|finite]]. +What remains is to show that $C_f$ [[Definition:Cover of Set|covers]] $F$. 
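+The key point is that discarding $F_o$ from $C^*_f$ cannot uncover any point of $F$, since no point of $F$ lies in $F_o$, the [[Definition:Relative Complement|complement]] of $F$ in $\R$.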
+We have: +{{begin-eqn}} +{{eqn | o = \implies + | l = F \subseteq \left[ {a \,.\,.\, b} \right] + | r = F \subseteq \left[ {a \,.\,.\, b} \right] \subseteq \bigcup_{O \mathop \in C^*_f} O + | c = as $C^*_f$ is a [[Definition:Cover of Set|cover]] of $\left[ {a \,.\,.\, b} \right]$ +}} +{{eqn | o = \implies + | r = F \subseteq \bigcup_{O \mathop \in C^*_f} O +}} +{{eqn | o = \implies + | r = F \cap \complement F_o \subseteq (\bigcup_{O \mathop \in C^*_f} O) \cap \complement F_o + | c = [[Set Intersection Preserves Subsets/Corollary|Set Intersection Preserves Subsets]] +}} +{{eqn | r = F \cap \complement \complement F \subseteq (\bigcup_{O \mathop \in C^*_f} O) \cap \complement F_o + | c = as $F_o = \complement F$, the [[Definition:Relative Complement|complement]] of $F$ in $\R$ +}} +{{eqn | r = F \cap F \subseteq (\bigcup_{O \mathop \in C^*_f} O) \cap \complement F_o + | c = [[Relative Complement of Relative Complement]] +}} +{{eqn | r = F \subseteq (\bigcup_{O \mathop \in C^*_f} O) \cap \complement F_o + | c = [[Intersection is Idempotent]] +}} +{{end-eqn}} +Furthermore, as $F \subseteq \left[{a \,.\,.\, b}\right]$: +{{begin-eqn}} +{{eqn | o = \subseteq + | l = F + | r = \left({\bigcup_{O \mathop \in C^*_f} O}\right) \cap \complement F_o +}} +{{eqn | o = \subseteq + | r = \left({\bigcup_{O \mathop \in C^*_f \cup \left\{ {F_o}\right\} } O}\right) \cap \complement F_o + | c = by $\displaystyle \left({\bigcup_{O \mathop \in C^*_f} O}\right) \subseteq \left({\bigcup_{O \mathop \in C^*_f \cup \left\{ {F_o}\right\} } O}\right)$ and [[Set Intersection Preserves Subsets/Corollary|Set Intersection Preserves Subsets]] +}} +{{eqn | r = \left({\bigcup_{O \mathop \in C^*_f \cup \left\{ {F_o}\right\} } O}\right) \setminus F_o + | c = as [[Set Difference as Intersection with Complement|set intersection with complement equals set difference]] +}} +{{eqn | r = \left({\bigcup_{O \mathop \in \left({C^*_f \setminus \left\{ {F_o}\right\} \cup \left\{ {F_o}\right\} }\right)} O}\right) \setminus F_o + | c = by [[Set Difference Union Second Set is Union]] +}} +{{eqn | r = \left({\left({\bigcup_{O \mathop \in \left({C^*_f \setminus \left\{ {F_o}\right\} }\right)} O}\right) \cup F_o}\right) \setminus F_o + | c = +}} +{{eqn | r = \left({\bigcup_{O \mathop \in \left({C^*_f \setminus \left\{ {F_o}\right\} }\right)} O}\right) \setminus F_o + | c = [[Set Difference with Union is Set Difference]] +}} +{{eqn | o = \subseteq + | r = \bigcup_{O \mathop \in \left({C^*_f \setminus \left\{ {F_o}\right\} }\right)} O + | c = [[Set Difference is Subset]] +}} +{{eqn | r = \left({\bigcup_{O \mathop \in C_f} O}\right) + | c = Definition of $C_f$ +}} +{{end-eqn}} +Thus, $C_f$ [[Definition:Cover of Set|covers]] $F$. +This finishes the proof. +{{qed}} + +{{Namedfor|Heinrich Eduard Heine|name2 = Émile Borel}} +\end{proof}<|endoftext|> +\section{Cardinality of Set of All Mappings from Empty Set} +Tags: Combinatorics, Mapping Theory + +\begin{theorem} +Let $T$ be a [[Definition:Set|set]]. +Let $T^\O$ denote the [[Definition:Set of All Mappings|set of all mappings]] from $\O$ to $S$. +Then: +:$\card {T^\O} = 1$ +where $\card {T^\O}$ denotes the [[Definition:Cardinality|cardinality]] of $\O^S$. +\end{theorem} + +\begin{proof} +The only element of $T^\O$ is the [[Definition:Null Relation|null relation]]: +:$\varnothing \times T$ +From [[Null Relation is Mapping iff Domain is Empty Set]], $\O \times T$ is a [[Definition:Mapping|mapping]] from $\O$ to $T$. +The result follows from [[Empty Mapping is Unique]]. 
+That is: +:$\card {T^\O} = 1$ +{{qed}} +\end{proof}<|endoftext|> +\section{Cardinality of Set of All Mappings to Empty Set} +Tags: Combinatorics, Mapping Theory + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]]. +Let $\O^S$ be the [[Definition:Set of All Mappings|set of all mappings]] from $S$ to $\O$. +Then: +:$\card {\O^S} = \begin{cases} +1 & : S = \O \\ +0 & : S \ne \O +\end{cases}$ +where $\card {\O^S}$ denotes the [[Definition:Cardinality|cardinality]] of $\O^S$. +\end{theorem} + +\begin{proof} +From [[Null Relation is Mapping iff Domain is Empty Set]], the [[Definition:Null Relation|null relation]]: +:$\mathcal R = \O \subseteq S \times T$ +is not a [[Definition:Mapping|mapping]] unless $S = \O$. +So if $S \ne \O$: +:$\card {\O^S} = 0$ +If $S = \O$: +:$\card {\O^S} = 1$ +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Intersection Distributes over Union/Family of Sets/Corollary} +Tags: Intersection Distributes over Union + +\begin{theorem} +Let $I$ and $J$ be [[Definition:Indexing Set|indexing sets]]. +Let $\family {A_\alpha}_{\alpha \mathop \in I}$ and $\family {B_\beta}_{\beta \mathop \in J}$ be [[Definition:Indexed Family of Subsets|indexed families of subsets]] of a [[Definition:Set|set]] $S$. +Then: +:$\displaystyle \bigcup_{\tuple {\alpha, \beta} \mathop \in I \times J} \paren {A_\alpha \cap B_\beta} = \paren {\bigcup_{\alpha \mathop \in I} A_\alpha} \cap \paren {\bigcup_{\beta \mathop \in J} B_\beta}$ +where $\displaystyle \bigcup_{\alpha \mathop \in I} A_\alpha$ denotes the [[Definition:Union of Family|union]] of $\family {A_\alpha}_{\alpha \mathop \in I}$. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \bigcup_{\alpha \mathop \in I} \paren {A_\alpha \cap B} + | r = \paren {\bigcup_{\alpha \mathop \in I} A_\alpha} \cap B + | c = [[Intersection Distributes over Union/Family of Sets|Intersection Distributes over Union: Family of Sets]] +}} +{{eqn | ll= \leadsto + | l = \bigcup_{\alpha \mathop \in I} \paren {A_\alpha \cap \paren {\bigcup_{\beta \mathop \in J} B_\beta} } + | r = \paren {\bigcup_{\alpha \mathop \in I} A_\alpha} \cap \paren {\bigcup_{\beta \mathop \in J} B_\beta} + | c = setting $\displaystyle B = \paren {\bigcup_{\beta \mathop \in J} B_\beta}$ +}} +{{eqn | ll= \leadsto + | l = \bigcup_{\alpha \mathop \in I} \paren {\bigcup_{\beta \mathop \in J} \paren {A_\alpha \cap B_\beta} } + | r = \paren {\bigcup_{\alpha \mathop \in I} A_\alpha} \cap \paren {\bigcup_{\beta \mathop \in J} B_\beta} + | c = [[Intersection Distributes over Union/Family of Sets|Intersection Distributes over Union: Family of Sets]] +}} +{{eqn | ll= \leadsto + | l = \bigcup_{\paren {\alpha, \beta} \mathop \in I \times J} \paren {A_\alpha \cap B_\beta} + | r = \paren {\bigcup_{\alpha \mathop \in I} A_\alpha} \cap \paren {\bigcup_{\beta \mathop \in J} B_\beta} + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Union Distributes over Intersection/Family of Sets/Corollary} +Tags: Union Distributes over Intersection + +\begin{theorem} +Let $I$ and $J$ be [[Definition:Indexing Set|indexing sets]]. +Let $\family {A_\alpha}_{\alpha \mathop \in I}$ and $\family {B_\beta}_{\beta \mathop \in J}$ be [[Definition:Indexed Family of Subsets|indexed families of subsets]] of a [[Definition:Set|set]] $S$. 
+Then: +:$\displaystyle \bigcap_{\tuple{\alpha, \beta} \mathop \in I \times J} \paren {A_\alpha \cup B_\beta} = \paren {\bigcap_{\alpha \mathop \in I} A_\alpha} \cup \paren {\bigcap_{\beta \mathop \in J} B_\beta}$ +where $\displaystyle \bigcap_{\alpha \mathop \in I} A_\alpha$ denotes the [[Definition:Intersection of Family|intersection]] of $\family {A_\alpha}_{\alpha \mathop \in I}$. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \bigcap_{\alpha \mathop \in I} \paren {A_\alpha \cup B} + | r = \paren {\bigcap_{\alpha \mathop \in I} A_\alpha} \cup B + | c = [[Intersection Distributes over Union/Family of Sets|Intersection Distributes over Union: Family of Sets]] +}} +{{eqn | ll= \leadsto + | l = \bigcap_{\alpha \mathop \in I} \paren {A_\alpha \cup \paren {\bigcap_{\beta \mathop \in J} B_\beta} } + | r = \paren {\bigcap_{\alpha \mathop \in I} A_\alpha} \cup \paren {\bigcap_{\beta \mathop \in J} B_\beta} + | c = setting $\displaystyle B = \paren {\bigcap_{\beta \mathop \in J} B_\beta}$ +}} +{{eqn | ll= \leadsto + | l = \bigcap_{\alpha \mathop \in I} \paren {\bigcap_{\beta \mathop \in J} \paren {A_\alpha \cup B_\beta} } + | r = \paren {\bigcap_{\alpha \mathop \in I} A_\alpha} \cup \paren {\bigcap_{\beta \mathop \in J} B_\beta} + | c = [[Intersection Distributes over Union/Family of Sets|Intersection Distributes over Union: Family of Sets]] +}} +{{eqn | ll= \leadsto + | l = \bigcap_{\paren {\alpha, \beta} \mathop \in I \times J} \paren {A_\alpha \cup B_\beta} + | r = \paren {\bigcap_{\alpha \mathop \in I} A_\alpha} \cup \paren {\bigcap_{\beta \mathop \in J} B_\beta} + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cartesian Product of Unions/General Result} +Tags: Cartesian Product, Set Union + +\begin{theorem} +Let $I$ and $J$ be [[Definition:Indexing Set|indexing sets]]. +Let $\family {A_i}_{i \mathop \in I}$ and $\family {B_j}_{j \mathop \in J}$ be [[Definition:Indexed Family of Sets|families of sets]] [[Definition:Indexing Set|indexed]] by $I$ and $J$ respectively. +Then: +:$\displaystyle \paren {\bigcup_{i \mathop \in I} A_i} \times \paren {\bigcup_{j \mathop \in J} B_j} = \bigcup_{\tuple {i, j} \mathop \in I \times J} \paren {A_i \times B_j}$ +where: +:$\displaystyle \bigcup_{i \mathop \in I} A_i$ denotes the [[Definition:Union of Family|union of $\family {A_i}_{i \mathop \in I}$]] and so on +:$\times$ denotes [[Definition:Cartesian Product|Cartesian product]]. +\end{theorem}<|endoftext|> +\section{Preimages All Exist iff Surjection/Corollary} +Tags: Surjections, Inverse Mappings + +\begin{theorem} +:$\forall B \subseteq T, B \ne \O: f^{-1} \sqbrk B \ne \O$ +{{iff}}: +:$f$ is a [[Definition:Surjection|surjection]] +where $f^{-1} \sqbrk B$ denotes the [[Definition:Preimage of Subset under Mapping|preimage]] of $B \subseteq T$. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Let $f$ be a [[Definition:Surjection|surjection]]. +Let $B \subseteq T$ such that $B \ne \varnothing$. +Then: +:$\exists t \in T: t \in B$ +From [[Preimages All Exist iff Surjection]]: +:$\map {f^{-1} } t \ne \O$ +As $t \in B$ it follows from [[Preimage of Subset is Subset of Preimage]] that: +:$f^{-1} \sqbrk B \ne \O$ +$B$ is arbitrary, so: +:$\forall B \subseteq T, B \ne \O: f^{-1} \sqbrk B \ne \O$ +{{qed|lemma}} +=== Sufficient Condition === +Suppose that: +:$\forall B \subseteq T, B \ne \O: f^{-1} \sqbrk B \ne \O$ +{{AimForCont}} $f$ is not a [[Definition:Surjection|surjection]]. 
+Then by definition: +:$\exists t \in T: \neg \paren {\exists s \in S: \map f s = t}$ +That is: +:$\exists \set t \subseteq T: f^{-1} \sqbrk {\set t} = \O$ +which contradicts the hypothesis. +So by [[Proof by Contradiction]], $f$ is a [[Definition:Surjection|surjection]]. +Hence the result. +{{Qed}} +\end{proof}<|endoftext|> +\section{Image of Preimage under Relation is Subset} +Tags: Preimages under Relations, Composite Relations + +\begin{theorem} +Let $\mathcal R \subseteq S \times T$ be a [[Definition:Relation|relation]]. +Then: +:$B \subseteq T \implies \paren {\mathcal R \circ \mathcal R^{-1} } \sqbrk B \subseteq B$ +where: +:$\mathcal R \sqbrk B$ denotes the [[Definition:Image of Subset under Relation|image of $B$ under $\mathcal R$]] +:$\mathcal R^{-1} \sqbrk B$ denotes the [[Definition:Preimage of Subset under Relation|preimage of $B$ under $\mathcal R$]] +:$\mathcal R \circ \mathcal R^{-1}$ denotes [[Definition:Composition of Relations|composition]] of $\mathcal R$ and $\mathcal R^{-1}$. +\end{theorem} + +\begin{proof} +Let $B \subseteq T$. +Then: +{{begin-eqn}} +{{eqn | l = y + | o = \in + | r = \paren {\mathcal R \circ \mathcal R^{-1} } \sqbrk B +}} +{{eqn | ll= \leadsto + | l = y + | o = \in + | r = \mathcal R \sqbrk {\mathcal R^{-1} \sqbrk B} + | c = Definition of [[Definition:Composition of Relations|Composition of Relations]] +}} +{{eqn | ll= \leadsto + | l = \exists x \in \mathcal R^{-1} \sqbrk B: \tuple {x, y} + | o = \in + | r = \mathcal R +}} +{{eqn | ll= \leadsto + | l = y + | o = \in + | r = B + | c = +}} +{{end-eqn}} +So by definition of [[Definition:Subset|subset]]: +:$B \subseteq T \implies \paren {\mathcal R \circ \mathcal R^{-1} }\sqbrk B \subseteq B$ +{{qed}} +\end{proof}<|endoftext|> +\section{Inverse of Direct Image Mapping does not necessarily equal Inverse Image Mapping} +Tags: Direct Image Mappings, Inverse Image Mappings + +\begin{theorem} +Let $S$ and $T$ be [[Definition:Set|sets]]. +Let $\mathcal R \subseteq S \times T$ be a [[Definition:Relation|relation]]. +Let $\mathrel R^\to$ be the [[Definition:Direct Image Mapping of Relation|direct image mapping]] of $\mathcal R$. +Let $\mathrel R^\gets$ be the [[Definition:Inverse Image Mapping of Relation|inverse image mapping]] of $\mathcal R$. +Then it is not necessarily the case that: +:$\paren {\mathrel R^\to}^{-1} = \mathcal R^\gets$ +where $\paren {\mathrel R^\to}^{-1}$ denote the [[Definition:Inverse of Mapping|inverse]] of $\mathrel R^\to$. +That is, the [[Definition:Inverse Relation|inverse]] of the [[Definition:Direct Image Mapping of Relation|direct image mapping]] of $\mathcal R$ does not always equal the [[Definition:Inverse Image Mapping of Relation|inverse image mapping]] of $\mathcal R$. +\end{theorem} + +\begin{proof} +[[Proof by Counterexample]]: +Let $S = T = \set {0, 1}$. +Let $\mathcal R = \set {\tuple {0, 0}, \tuple {0, 1} }$. 
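+The image computations that follow can also be checked mechanically.
+The following is a minimal Python sketch, included purely as an illustration; the helper names subsets, direct_image and inverse_image are ours and are not defined anywhere on this page.
+<syntaxhighlight lang="python">
+from itertools import combinations
+
+# The counterexample: S = T = {0, 1} and R = {(0, 0), (0, 1)}
+S = T = {0, 1}
+R = {(0, 0), (0, 1)}
+
+def subsets(X):
+    """All subsets of X, as frozensets (the power set)."""
+    xs = list(X)
+    return [frozenset(c) for k in range(len(xs) + 1) for c in combinations(xs, k)]
+
+def direct_image(rel, A):
+    """Image of the subset A under the relation rel."""
+    return frozenset(t for (s, t) in rel if s in A)
+
+def inverse_image(rel, B):
+    """Preimage of the subset B under the relation rel."""
+    return frozenset(s for (s, t) in rel if t in B)
+
+# The direct image mapping, as a dictionary from the power set of S to the power set of T
+forward = {A: direct_image(R, A) for A in subsets(S)}
+
+# Its inverse relation: it relates the empty set to both {} and {1},
+# so it is not even a mapping from the power set of T to the power set of S.
+forward_inverse = {(image, A) for A, image in forward.items()}
+
+# The inverse image mapping sends every non-empty subset of T to {0}.
+backward = {B: inverse_image(R, B) for B in subsets(T)}
+
+print(sorted(forward_inverse, key=str))
+print(sorted(backward.items(), key=str))
+</syntaxhighlight>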
+We have that: +:$\mathcal R^{-1} = \set {\tuple {0, 0}, \tuple {1, 0} }$ +:$\powerset S = \powerset T = \set {\O, \set 0, \set 1, \set {0, 1} }$ +Thus, by inspection: +{{begin-eqn}} +{{eqn | l = \map {\mathrel R^\to} \O + | r = \O + | c = +}} +{{eqn | l = \map {\mathrel R^\to} {\set 0} + | r = \set {0, 1} + | c = +}} +{{eqn | l = \map {\mathrel R^\to} {\set 1} + | r = \O + | c = +}} +{{eqn | l = \map {\mathrel R^\to} {\set {0, 1} } + | r = \set {0, 1} + | c = +}} +{{end-eqn}} +Note that $\paren {\mathrel R^\to}^{-1}$ is the [[Definition:Inverse of Mapping|inverse of a mapping]] which is neither an [[Definition:Injection|injection]] nor a [[Definition:Surjection|surjection]], and so is itself not a [[Definition:Mapping|mapping]] from $\powerset T$ to $\powerset S$. +{{begin-eqn}} +{{eqn | l = \map {\paren {\mathrel R^\to}^{-1} } \O + | r = \set {\O, \set 1} + | c = +}} +{{eqn | l = \map {\paren {\mathrel R^\to}^{-1} } {\set 0} + | r = \O + | c = +}} +{{eqn | l = \map {\paren {\mathrel R^\to}^{-1} } {\set 1} + | r = \O + | c = +}} +{{eqn | l = \map {\paren {\mathrel R^\to}^{-1} } {\set {0, 1} } + | r = \set {\set 0, \set {0, 1} } + | c = +}} +{{end-eqn}} +This can be seen to be completely different from $\mathrel R^\gets$, which can be determined by inspection to be: +{{begin-eqn}} +{{eqn | l = \map {\mathrel R^\gets} \O + | r = \O + | c = +}} +{{eqn | l = \map {\mathrel R^\gets} {\set 0} + | r = \set 0 + | c = +}} +{{eqn | l = \map {\mathrel R^\gets} {\set 1} + | r = \set 0 + | c = +}} +{{eqn | l = \map {\mathrel R^\gets} {\set {0, 1} } + | r = \set 0 + | c = +}} +{{end-eqn}} +{{qed}} +[[Category:Direct Image Mappings]] +[[Category:Inverse Image Mappings]] +35arzqf6eutbis2oqntixzfkvhvw995 +\end{proof}<|endoftext|> +\section{Equivalence of Definitions of Maximal Element} +Tags: Order Theory + +\begin{theorem} +Let $\left({S, \preceq}\right)$ be an [[Definition:Ordered Set|ordered set]]. +Let $T \subseteq S$ be a [[Definition:Subset|subset]] of $S$. +{{TFAE|def = Maximal Element}} +\end{theorem} + +\begin{proof} +=== Definition 1 implies Definition 2 === +Let $x$ be an [[Definition:Maximal/Ordered Set/Definition 1|maximal element by definition 1]]. +That is: +:$(1): \quad \forall y \in T: x \preceq y \implies x = y$ +Aiming for a [[Definition:Contradiction|contradiction]], suppose that: +:$\exists y \in T: x \prec y$ +Then by definition: +:$x \preceq y \land x \ne y$ +which [[Definition:Contradiction|contradicts]] $(1)$. +Thus by [[Proof by Contradiction]]: +:$\nexists y \in T: x \prec y$ +That is $x$ is a [[Definition:Maximal/Ordered Set/Definition 2|maximal element by definition 2]]. +{{qed|lemma}} +=== Definition 2 implies Definition 1 === +Let $x$ be a [[Definition:Maximal/Ordered Set/Definition 2|maximal element by definition 2]]. +That is: +:$(2): \quad \nexists y \in T: x \prec y$ +Aiming for a [[Definition:Contradiction|contradiction]], suppose that: +:$\exists y \in T: x \preceq y: x \ne y$ +That is: +:$\exists y \in T: x \prec y$ +which [[Definition:Contradiction|contradicts]] $(2)$. +Thus: +:$\forall y \in T: x \preceq y \implies x = y$ +Thus $x$ is a [[Definition:Maximal/Ordered Set/Definition 1|maximal element by definition 1]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Equivalence of Definitions of Minimal Element} +Tags: Order Theory + +\begin{theorem} +Let $\struct {S, \preceq}$ be an [[Definition:Ordered Set|ordered set]]. +Let $T \subseteq S$ be a [[Definition:Subset|subset]] of $S$. 
+{{TFAE|def = Minimal Element}} +\end{theorem} + +\begin{proof} +=== Definition 1 implies Definition 2 === +Let $x$ be an [[Definition:Minimal/Ordered Set/Definition 1|minimal element by definition 1]]. +That is: +:$(1): \quad \forall y \in T: y \preceq x \implies x = y$ +{{AimForCont}}: +:$\exists y \in T: y \prec x$ +Then by definition: +:$y \preceq x \land x \ne y$ +which [[Definition:Contradiction|contradicts]] $(1)$. +Thus by [[Proof by Contradiction]]: +:$\nexists y \in T: y \prec x$ +That is $x$ is a [[Definition:Minimal/Ordered Set/Definition 2|minimal element by definition 2]]. +{{qed|lemma}} +=== Definition 2 implies Definition 1 === +Let $x$ be a [[Definition:Minimal/Ordered Set/Definition 2|minimal element by definition 2]]. +That is: +:$(2): \quad \nexists y \in T: y \prec x$ +{{AimForCont}}: +:$\exists y \in T: y \preceq x: x \ne y$ +That is: +:$\exists y \in T: y \prec x$ +which [[Definition:Contradiction|contradicts]] $(2)$. +Thus: +:$\forall y \in T: y \preceq x \implies x = y$ +Thus $x$ is a [[Definition:Minimal/Ordered Set/Definition 1|minimal element by definition 1]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Singleton of Power Set less Empty Set is Minimal Subset} +Tags: Power Set + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]] which is [[Definition:Non-Empty Set|non-empty]]. +Let $\mathcal C = \mathcal P \left({S}\right) \setminus \varnothing$, that is, the [[Definition:Power Set|power set]] of $S$ without the [[Definition:Empty Set|empty set]]. +Let $x \in S$. +Then $\left\{{x}\right\}$ is a [[Definition:Minimal Element|minimal element]] of the [[Definition:Ordered Structure|ordered structure]] $\left({\mathcal C, \subseteq}\right)$. +\end{theorem} + +\begin{proof} +Let $y \in \mathcal C$ such that $y \subseteq \left\{{x}\right\}$. +We have that $\varnothing \notin \mathcal C$. +Therefore: +:$\exists z \in S: z \in y$ +But as $y \subseteq \left\{{x}\right\}$ it follows that: +:$z \in \left\{{x}\right\}$ +and so by definition of [[Definition:Singleton|singleton]]: +:$z = x$ +and so: +:$y = \left\{{x}\right\}$ +and so: +:$y = x$ +Thus, by definition, $\left\{{x}\right\}$ is a [[Definition:Minimal Element|minimal element]] of $\left({\mathcal C, \subseteq}\right)$. +{{qed}} +\end{proof}<|endoftext|> +\section{Power Set less Empty Set has no Smallest Element iff not Singleton} +Tags: Power Set + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]] which is [[Definition:Non-Empty Set|non-empty]]. +Let $\mathcal C = \mathcal P \left({S}\right) \setminus \varnothing$, that is, the [[Definition:Power Set|power set]] of $S$ without the [[Definition:Empty Set|empty set]]. +Then the [[Definition:Ordered Structure|ordered structure]] $\left({\mathcal C, \subseteq}\right)$ has no [[Definition:Smallest Element|smallest element]] {{iff}} $S$ is not a [[Definition:Singleton|singleton]]. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Let $S$ not be a [[Definition:Singleton|singleton]]. +Then $\exists x, y \in S: x \ne y$. +Let $Z \in \mathcal C$ be the [[Definition:Smallest Element|smallest element]] of $\mathcal C$. +Then: +:$\forall T \in \mathcal C: Z \subseteq T$ +But by [[Singleton of Power Set less Empty Set is Minimal Subset]], both $\left\{{x}\right\}$ and $\left\{{y}\right\}$ are [[Definition:Minimal Element|minimal elements]] of $\left({\mathcal C, \subseteq}\right)$. +Therefore it cannot be the case that $Z \subseteq \left\{{x}\right\}$ and $Z \subseteq \left\{{y}\right\}$. 
+Therefore $\left({\mathcal C, \subseteq}\right)$ has no [[Definition:Smallest Element|smallest element]]. +{{qed|lemma}} +=== Sufficient Condition === +Let the [[Definition:Ordered Structure|ordered structure]] $\left({\mathcal C, \subseteq}\right)$ has no [[Definition:Smallest Element|smallest element]]. +Aiming for a [[Definition:Contradiction|contradiction]], suppose $S$ is a [[Definition:Singleton|singleton]]. +Let $S = \left\{{x}\right\}$. +Then: +:$\mathcal C = \left\{{\left\{{x}\right\}}\right\}$ +Then: +:$\forall y \in \mathcal C: y \subseteq \left\{{x}\right\}$ +trivially. +Thus $\left({\mathcal C, \subseteq}\right)$ has a [[Definition:Smallest Element|smallest element]] which is $\left\{{x}\right\}$. +By [[Proof by Contradiction]] it follows that $S$ is not a [[Definition:Singleton|singleton]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Natural Numbers under Multiplication form Semigroup} +Tags: Natural Numbers, Examples of Semigroups + +\begin{theorem} +Let $\N$ be the set of [[Definition:Natural Numbers|natural numbers]]. +Let $\times$ denote the operation of [[Definition:Natural Number Multiplication|multiplication]] on $\N$. +The [[Definition:Algebraic Structure|structure]] $\struct {\N, \times}$ forms a [[Definition:Semigroup|semigroup]]. +\end{theorem} + +\begin{proof} +=== Closure === +We have that [[Natural Number Multiplication is Closed]]. +That is, $\struct {\N, \times}$ is [[Definition:Closed Algebraic Structure|closed]]. +{{qed|lemma}} +=== Associativity === +We have that [[Natural Number Multiplication is Associative]]. +{{qed|lemma}} +Thus the criteria are fulfilled for $\struct {\N, \times}$ to form a [[Definition:Semigroup|semigroup]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Non-Zero Natural Numbers under Addition form Semigroup} +Tags: Natural Numbers, Examples of Semigroups + +\begin{theorem} +Let $\N_{>0}$ be the set of [[Definition:Natural Numbers|natural numbers]] without [[Definition:Zero (Number)|zero]], that is: +:$\N_{>0} = \N \setminus \set 0$ +Let $+$ denote [[Definition:Natural Number Addition|natural number addition]]. +The [[Definition:Algebraic Structure|structure]] $\struct {\N_{>0}, +}$ forms a [[Definition:Semigroup|semigroup]]. +\end{theorem} + +\begin{proof} +This is a specific instance of [[Natural Numbers Bounded Below under Addition form Commutative Semigroup]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Bernoulli's Hanging Chain Problem} +Tags: Second Order ODEs + +\begin{theorem} +Consider a uniform [[Definition:Chain (Physics)|chain]] $C$ whose [[Definition:Physical Property|physical properties]] are as follows: +:$C$ is of [[Definition:Length (Linear Measure)|length]] $l$ +:The [[Definition:Mass|mass]] per unit [[Definition:Length (Linear Measure)|length]] of $C$ is $m$ +:$C$ is of [[Definition:Zero|zero]] [[Definition:Stiffness (Physics)|stiffness]]. +Let $C$ be suspended in a [[Definition:Vertical Line|vertical line]] from a fixed point and otherwise free to move. +Let $C$ be slightly disturbed in a [[Definition:Vertical Plane|vertical plane]] from its position of [[Definition:Stable Equilibrium|stable equilibrium]]. +Let $\map y t$ be the [[Definition:Horizontal|horizontal]] [[Definition:Displacement|displacement]] at [[Definition:Time|time]] $t$ from its position of [[Definition:Stable Equilibrium|stable equilibrium]] of a [[Definition:Particle|particle]] of $C$ which is a [[Definition:Height (Linear Measure)|vertical distance]] $x$ from its point of attachment. 
+The [[Definition:Second Order Ordinary Differential Equation|$2$nd order ODE]] describing the motion of $y$ is: +:$\dfrac {\d^2 y} {\d t^2} = g \paren {l - x} \dfrac {\d^2 y} {\d x^2} - g \dfrac {\d y} {\d x}$ +\end{theorem} + +\begin{proof} +{{ProofWanted}} +{{Namedfor|Daniel Bernoulli|cat = Bernoulli, Daniel}} +\end{proof}<|endoftext|> +\section{Real Line Continuity by Inverse of Mapping} +Tags: Real Analysis, Continuity + +\begin{theorem} +Let $f$ be a [[Definition:Real Function|real function]]. +Let the [[Definition:Domain of Mapping|domain]] of $f$ be [[Definition:Open Set (Real Analysis)|open]]. +Let $f^{-1}$ be the [[Definition:Inverse of Mapping|inverse]] of $f$. +Then $f$ is [[Definition:Continuous Real Function|continuous]] {{iff}}: +:for every [[Definition:Open Set (Real Analysis)|open real set]] $O$ that overlaps with the [[Definition:Image of Mapping|image]] of $f$, the [[Definition:Preimage of Subset under Mapping|preimage]] $f^{-1} \left [{O}\right]$ is [[Definition:Open Set (Real Analysis)|open]]. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Let $\operatorname{Dom} \left( {f} \right)$ be the [[Definition:Domain of Mapping|domain]] of $f$. +Let $\operatorname{Im} \left( {f} \right)$ be the [[Definition:Image of Mapping|image]] of $f$. +Let $f^{-1} \left [{O}\right]$ be the [[Definition:Preimage of Subset under Mapping|preimage]] of $O$ under $f$. +Thus by definition: +: $\operatorname{Im} \left( {f} \right)$ is the [[Definition:Set|set]] of [[Definition:Element|points]] $q$ in the [[Definition:Codomain of Mapping|codomain]] of $f$ satisfying $q = f \left( {p} \right)$ for a point $p$ in $\operatorname{Dom} \left( {f} \right)$. +:$f^{-1} \left [{O}\right]$ is the [[Definition:Set|set]] of [[Definition:Element|points]] $p$ in $\operatorname{Dom} \left( {f} \right)$ such that $f \left( {p} \right) \in O$. +Let $f$ be [[Definition:Continuous Real Function|continuous]]. +Let $O$ be an [[Definition:Open Set (Real Analysis)|open real set]] that overlaps with $\operatorname{Im} \left( {f} \right)$. +We need to show that $f^{-1} \left [{O}\right]$ is [[Definition:Open Set (Real Analysis)|open]]. +$(1): \quad$ It is shown that $f^{-1} \left [{O}\right]$ is [[Definition:Non-Empty Set|non-empty]]. +A point $q_1$ exists in $O \cap \operatorname{Im} \left( {f} \right)$ as $O$ and $\operatorname{Im} \left( {f} \right)$ overlap. +In particular, $q_1 \in \operatorname{Im} \left( {f} \right)$. +Therefore, by the definition of $\operatorname{Im} \left( {f} \right)$, a point $p_1$ in $\operatorname{Dom} \left( {f} \right)$ exists satisfying $f \left( {p_1} \right) = q_1$. +Also, $q_1 \in O$, which gives: +{{begin-eqn}} +{{eqn | o = \implies + | l = q_1 \in O + | r = f \left( {p_1} \right) \in O + | c = as $f \left( {p_1} \right) = q_1$ +}} +{{eqn | o = \implies + | r = p_1 \in f^{-1} \left[ {O} \right] + | c = by the definition of $f^{-1} \left[ {O} \right]$ +}} +{{end-eqn}} +Accordingly, $f^{-1} \left [{O}\right]$ is [[Definition:Non-Empty Set|non-empty]]. +$(2): \quad$ It is shown that the [[Definition:Real Function|function]] $f$ maps $\left( {x - \delta \,.\,.\, x + \delta} \right)$ into $O$. +Let $x$ be a point in $f^{-1} \left [{O}\right]$. +This means that $x \in \operatorname{Dom} \left( {f} \right)$ and $f \left( {x} \right) \in O$. +We know that $f$ is [[Definition:Continuous Real Function|continuous]]. +Accordingly, $f$ is [[Definition:Continuous Real Function at Point|continuous]] at $x$ as $x \in \operatorname{Dom} \left( {f} \right)$. 
+Let an $\epsilon > 0$ be given. +That $f$ is [[Definition:Continuous Real Function at Point|continuous]] at $x$, means that: +: a $\delta > 0$ exists such that $f \left( {y} \right) \in \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ whenever $y \in \left( {x - \delta \,.\,.\, x + \delta} \right) \cap \operatorname{Dom} \left( {f} \right)$. +We know that $f \left( {x} \right) \in O$. +Also, $O$ is [[Definition:Open Set (Real Analysis)|open]]. +This allows us to choose $\epsilon \in \R_{>0}$ small enough such that: +:$\left({f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right) \subseteq O$ +We know that $x \in \operatorname{Dom} \left( {f} \right)$. +Also, $\operatorname{Dom} \left( {f} \right)$ is [[Definition:Open Set (Real Analysis)|open]]. +This allows us to choose $\delta \in \R_{>0}$ small enough such that: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq \operatorname{Dom} \left( {f} \right)$ +Having chosen $\epsilon$ and $\delta$ in this way, we have, where $f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]$ is the [[Definition:Image of Subset under Mapping|image]] of $\left( {x - \delta \,.\,.\, x + \delta} \right)$ by $f$: +:$f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] \subseteq \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ as $f \left( {y} \right) \in \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ whenever $y \in \left( {x - \delta \,.\,.\, x + \delta} \right)$ +which implies: +:$f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] \subseteq O$ as $\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right) \subseteq O$. +$(3): \quad$ It is shown that the [[Definition:Real Interval|interval]] $\left( {x - \delta \,.\,.\, x + \delta} \right)$ is a [[Definition:Subset|subset]] of $f^{-1} \left [{O}\right]$. +Keep in mind that by [[Subset of Domain is Subset of Preimage of Image]]: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left[ {f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]} \right]$ +Continuing by elaborating on $f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] \subseteq O$: +{{begin-eqn}} +{{eqn | o = \implies + | l = f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] \subseteq O + | r = f^{-1} \left[ {f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]} \right] \subseteq f^{-1} \left [{O}\right] + | c = by [[Image of Subset is Subset of Image]] +}} +{{eqn | o = \implies + | r = \left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left [{O}\right] + | c = as $\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left[ {f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]} \right]$ +}} +{{end-eqn}} +Because: +:$f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] \subseteq O$ +it follows that: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left [{O}\right]$ +Since $x$ is an arbitrary point in $f^{-1} \left [{O}\right]$, it follows by the definition of [[Definition:Open Set (Real Analysis)|open set]] that $f^{-1} \left [{O}\right]$ is [[Definition:Open Set (Real Analysis)|open]]. +{{qed|lemma}} +=== Sufficient Condition === +Let $\operatorname{Dom} \left( {f} \right)$ be the [[Definition:Domain (Set Theory)/Mapping|domain]] of $f$. 
+Let $\operatorname{Im} \left( {f} \right)$ be the [[Definition:Image of Mapping|image]] of $f$. +Let $f^{-1} \left [{O}\right]$ be the [[Definition:Preimage of Subset under Mapping|preimage]] of $O$ under $f$. +Thus by definition: +: $\operatorname{Im} \left( {f} \right)$ is the [[Definition:Set|set]] of [[Definition:Element|points]] $q$ in the [[Definition:Codomain of Mapping|codomain]] of $f$ satisfying $q = f \left( {p} \right)$ for a point $p$ in $\operatorname{Dom} \left( {f} \right)$. +:$f^{-1} \left [{O}\right]$ is the [[Definition:Set|set]] of [[Definition:Element|points]] $p$ in $\operatorname{Dom} \left( {f} \right)$ such that $f \left( {p} \right) \in O$. +Let $f^{-1} \left [{O}\right]$ be [[Definition:Open Set (Real Analysis)|open]] for every [[Definition:Open Set (Real Analysis)|open real set]] $O$ that overlaps with $\operatorname{Im} \left( {f} \right)$. +We need to show that $f$ is [[Definition:Continuous Real Function|continuous]]. +Let $O$ be an [[Definition:Open Set (Real Analysis)|open real set]] that overlaps with $\operatorname{Im} \left( {f} \right)$. +$(1): \quad$ It is shown that $\operatorname{Dom} \left( {f} \right)$ is [[Definition:Non-Empty Set|non-empty]]. +A point $q_1$ exists in $O \cap \operatorname{Im} \left( {f} \right)$ as $O$ and $\operatorname{Im} \left( {f} \right)$ overlap. +In particular, $q_1 \in \operatorname{Im} \left( {f} \right)$. +Therefore, by the definition of $\operatorname{Im} \left( {f} \right)$, a point $p_1$ in $\operatorname{Dom} \left( {f} \right)$ exists satisfying $q_1 = f \left( {p_1} \right)$. +Accordingly, $\operatorname{Dom} \left( {f} \right)$ is [[Definition:Non-Empty Set|non-empty]]. +$(2): \quad$ It is shown that the [[Definition:Set|set]] $f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$ is [[Definition:Open Set (Real Analysis)|open]]. +Let $x$ be a point in $\operatorname{Dom} \left( {f} \right)$. +Let $\epsilon > 0$ be given. +The [[Definition:Open Real Interval|open interval]] $\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ overlaps with $\operatorname{Im} \left( {f} \right)$ as $f \left( {x} \right) \in \operatorname{Im} \left( {f} \right)$. +In other words, $\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ is an [[Definition:Open Set (Real Analysis)|open real set]] that overlaps with $\operatorname{Im} \left( {f} \right)$. +Accordingly, $f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$ is [[Definition:Open Set (Real Analysis)|open]] by assumption. +$(3): \quad$ It is shown that the [[Definition:Real Interval|interval]] $\left( {x - \delta \,.\,.\, x + \delta} \right)$ is a [[Definition:Subset|subset]] of $f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$. 
+By the definition of [[Definition:Preimage of Subset under Mapping|preimage]] of $\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ under $f$: +:$x \in f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$ +as: +: $x \in \operatorname{Dom} \left( {f} \right)$ and $f \left( {x} \right) \in \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ +Since $f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$ is [[Definition:Open Set (Real Analysis)|open]], a $\delta \in \R_{>0}$ exists such that: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$ +which implies: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq \operatorname{Dom} \left( {f} \right)$ as $f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right] \subseteq \operatorname{Dom} \left( {f} \right)$ by the definition of $f^{-1}$ +$(4): \quad$ It is shown that the [[Definition:Set|set]] $f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]$ is a [[Definition:Subset|subset]] of $\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$. +Keep in mind that: +Because: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq \operatorname{Dom} \left( {f} \right)$ +it follows that: +:$f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]$ is defined. +Because: +:$f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right] \subseteq \operatorname{Dom} \left( {f} \right)$ +it follows that: +:$f \left[ {f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]} \right]$ is defined. 
+By [[Subset of Codomain is Superset of Image of Preimage]]: +:$f \left[ {f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]} \right] \subseteq \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ +We continue by elaborating on $\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$: +{{begin-eqn}} +{{eqn | l = \left( {x - \delta \,.\,.\, x + \delta} \right) + | o = \subseteq + | r = f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right] +}} +{{eqn | ll= \implies + | l = f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] + | o = \subseteq + | r = f \left[ {f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]} \right] + | c = by [[Image of Subset is Subset of Image]] +}} +{{eqn | ll= \implies + | l = f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] + | o = \subseteq + | r = \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right) + | c = as $f \left[ {f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]} \right] \subseteq \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ +}} +{{end-eqn}} +Because: +:$\left( {x - \delta \,.\,.\, x + \delta} \right) \subseteq f^{-1} \left[ {\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)} \right]$ +it follows that: +:$f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right] \subseteq \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ +In other words, a point in $f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]$ is also a point in $\left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$. +Accordingly, let $y \in \left( {x - \delta \,.\,.\, x + \delta} \right)$. +Because: +: $f \left( {y} \right) \in f \left[ {\left( {x - \delta \,.\,.\, x + \delta} \right)} \right]$ +it follows that: +:$f \left( {y} \right) \in \left( {f \left( {x} \right) - \epsilon \,.\,.\, f \left( {x} \right) + \epsilon} \right)$ +Therefore, by the definition of continuity, $f$ is [Definition:Continuous Real Function at Point|continuous]] at $x$. +Since $x$ is an arbitrary point in the [[Definition:Domain of Mapping|domain]] of $f$, $f$ is [[Definition:Continuous Real Function|continuous]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Trisecting the Angle/Neusis Construction} +Tags: Trisecting the Angle + +\begin{theorem} +Let $\alpha$ be an [[Definition:Angle|angle]] which is to be [[Definition:Trisection|trisected]]. +This can be achieved by means of a [[Definition:Neusis Construction|neusis construction]]. +\end{theorem} + +\begin{proof} +We have that $\angle BCD + \angle ACB$ make a [[Definition:Straight Angle|straight angle]]. +As $CD = AB$ by construction, $CD = BC$ by definition of [[Definition:Radius of Circle|radius of circle]]. +Thus $\triangle BCD$ is [[Definition:Isosceles Triangle|isosceles]]. +By [[Isosceles Triangle has Two Equal Angles]]: +:$\angle CBD = \angle CDB$ +From [[Sum of Angles of Triangle equals Two Right Angles]]: +:$\angle BCD + 2 \angle CBD$ equals two [[Definition:Right Angle|right angles]]. 
+Thus:
+:$2 \angle CBD = \angle ACB$
+Similarly, by [[Isosceles Triangle has Two Equal Angles]]:
+:$\angle ACB = \angle CAB$
+and again from [[Sum of Angles of Triangle equals Two Right Angles]]:
+:$\angle ABC + 2 \angle ACB$ equals two [[Definition:Right Angle|right angles]].
+and so:
+:$\angle ABC + 4 \angle CBD$ equals two [[Definition:Right Angle|right angles]].
+But $\alpha + \angle ABC + \angle CBD$ make a [[Definition:Straight Angle|straight angle]].
+Thus:
+:$\alpha + \angle ABC + \angle CBD = \angle ABC + 4 \angle CBD$
+and so:
+:$\alpha = 3 \angle CBD$
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Trisecting the Angle by Compass and Straightedge Construction is Impossible}
+Tags: Trisecting the Angle
+
+\begin{theorem}
+There is no [[Definition:Compass and Straightedge Construction|compass and straightedge construction]] for the [[Definition:Trisection|trisection]] of the general [[Definition:Angle|angle]].
+\end{theorem}
+
+\begin{proof}
+Let $OA$ and $OB$ [[Definition:Intersection (Geometry)|intersect]] at $O$.
+It will be shown that there is no general method using a [[Definition:Compass and Straightedge Construction|compass and straightedge construction]] to construct $OC$ such that $\angle AOB = 3 \times \angle AOC$.
+It is [[Definition:Sufficient Condition|sufficient]] to demonstrate that this is impossible for one specific [[Definition:Plane Angle|angle]].
+Hence we choose $\angle AOB = 60 \degrees$.
+Let $A$ and $B$ be [[Definition:Point|points]] on the [[Definition:Unit Circle|unit circle]] whose [[Definition:Center of Circle|center]] is at $\tuple {0, 0}$.
+Let $A$ lie on the [[Definition:X-Axis|$x$-axis]].
+Thus:
+:$O$ is the point $\tuple {0, 0}$
+:$A$ is the point $\tuple {1, 0}$
+:$B$ is the point $\tuple {\cos 60 \degrees, \sin 60 \degrees}$
+These all belong to $\Q \sqbrk {\sqrt 3}$.
+[[Definition:Trisection|Trisection]] of $\angle AOB$ is equivalent to constructing the [[Definition:Point|point]] $\tuple {\cos 20 \degrees, \sin 20 \degrees}$.
+From [[Triple Angle Formula for Cosine]]:
+:$\cos 3 \theta = 4 \cos^3 \theta - 3 \cos \theta$
+so:
+:$8 \cos^3 20 \degrees - 6 \cos 20 \degrees = 2 \cos 60 \degrees = 1$
+Thus $\cos 20 \degrees$ is a [[Definition:Root of Polynomial|root]] of the [[Definition:Polynomial|polynomial]]:
+:$8 x^3 - 6 x - 1$
+which by [[Irreducible Polynomial/Examples/8 x^3 - 6 x - 1 in Rationals|Irreducible Polynomial: $8 x^3 - 6 x - 1$ in Rationals]] is [[Definition:Irreducible Polynomial|irreducible]] over $\Q$.
+Thus $\cos 20 \degrees$ is [[Definition:Algebraic Number|algebraic]] over $\Q$ with [[Definition:Degree of Algebraic Number|degree]] $3$.
+Thus by [[Algebraic Element of Degree 3 is not Element of Field Extension of Degree Power of 2]], $\cos 20 \degrees$ is not an [[Definition:Element|element]] of any [[Definition:Field Extension|extension]] of $\Q$ of [[Definition:Degree of Field Extension|degree]] $2^m$.
+The result follows from [[Point in Plane is Constructible iff Coordinates in Extension of Degree Power of 2]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Doubling the Cube by Compass and Straightedge Construction is Impossible}
+Tags: Doubling the Cube, Field Extensions
+
+\begin{theorem}
+There is no [[Definition:Compass and Straightedge Construction|compass and straightedge construction]] to allow a [[Definition:Cube (Geometry)|cube]] to be constructed whose [[Definition:Volume|volume]] is double that of a given [[Definition:Cube (Geometry)|cube]].
+\end{theorem}
+
+\begin{proof}
+Suppose it is possible.
+Then from a [[Definition:Cube (Geometry)|cube]] of [[Definition:Edge of Polyhedron|edge]] [[Definition:Length of Line|length]] $L$ we can construct a new [[Definition:Cube (Geometry)|cube]] with [[Definition:Edge of Polyhedron|edge]] [[Definition:Length of Line|length]] $\sqrt [3] 2 L$.
+$\sqrt [3] 2$ is [[Definition:Algebraic Number|algebraic]] of [[Definition:Degree of Algebraic Number|degree]] $3$.
+This contradicts [[Constructible Length with Compass and Straightedge]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Squaring the Circle by Compass and Straightedge Construction is Impossible}
+Tags: Squaring the Circle
+
+\begin{theorem}
+There is no [[Definition:Compass and Straightedge Construction|compass and straightedge construction]] to allow a [[Definition:Square (Geometry)|square]] to be constructed whose [[Definition:Area|area]] is equal to that of a given [[Definition:Circle|circle]].
+\end{theorem}
+
+\begin{proof}
+[[Squaring the Circle]] consists of constructing a [[Definition:Line Segment|line segment]] whose [[Definition:Length (Linear Measure)|length]] is $\sqrt \pi$ times that of a given [[Definition:Line Segment|line segment]].
+From [[Constructible Length with Compass and Straightedge]], any such constructible [[Definition:Line Segment|line segment]] has a [[Definition:Length (Linear Measure)|length]] which is an [[Definition:Algebraic Number|algebraic number]] whose [[Definition:Degree of Algebraic Number|degree]] is a power of $2$.
+But [[Pi is Transcendental|$\pi$ is transcendental]].
+If $\sqrt \pi$ were [[Definition:Algebraic Number|algebraic]], then so would be its square $\pi$.
+Hence $\sqrt \pi$ is not an [[Definition:Algebraic Number|algebraic number]], let alone one whose [[Definition:Degree of Algebraic Number|degree]] is a power of $2$.
+Therefore any attempt at such a construction will fail.
+{{Qed}}
+\end{proof}<|endoftext|>
+\section{Difference between Distances from Point on Hyperbola to Foci is Constant}
+Tags: Hyperbolas
+
+\begin{theorem}
+Let $K$ be a [[Definition:Hyperbola|hyperbola]].
+Let $F_1$ and $F_2$ be the [[Definition:Focus of Hyperbola|foci]] of $K$.
+Let $P$ be an arbitrary [[Definition:Point|point]] on $K$.
+Then the [[Definition:Distance (Linear Measure)|distance]] from $P$ to $F_1$ minus the [[Definition:Distance (Linear Measure)|distance]] from $P$ to $F_2$ is [[Definition:Constant|constant]] for all $P$ on $K$.
+\end{theorem}<|endoftext|>
+\section{Time Taken for Body to Fall at Earth's Surface}
+Tags: Mechanics, Gravity
+
+\begin{theorem}
+Let an [[Definition:Object|object]] $m$ be released above ground from a point near the [[Definition:Earth|Earth's]] surface and allowed to fall freely.
+Let $m$ fall a [[Definition:Displacement|distance]] $s$ in [[Definition:Time|time]] $t$.
+Then:
+:$s = \dfrac 1 2 g t^2$
+or:
+:$t = \sqrt {\dfrac {2 s} g}$
+where $g$ is the [[Acceleration Due to Gravity]] at the height through which $m$ falls.
+It is supposed that the distance $s$ is small enough that $g$ can be considered constant throughout.
+\end{theorem}
+
+\begin{proof}
+From [[Body under Constant Acceleration/Distance after Time|Body under Constant Acceleration: Distance after Time]]:
+:$\mathbf s = \mathbf u t + \dfrac {\mathbf a t^2} 2$
+Here the body falls from [[Definition:Stationary|rest]], so:
+:$\mathbf u = \mathbf 0$
+Thus:
+:$\mathbf s = \dfrac {\mathbf g t^2} 2$
+and so taking magnitudes:
+:$s = \dfrac {g t^2} 2$
+It follows by multiplying by $\dfrac 2 g$ that:
+:$t^2 = \dfrac {2 s} g$
+whence:
+:$t = \sqrt {\dfrac {2 s} g}$
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Length of Chord of Circle}
+Tags: Circles, Length of Chord of Circle
+
+\begin{theorem}
+Let $C$ be a [[Definition:Circle|circle]] of [[Definition:Radius of Circle|radius]] $r$.
+Let $AB$ be a [[Definition:Chord of Circle|chord]] which joins the [[Definition:Endpoint of Line|endpoints]] of the [[Definition:Arc of Circle|arc]] $ADB$.
+Then:
+:$AB = 2 r \sin \dfrac \theta 2$
+where $\theta$ is the [[Definition:Angle|angle]] [[Definition:Subtend|subtended]] by $AB$ at the [[Definition:Center of Circle|center]] of $C$.
+\end{theorem}
+
+\begin{proof}
+Let $O$ be the [[Definition:Center of Circle|center]] of $C$.
+We have $AO = BO$ since they are [[Definition:Radius|radii]].
+Therefore $\triangle AOB$ is [[Definition:Isosceles Triangle|isosceles]].
+By [[Isosceles Triangle has Two Equal Angles]]:
+:$\angle OAB = \angle OBA$
+By [[Sum of Angles of Triangle equals Two Right Angles]]:
+:$\angle OAB + \angle OBA + \theta = 180 \degrees$
+Therefore $\angle OAB = \dfrac {180 \degrees - \theta} 2 = 90 \degrees - \dfrac \theta 2$.
+Thus:
+{{begin-eqn}}
+{{eqn | l = \dfrac {AB} {\sin \theta}
+ | r = \dfrac {BO} {\sin \angle OAB}
+ | c = [[Law of Sines]]
+}}
+{{eqn | ll= \leadsto
+ | l = AB
+ | r = \dfrac {BO \sin \theta} {\sin \angle OAB}
+ | c = 
+}}
+{{eqn | r = \dfrac {2 r \sin \frac \theta 2 \cos \frac \theta 2} {\map \sin {90 \degrees - \frac \theta 2} }
+ | c = [[Double Angle Formula for Sine]]
+}}
+{{eqn | r = \dfrac {2 r \sin \frac \theta 2 \cos \frac \theta 2} {\cos \frac \theta 2}
+ | c = [[Sine of Complement equals Cosine]]
+}}
+{{eqn | r = 2 r \sin \dfrac \theta 2
+ | c = 
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}
+
+\begin{proof}
+:[[File:LengthOfChord.png|300px]]
+Let $O$ be the [[Definition:Center of Circle|center]] of $C$.
+Let $AB$ be [[Definition:Bisection|bisected]] by $OD$.
+Let $E$ be the [[Definition:Point|point]] at which $OD$ meets $AB$.
+Consider the pair of [[Definition:Triangle (Geometry)|triangles]] $\triangle AOE$ and $\triangle BOE$.
+We see that:
+:$AE = EB$ since $AB$ is [[Definition:Bisection|bisected]] by $OD$
+:$AO = BO$ since they are [[Definition:Radius|radii]]
+:$OE = OE$ since they are common sides.
+By [[Triangle Side-Side-Side Equality]], $\triangle AOE = \triangle BOE$.
+Then we have:
+:$\angle AOE = \angle BOE = \dfrac \theta 2$
+:$\angle OEA = \angle OEB = \dfrac {180 \degrees} 2 = 90 \degrees$
+By {{Defof|Sine Function}}:
+:$\sin \dfrac \theta 2 = \dfrac {AE} {AO} = \dfrac {\frac 1 2 AB} r$
+Rearranging, we get:
+:$AB = 2 r \sin \dfrac \theta 2$
+as desired.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Spherical Triangles with Same Angles are Congruent}
+Tags: Spherical Triangles
+
+\begin{theorem}
+Two [[Definition:Spherical Triangle|triangles]] on the surface of a given [[Definition:Sphere (Geometry)|sphere]] which have the same [[Definition:Spherical Angle|angles]] are [[Definition:Congruence (Geometry)|congruent]].
+\end{theorem}<|endoftext|>
+\section{Sum of Angles of Spherical Triangle}
+Tags: Spherical Triangles
+
+\begin{theorem}
+The sum of the [[Definition:Spherical Angle|angles]] of a [[Definition:Spherical Triangle|spherical triangle]] is between $\pi$ and $3 \pi$ [[Definition:Radian|radians]].
+\end{theorem}<|endoftext|>
+\section{Ptolemy's Theorem}
+Tags: Cyclic Quadrilaterals
+
+\begin{theorem}
+Let $ABCD$ be a [[Definition:Cyclic Quadrilateral|cyclic quadrilateral]].
+Then:
+:$AB \times CD + AD \times BC = AC \times BD$
+\end{theorem}
+
+\begin{proof}
+:[[File:PtolemysTheorem.png|450px]]
+Let $ABCD$ be a [[Definition:Cyclic Quadrilateral|cyclic quadrilateral]].
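+Before the synthetic argument, the identity can be checked numerically for any particular cyclic quadrilateral.
+The following minimal Python sketch does so for four points on the unit circle; the chosen angle values are an illustrative assumption and play no part in the proof.
+<syntaxhighlight lang="python">
+import math
+
+# Four points on the unit circle, listed in circular order A, B, C, D,
+# so that ABCD is a cyclic quadrilateral.
+angles = [0.3, 1.1, 2.4, 4.0]  # arbitrary increasing values in [0, 2*pi)
+A, B, C, D = [(math.cos(t), math.sin(t)) for t in angles]
+
+def dist(P, Q):
+    return math.hypot(P[0] - Q[0], P[1] - Q[1])
+
+lhs = dist(A, B) * dist(C, D) + dist(A, D) * dist(B, C)
+rhs = dist(A, C) * dist(B, D)
+print(abs(lhs - rhs) < 1e-12)  # True: AB x CD + AD x BC = AC x BD
+</syntaxhighlight>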
+By [[Angles in Same Segment of Circle are Equal]]: +:$\angle BAC = \angle BDC$ +and: +:$\angle ADB = \angle ACB$ +By [[Construction of Equal Angle]], construct $E$ on $AC$ such that: +:$\angle ABE = \angle CBD$ +Since: +:$\angle ABE + \angle CBE = \angle ABC = \angle CBD + \angle ABD$ +it follows that: +:$\angle CBE = \angle ABD$ +By [[Equiangular Triangles are Similar]]: +:$\triangle ABE$ is [[Definition:Similar Triangles|similar]] to $\triangle DBC$ +and: +:$\triangle ABD$ is [[Definition:Similar Triangles|similar]] to $\triangle EBC$ +Thus: +:$\dfrac {AE} {AB} = \dfrac {CD} {BD}$ +and: +:$\dfrac {CE} {BC} = \dfrac {DA} {BD}$ +Equivalently: +:$AE \times BD = AB \times CD$ +and: +:$CE \times BD = BC \times DA$ +Adding: +:$AE \times BD + CE \times BD = AB \times CD + BC \times DA$ +Factorizing: +:$\paren {AE + CE} \times BD = AB \times CD + BC \times DA$ +But: +:$AE + CE = AC$ +so: +:$AC \times BD = AB \times CD + BC \times DA$ +{{qed}} +{{Namedfor|Claudius Ptolemy|cat = Ptolemy}} +\end{proof}<|endoftext|> +\section{Spherical Law of Sines} +Tags: Spherical Trigonometry, Named Theorems, Spherical Law of Sines + +\begin{theorem} +Let $\triangle ABC$ be a [[Definition:Spherical Triangle|spherical triangle]] on the surface of a [[Definition:Sphere (Geometry)|sphere]] whose [[Definition:Center of Sphere|center]] is $O$. +Let the [[Definition:Side of Spherical Triangle|sides]] $a, b, c$ of $\triangle ABC$ be measured by the [[Definition:Subtend|angles subtended]] at $O$, where $a, b, c$ are [[Definition:Opposite (in Triangle)|opposite]] $A, B, C$ respectively. +Then: +:$\dfrac {\sin a} {\sin A} = \dfrac {\sin b} {\sin B} = \dfrac {\sin c} {\sin C}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sin b \sin c \cos A + | r = \cos a - \cos b \cos c + | c = [[Spherical Law of Cosines]] +}} +{{eqn | ll= \leadsto + | l = \sin^2 b \sin^2 c \cos^2 A + | r = \cos^2 a - 2 \cos a \cos b \cos c + \cos^2 b \cos^2 c + | c = +}} +{{eqn | ll= \leadsto + | l = \sin^2 b \sin^2 c \paren {1 - \sin^2 A} + | r = \cos^2 a - 2 \cos a \cos b \cos c + \cos^2 b \cos^2 c + | c = [[Sum of Squares of Sine and Cosine]] +}} +{{eqn | ll= \leadsto + | l = \sin^2 b \sin^2 c - \sin^2 b \sin^2 c \sin^2 A + | r = \cos^2 a - 2 \cos a \cos b \cos c + \cos^2 b \cos^2 c + | c = multiplying out +}} +{{eqn | ll= \leadsto + | l = \paren {1 - \cos^2 b} \paren {1 - \cos^2 c} - \sin^2 b \sin^2 c \sin^2 A + | r = \cos^2 a - 2 \cos a \cos b \cos c + \cos^2 b \cos^2 c + | c = [[Sum of Squares of Sine and Cosine]] +}} +{{eqn | ll= \leadsto + | l = 1 - \cos^2 b - \cos^2 c + \cos^2 b \cos^2 c - \sin^2 b \sin^2 c \sin^2 A + | r = \cos^2 a - 2 \cos a \cos b \cos c + \cos^2 b \cos^2 c + | c = multiplying out +}} +{{eqn | n = 1 + | ll= \leadsto + | l = \sin^2 b \sin^2 c \sin^2 A + | r = 1 - \cos^2 a - \cos^2 b - \cos^2 c + 2 \cos a \cos b \cos c + | c = rearranging and simplifying +}} +{{end-eqn}} +Let $X \in \R_{>0}$ such that: +:$X^2 \sin^2 a \sin^2 b \sin^2 c = 1 - \cos^2 a - \cos^2 b - \cos^2 c + 2 \cos a \cos b \cos c$ +Then from $(1)$: +{{begin-eqn}} +{{eqn | l = \dfrac {X^2 \sin^2 a \sin^2 b \sin^2 c} {\sin^2 b \sin^2 c \sin^2 A} + | o = = + | r = \dfrac {1 - \cos^2 a - \cos^2 b - \cos^2 c + 2 \cos a \cos b \cos c} {1 - \cos^2 a - \cos^2 b - \cos^2 c + 2 \cos a \cos b \cos c} + | c = +}} +{{eqn | ll= \leadsto + | l = X^2 + | r = \dfrac {\sin^2 A} {\sin^2 a} + | c = +}} +{{end-eqn}} +In a [[Definition:Spherical Triangle|spherical triangle]], all of the [[Definition:Side of Spherical Triangle|sides]] are less than $\pi$ 
[[Definition:Radian|radians]]. +The same applies to the [[Definition:Spherical Angle|angles]]. +From [[Shape of Sine Function]]: +:$\sin \theta > 0$ for all $0 < \theta < \pi$ +Hence the [[Definition:Negative Square Root|negative root]] of $\dfrac {\sin^2 A} {\sin^2 a}$ does not apply, and so: +:$X = \dfrac {\sin A} {\sin a}$ +Similarly, from applying the [[Spherical Law of Cosines]] to $\cos B$ and $\cos C$: +{{begin-eqn}} +{{eqn | l = \sin a \sin c \cos B + | r = \cos b - \cos a \cos c +}} +{{eqn | l = \sin a \sin b \cos C + | r = \cos c - \cos a \cos b +}} +{{end-eqn}} +we arrive at the same point: +{{begin-eqn}} +{{eqn | l = X + | r = \dfrac {\sin B} {\sin b} +}} +{{eqn | r = \dfrac {\sin A} {\sin a} +}} +{{end-eqn}} +where: +:$X^2 \sin^2 a \sin^2 b \sin^2 c = 1 - \cos^2 a - \cos^2 b - \cos^2 c + 2 \cos a \cos b \cos c$ +as before. +Hence we have: +:$\dfrac {\sin a} {\sin A} = \dfrac {\sin b} {\sin B} = \dfrac {\sin c} {\sin C}$ +{{qed}} +\end{proof} + +\begin{proof} +:[[File:Spherical-Cosine-Formula-2.png|500px]] +Let $A$, $B$ and $C$ be the [[Definition:Vertex of Polygon|vertices]] of a [[Definition:Spherical Triangle|spherical triangle]] on the surface of a [[Definition:Sphere (Geometry)|sphere]] $S$. +By definition of a [[Definition:Spherical Triangle|spherical triangle]], $AB$, $BC$ and $AC$ are [[Definition:Arc of Circle|arcs]] of [[Definition:Great Circle|great circles]] on $S$. +By definition of a [[Definition:Great Circle|great circle]], the [[Definition:Center of Circle|center]] of each of these [[Definition:Great Circle|great circles]] is $O$. +Let $O$ be joined to each of $A$, $B$ and $C$. +Let $P$ be an arbitrary [[Definition:Point|point]] on $OC$. +Construct $PQ$ [[Definition:Perpendicular|perpendicular]] to $OA$ meeting $OA$ at $Q$. +Construct $PR$ [[Definition:Perpendicular|perpendicular]] to $OB$ meeting $OB$ at $R$. +In the [[Definition:Plane|plane]] $OAB$: +:construct $QS$ [[Definition:Perpendicular|perpendicular]] to $OA$ +:construct $RS$ [[Definition:Perpendicular|perpendicular]] to $OB$ +where $S$ is the [[Definition:Point|point]] where $QS$ and $RS$ [[Definition:Intersection (Geometry)|intersect]]. +Let $OS$ and $PS$ be joined. +Let [[Definition:Tangent Line|tangents]] be constructed at $A$ to the [[Definition:Arc of Circle|arcs]] of the [[Definition:Great Circle|great circles]] $AC$ and $AB$. +These [[Definition:Tangent Line|tangents]] [[Definition:Containment of Angle|contain]] the [[Definition:Spherical Angle|spherical angle]] $A$. +But by construction, $QS$ and $QP$ are [[Definition:Parallel Lines|parallel]] to these [[Definition:Tangent Line|tangents]] +Hence $\angle PQS = \sphericalangle A$. +Similarly, $\angle PRS = \sphericalangle B$. +Also we have: +{{begin-eqn}} +{{eqn | l = \angle COB + | r = a +}} +{{eqn | l = \angle COA + | r = b +}} +{{eqn | l = \angle AOB + | r = c +}} +{{end-eqn}} +It is to be proved that $PS$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $AOB$. +By construction, $OQ$ is [[Definition:Perpendicular|perpendicular]] to both $PQ$ and $QS$. +Thus $OQ$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $PQS$. +Similarly, $OR$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $PRS$. +Thus $PS$ is [[Definition:Perpendicular|perpendicular]] to both $OQ$ and $OR$. 
+Thus $PS$ is [[Definition:Perpendicular|perpendicular]] to every [[Definition:Straight Line|line]] in the [[Definition:Plane|plane]] of $OQ$ and $OR$.
+That is, $PS$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $OAB$.
+In particular, $PS$ is [[Definition:Perpendicular|perpendicular]] to $OS$, $SQ$ and $SR$.
+It follows that $\triangle PQS$ and $\triangle PRS$ are [[Definition:Right Triangle|right triangles]].
+From the [[Definition:Right Triangle|right triangles]] $\triangle OQP$ and $\triangle ORP$, we have:
+{{begin-eqn}}
+{{eqn | n = 1
+ | l = PQ
+ | r = OP \sin b
+}}
+{{eqn | n = 2
+ | l = PR
+ | r = OP \sin a
+}}
+{{eqn | n = 3
+ | l = OQ
+ | r = OP \cos b
+}}
+{{eqn | n = 4
+ | l = OR
+ | r = OP \cos a
+}}
+{{end-eqn}}
+From the [[Definition:Right Triangle|right triangles]] $\triangle PQS$ and $\triangle PRS$, we have:
+{{begin-eqn}}
+{{eqn | l = PS
+ | r = PQ \sin \angle PQS
+}}
+{{eqn | r = PQ \sin A
+}}
+{{eqn | l = PS
+ | r = PR \sin \angle PRS
+}}
+{{eqn | r = PR \sin B
+}}
+{{eqn | ll= \leadsto
+ | l = OP \sin b \sin A
+ | r = OP \sin a \sin B
+ | c = from $(1)$ and $(2)$
+}}
+{{eqn | ll= \leadsto
+ | l = \dfrac {\sin a} {\sin A}
+ | r = \dfrac {\sin b} {\sin B}
+ | c = 
+}}
+{{end-eqn}}
+The result follows by applying this technique [[Definition:Mutatis Mutandis|mutatis mutandis]] to the other [[Definition:Spherical Angle|angles]] of $ABC$.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Spherical Law of Cosines}
+Tags: Spherical Trigonometry, Named Theorems, Spherical Law of Cosines
+
+\begin{theorem}
+Let $\triangle ABC$ be a [[Definition:Spherical Triangle|spherical triangle]] on the surface of a [[Definition:Sphere (Geometry)|sphere]] whose [[Definition:Center of Sphere|center]] is $O$.
+Let the [[Definition:Side of Spherical Triangle|sides]] $a, b, c$ of $\triangle ABC$ be measured by the [[Definition:Subtend|angles subtended]] at $O$, where $a, b, c$ are [[Definition:Opposite (in Triangle)|opposite]] $A, B, C$ respectively.
+Then:
+:$\cos a = \cos b \cos c + \sin b \sin c \cos A$
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = \sin c \sin a \cos B
+ | r = \cos b - \cos c \cos a
+ | c = [[Spherical Law of Cosines]]
+}}
+{{eqn | r = \cos b - \cos c \paren {\cos b \cos c + \sin b \sin c \cos A}
+ | c = [[Spherical Law of Cosines]]
+}}
+{{eqn | r = \cos b \paren {1 - \cos^2 c} - \sin b \sin c \cos c \cos A
+ | c = rearranging
+}}
+{{eqn | r = \sin^2 c \cos b - \sin b \sin c \cos c \cos A
+ | c = [[Sum of Squares of Sine and Cosine]]
+}}
+{{eqn | ll= \leadsto
+ | l = \sin a \cos B
+ | r = \sin c \cos b - \sin b \cos c \cos A
+ | c = simplifying
+}}
+{{end-eqn}}
+{{begin-eqn}}
+{{eqn | l = \sin a \sin b \cos C
+ | r = \cos c - \cos a \cos b
+ | c = [[Spherical Law of Cosines]]
+}}
+{{eqn | r = \cos c - \cos b \paren {\cos b \cos c + \sin b \sin c \cos A}
+ | c = [[Spherical Law of Cosines]]
+}}
+{{eqn | r = \cos c \paren {1 - \cos^2 b} - \sin b \sin c \cos b \cos A
+ | c = rearranging
+}}
+{{eqn | r = \sin^2 b \cos c - \sin b \sin c \cos b \cos A
+ | c = [[Sum of Squares of Sine and Cosine]]
+}}
+{{eqn | ll= \leadsto
+ | l = \sin a \cos C
+ | r = \sin b \cos c - \sin c \cos b \cos A
+ | c = simplifying
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}
+
+\begin{proof}
+:[[File:Spherical-Cosine-Formula-Analog.png|500px]]
+Suppose $c$ is less than $\dfrac \pi 2$.
+Let $BA$ be [[Definition:Production|produced]] to $D$ so that $BD = \dfrac \pi 2$.
+Then:
+:$AD = \dfrac \pi 2 - c$
+and:
+:$\angle CAD = \pi - A$
+Let $C$ and $D$ be joined by an [[Definition:Arc of Circle|arc]] of a [[Definition:Great Circle|great circle]], denoted $x$.
+From the [[Definition:Spherical Triangle|spherical triangle]] $DAC$, using the [[Spherical Law of Cosines]]:
+{{begin-eqn}}
+{{eqn | l = \cos x
+ | r = \map \cos {\dfrac \pi 2 - c} \cos b + \map \sin {\dfrac \pi 2 - c} \sin b \, \map \cos {\pi - A}
+ | c = 
+}}
+{{eqn | r = \sin c \cos b - \cos c \sin b \cos A
+ | c = 
+}}
+{{end-eqn}}
+From the [[Definition:Spherical Triangle|spherical triangle]] $DBC$, using the [[Spherical Law of Cosines]]:
+{{begin-eqn}}
+{{eqn | l = \cos x
+ | r = \cos \dfrac \pi 2 \cos a + \sin \dfrac \pi 2 \sin a \cos B
+ | c = 
+}}
+{{eqn | r = \sin a \cos B
+ | c = 
+}}
+{{end-eqn}}
+Hence:
+:$\sin a \cos B = \sin c \cos b - \cos c \sin b \cos A$
+The case where $c > \dfrac \pi 2$ is worked similarly, but by making $D$ the [[Definition:Point|point]] between $A$ and $B$ such that $BD$ is $\dfrac \pi 2$.
+{{qed}}
+\end{proof}
+
+\begin{proof}
+:[[File:Spherical-Cosine-Formula.png|500px]]
+Let $A$, $B$ and $C$ be the [[Definition:Vertex of Polygon|vertices]] of a [[Definition:Spherical Triangle|spherical triangle]] on the surface of a [[Definition:Sphere (Geometry)|sphere]] $S$.
+By definition of a [[Definition:Spherical Triangle|spherical triangle]], $AB$, $BC$ and $AC$ are [[Definition:Arc of Circle|arcs]] of [[Definition:Great Circle|great circles]] on $S$.
+By definition of a [[Definition:Great Circle|great circle]], the [[Definition:Center of Circle|center]] of each of these [[Definition:Great Circle|great circles]] is $O$.
+Let $AD$ be the [[Definition:Tangent Line|tangent]] to the [[Definition:Great Circle|great circle]] $AB$.
+Let $AE$ be the [[Definition:Tangent Line|tangent]] to the [[Definition:Great Circle|great circle]] $AC$.
+Thus the [[Definition:Radius of Sphere|radius]] $OA$ of $S$ is [[Definition:Perpendicular|perpendicular]] to $AD$ and $AE$.
+By construction, $AD$ lies in the same [[Definition:Plane|plane]] as $AB$.
+Thus when $OB$ is [[Definition:Production|produced]], it will [[Definition:Intersection (Geometry)|intersect]] $AD$ at $D$, say.
+Similarly, $OC$ can be [[Definition:Production|produced]] to [[Definition:Intersection (Geometry)|intersect]] $AE$ at $E$, say.
+The [[Definition:Spherical Angle|spherical angle]] $\sphericalangle BAC$ is defined as the [[Definition:Angle|angle]] between the [[Definition:Tangent Line|tangents]] $AD$ and $AE$.
+Thus:
+:$\sphericalangle BAC = \angle DAE$
+or, denoting that [[Definition:Spherical Angle|spherical angle]] $\sphericalangle BAC$ as $A$:
+:$A = \angle DAE$
+In the [[Definition:Triangle (Geometry)|(plane) triangle]] $OAD$, we have that $\angle OAD$ is a [[Definition:Right Angle|right angle]].
+We also have that $\angle AOD = \angle AOB$ is equal to $c$, by definition of the [[Definition:Length of Side of Spherical Triangle|length of a side of a spherical triangle]].
+Thus:
+{{begin-eqn}}
+{{eqn | l = AD
+ | r = OA \tan c
+ | c = 
+}}
+{{eqn | l = OD
+ | r = OA \sec c
+ | c = 
+}}
+{{end-eqn}}
+and by similar analysis of $\triangle OAE$, we have:
+{{begin-eqn}}
+{{eqn | l = AE
+ | r = OA \tan b
+ | c = 
+}}
+{{eqn | l = OE
+ | r = OA \sec b
+ | c = 
+}}
+{{end-eqn}}
+From consideration of $\triangle DAE$:
+{{begin-eqn}}
+{{eqn | l = DE^2
+ | r = AD^2 + AE^2 - 2 AD \cdot AE \cos \angle DAE
+ | c = [[Law of Cosines]]
+}}
+{{eqn | n = 1
+ | r = OA^2 \paren {\tan^2 c + \tan^2 b - 2 \tan b \tan c \cos A}
+ | c = 
+}}
+{{end-eqn}}
+From consideration of $\triangle DOE$:
+{{begin-eqn}}
+{{eqn | l = DE^2
+ | r = OD^2 + OE^2 - 2 OD \cdot OE \cos \angle DOE
+ | c = [[Law of Cosines]]
+}}
+{{eqn | n = 2
+ | r = OA^2 \paren {\sec^2 c + \sec^2 b - 2 \sec b \sec c \cos a}
+ | c = as $\angle DOE = \angle BOC$
+}}
+{{end-eqn}}
+Thus:
+{{begin-eqn}}
+{{eqn | l = \sec^2 c + \sec^2 b - 2 \sec b \sec c \cos a
+ | r = \tan^2 c + \tan^2 b - 2 \tan b \tan c \cos A
+ | c = from $(1)$ and $(2)$
+}}
+{{eqn | ll= \leadsto
+ | l = \paren {1 + \tan^2 c} + \paren {1 + \tan^2 b} - 2 \sec b \sec c \cos a
+ | r = \tan^2 c + \tan^2 b - 2 \tan b \tan c \cos A
+ | c = [[Difference of Squares of Secant and Tangent]]
+}}
+{{eqn | ll= \leadsto
+ | l = \sec b \sec c \cos a - 1
+ | r = \tan b \tan c \cos A
+ | c = simplifying
+}}
+{{eqn | ll= \leadsto
+ | l = \cos a - \cos b \cos c
+ | r = \sin b \sin c \cos A
+ | c = multiplying both sides by $\cos b \cos c$
+}}
+{{end-eqn}}
+and the result follows.
+{{qed}}
+\end{proof}
+
+\begin{proof}
+:[[File:Spherical-Cosine-Formula-2.png|500px]]
+Let $A$, $B$ and $C$ be the [[Definition:Vertex of Polygon|vertices]] of a [[Definition:Spherical Triangle|spherical triangle]] on the surface of a [[Definition:Sphere (Geometry)|sphere]] $S$.
+By definition of a [[Definition:Spherical Triangle|spherical triangle]], $AB$, $BC$ and $AC$ are [[Definition:Arc of Circle|arcs]] of [[Definition:Great Circle|great circles]] on $S$.
+By definition of a [[Definition:Great Circle|great circle]], the [[Definition:Center of Circle|center]] of each of these [[Definition:Great Circle|great circles]] is $O$.
+Let $O$ be joined to each of $A$, $B$ and $C$.
+Let $P$ be an arbitrary [[Definition:Point|point]] on $OC$.
+Construct $PQ$ [[Definition:Perpendicular|perpendicular]] to $OA$ meeting $OA$ at $Q$.
+Construct $PR$ [[Definition:Perpendicular|perpendicular]] to $OB$ meeting $OB$ at $R$.
+In the [[Definition:Plane|plane]] $OAB$:
+:construct $QS$ [[Definition:Perpendicular|perpendicular]] to $OA$
+:construct $RS$ [[Definition:Perpendicular|perpendicular]] to $OB$
+where $S$ is the [[Definition:Point|point]] where $QS$ and $RS$ [[Definition:Intersection (Geometry)|intersect]].
+Let $OS$ and $PS$ be joined.
+Let [[Definition:Tangent Line|tangents]] be constructed at $A$ to the [[Definition:Arc of Circle|arcs]] of the [[Definition:Great Circle|great circles]] $AC$ and $AB$.
+These [[Definition:Tangent Line|tangents]] [[Definition:Containment of Angle|contain]] the [[Definition:Spherical Angle|spherical angle]] $A$.
+But by construction, $QS$ and $QP$ are [[Definition:Parallel Lines|parallel]] to these [[Definition:Tangent Line|tangents]].
+Hence $\angle PQS = \sphericalangle A$.
+Similarly, $\angle PRS = \sphericalangle B$.
+Also we have: +{{begin-eqn}} +{{eqn | l = \angle COB + | r = a +}} +{{eqn | l = \angle COA + | r = b +}} +{{eqn | l = \angle AOB + | r = c +}} +{{end-eqn}} +It is to be proved that $PS$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $AOB$. +By construction, $OQ$ is [[Definition:Perpendicular|perpendicular]] to both $PQ$ and $QS$. +Thus $OQ$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $PQS$. +Similarly, $OR$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $PRS$. +Thus $PS$ is [[Definition:Perpendicular|perpendicular]] to both $OQ$ and $OR$. +Thus $PS$ is [[Definition:Perpendicular|perpendicular]] to every [[Definition:Straight Line|line]] in the [[Definition:Plane|plane]] of $OQ$ and $OR$. +That is, $PS$ is [[Definition:Line Perpendicular to Plane|perpendicular]] to the [[Definition:Plane|plane]] $OAB$. +In particular, $PS$ is [[Definition:Perpendicular|perpendicular]] to $OS$, $SQ$ and $SR$ +It follows that $\triangle PQS$ and $\triangle PRS$ are [[Definition:Right Triangle|right triangles]]. +From the [[Definition:Right Triangle|right triangles]] $\triangle OQP$ and $\triangle ORP$, we have: +{{begin-eqn}} +{{eqn | n = 1 + | l = PQ + | r = OP \sin b +}} +{{eqn | n = 2 + | l = PR + | r = OP \sin a +}} +{{eqn | n = 3 + | l = OQ + | r = OP \cos b +}} +{{eqn | n = 4 + | l = OR + | r = OP \cos a +}} +{{end-eqn}} +Let us denote the [[Definition:Angle|angle]] $\angle SOQ$ by $x$. +Then: +:$\angle ROS = c - x$ +We have that: +{{begin-eqn}} +{{eqn | l = OS + | r = OQ \sec x +}} +{{eqn | l = OS + | r = OR \, \map \sec {c - x} +}} +{{eqn | ll= \leadsto + | l = OR \cos x + | r = OQ \, \map \cos {c - x} + | c = +}} +{{eqn | ll= \leadsto + | l = OP \cos a \cos x + | r = OP \cos b \, \map \cos {c - x} + | c = from $(3)$ and $(4)$ +}} +{{eqn | ll= \leadsto + | l = \cos a \cos x + | r = \cos b \paren {\cos c \cos x - \sin c \sin x} + | c = [[Cosine of Difference]] +}} +{{eqn | n = 5 + | ll= \leadsto + | l = \cos a + | r = \cos b \cos c + \cos b \sin c \tan x + | c = dividing both sides by $\cos x$ and multiplying out +}} +{{end-eqn}} +But we also have: +{{begin-eqn}} +{{eqn | l = \tan x + | r = \dfrac {QS} {OQ} + | c = +}} +{{eqn | r = \dfrac {PQ \cos A} {OQ} + | c = +}} +{{eqn | r = \tan b \cos A + | c = +}} +{{eqn | ll= \leadsto + | l = \cos a + | r = \cos b \cos c + \cos b \sin c \tan b \cos A + | c = substituting for $\tan x$ from $(5)$ +}} +{{eqn | r = \cos b \cos c + \sin b \sin c \cos A + | c = +}} +{{end-eqn}} +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Spherical Law of Cosines/Angles} +Tags: Spherical Trigonometry, Named Theorems + +\begin{theorem} +:$\cos A = -\cos B \cos C + \sin B \sin C \cos a$ +\end{theorem} + +\begin{proof} +Let $\triangle A'B'C'$ be the [[Definition:Polar Triangle|polar triangle]] of $\triangle ABC$. +Let the [[Definition:Side of Spherical Triangle|sides]] $a', b', c'$ of $\triangle A'B'C'$ be [[Definition:Opposite (in Triangle)|opposite]] $A', B', C'$ respectively. +From [[Spherical Triangle is Polar Triangle of its Polar Triangle]] we have that: +:not only is $\triangle A'B'C'$ be the [[Definition:Polar Triangle|polar triangle]] of $\triangle ABC$ +:but also $\triangle ABC$ is the [[Definition:Polar Triangle|polar triangle]] of $\triangle A'B'C'$. 
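+As a numerical sanity check of the identity to be derived, the following minimal Python sketch evaluates both sides for a randomly generated spherical triangle; the helper functions and the random vertices are illustrative assumptions only and form no part of the proof.
+<syntaxhighlight lang="python">
+import math
+import random
+
+def normalize(v):
+    n = math.sqrt(sum(x * x for x in v))
+    return tuple(x / n for x in v)
+
+def dot(u, v):
+    return sum(p * q for p, q in zip(u, v))
+
+def angle_between(u, v):
+    return math.acos(max(-1.0, min(1.0, dot(u, v))))
+
+def vertex_angle(P, Q, R):
+    # Spherical angle at P: angle between the tangent directions at P
+    # pointing towards Q and towards R along the respective great circles.
+    def tangent(X):
+        proj = tuple(x - dot(X, P) * p for x, p in zip(X, P))
+        return normalize(proj)
+    return angle_between(tangent(Q), tangent(R))
+
+random.seed(1)
+A, B, C = (normalize([random.gauss(0, 1) for _ in range(3)]) for _ in range(3))
+
+# sides (as angles subtended at the centre) and vertex angles
+a, b, c = angle_between(B, C), angle_between(A, C), angle_between(A, B)
+ang_A, ang_B, ang_C = vertex_angle(A, B, C), vertex_angle(B, A, C), vertex_angle(C, A, B)
+
+# dual (polar) form: cos A = -cos B cos C + sin B sin C cos a
+lhs = math.cos(ang_A)
+rhs = -math.cos(ang_B) * math.cos(ang_C) + math.sin(ang_B) * math.sin(ang_C) * math.cos(a)
+print(abs(lhs - rhs) < 1e-9)  # True
+
+# ordinary form: cos a = cos b cos c + sin b sin c cos A
+print(abs(math.cos(a) - (math.cos(b) * math.cos(c)
+                         + math.sin(b) * math.sin(c) * math.cos(ang_A))) < 1e-9)  # True
+</syntaxhighlight>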
+We have: +{{begin-eqn}} +{{eqn | l = \cos a' + | r = \cos b' \cos c' + \sin b' \sin c' \cos A' + | c = [[Spherical Law of Cosines]] +}} +{{eqn | ll= \leadsto + | l = \map \cos {\pi - A} + | r = \map \cos {\pi - B} \, \map \cos {\pi - C} + \map \sin {\pi - B} \, \map \sin {\pi - C} \, \map \cos {\pi - a} + | c = [[Side of Spherical Triangle is Supplement of Angle of Polar Triangle]] +}} +{{eqn | ll= \leadsto + | l = -\cos A + | r = \paren {-\cos B} \paren {-\cos C} + \map \sin {\pi - B} \, \map \sin {\pi - C} \, \paren {-\cos a} + | c = [[Cosine of Supplementary Angle]] +}} +{{eqn | ll= \leadsto + | l = -\cos A + | r = \paren {-\cos B} \paren {-\cos C} + \sin B \sin C \paren {-\cos a} + | c = [[Sine of Supplementary Angle]] +}} +{{eqn | ll= \leadsto + | l = \cos A + | r = -\cos B \cos C + \sin B \sin C \cos a + | c = simplifying and rearranging +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Spherical Law of Tangents} +Tags: Spherical Trigonometry, Named Theorems + +\begin{theorem} +:$\dfrac {\tan \frac 1 2 \paren {A + B} } {\tan \frac 1 2 \paren {A - B} } = \dfrac {\tan \frac 1 2 \paren {a + b} } {\tan \frac 1 2 \paren {a - b} }$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \tan \dfrac {A + B} 2 + | r = \dfrac {\cos \frac {a - b} 2} {\cos \frac {a + b} 2} \cot \dfrac C 2 + | c = [[Napier's Analogies]] +}} +{{eqn | n = 1 + | ll= \leadsto + | l = \tan \frac {A + B} 2 \cos \frac {a + b} 2 + | r = \cos \frac {a - b} 2 \cot \frac C 2 + | c = more manageable in this form +}} +{{end-eqn}} +{{begin-eqn}} +{{eqn | l = \tan \dfrac {A - B} 2 + | r = \dfrac {\sin \frac {a - b} 2} {\sin \frac {a + b} 2} \cot \dfrac C 2 + | c = [[Napier's Analogies]] +}} +{{eqn | n = 2 + | ll= \leadsto + | l = \tan \frac {A - B} 2 \sin \frac {a + b} 2 + | r = \sin \frac {a - b} 2 \cot \frac C 2 + | c = more manageable in this form +}} +{{end-eqn}} +Hence we have: +{{begin-eqn}} +{{eqn | l = \dfrac {\tan \frac {A + B} 2 \cos \frac {a + b} 2} {\tan \frac {A - B} 2 \sin \frac {a + b} 2} + | r = \dfrac {\cos \frac {a - b} 2 \cot \frac C 2} {\sin \frac {a - b} 2 \cot \frac C 2} + | c = dividing $(1)$ by $(2)$ +}} +{{eqn | ll= \leadsto + | l = \dfrac {\tan \frac {A + B} 2} {\tan \frac {A - B} 2} \dfrac 1 {\tan \frac {a + b} 2} + | r = \dfrac 1 {\tan \frac {a - b} 2} + | c = simplifying +}} +{{eqn | ll= \leadsto + | l = \dfrac {\tan \frac {A + B} 2} {\tan \frac {A - B} 2} + | r = \dfrac {\tan \frac {a + b} 2} {\tan \frac {a - b} 2} + | c = simplifying +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Cosine of Half Angle for Spherical Triangles} +Tags: Half Angle Formulas for Spherical Triangles + +\begin{theorem} +:$\cos \dfrac A 2 = \sqrt {\dfrac {\sin s \, \map \sin {s - a} } {\sin b \sin c} }$ +where $s = \dfrac {a + b + c} 2$. 
+\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \cos a + | r = \cos b \cos c + \sin b \sin c \cos A + | c = [[Spherical Law of Cosines]] +}} +{{eqn | r = \cos b \cos c + \sin b \sin c \paren {2 \cos^2 \dfrac A 2 - 1} + | c = [[Double Angle Formulas/Cosine/Corollary 1|Double Angle Formula for Cosine: Corollary 1]] +}} +{{eqn | r = \map \cos {b + c} + 2 \sin b \sin c \cos^2 \dfrac A 2 + | c = [[Cosine of Sum]] +}} +{{eqn | ll= \leadsto + | l = \cos a - \map \cos {b + c} + | r = 2 \sin b \sin c \cos^2 \dfrac A 2 + | c = rearranging +}} +{{eqn | ll= \leadsto + | l = 2 \sin \dfrac {a + \paren {b + c} } 2 \sin \dfrac {\paren {b + c} - a} 2 + | r = 2 \sin b \sin c \cos^2 \dfrac A 2 + | c = [[Prosthaphaeresis Formula for Cosine minus Cosine]] +}} +{{eqn | ll= \leadsto + | l = \map \sin {\dfrac {a + b + c} 2} \, \map \sin {\dfrac {a + b + c} 2 - a} + | r = \sin b \sin c \cos^2 \dfrac A 2 + | c = +}} +{{eqn | ll= \leadsto + | l = \sin s \, \map \sin {s - a} + | r = \sin b \sin c \cos^2 \dfrac A 2 + | c = setting $s = \dfrac {a + b + c} 2$ and simplifying +}} +{{end-eqn}} +The result follows. +{{qed}} +\end{proof}<|endoftext|> +\section{Cosine of Half Side for Spherical Triangles} +Tags: Half Side Formulas for Spherical Triangles + +\begin{theorem} +:$\cos \dfrac a 2 = \sqrt {\dfrac {\map \cos {S - B} \, \map \cos {S - C} } {\sin B \sin C} }$ +where $S = \dfrac {A + B + C} 2$. +\end{theorem} + +\begin{proof} +Let $\triangle A'B'C'$ be the [[Definition:Polar Triangle|polar triangle]] of $\triangle ABC$. +Let the [[Definition:Side of Spherical Triangle|sides]] $a', b', c'$ of $\triangle A'B'C'$ be [[Definition:Opposite (in Triangle)|opposite]] $A', B', C'$ respectively. +From [[Spherical Triangle is Polar Triangle of its Polar Triangle]] we have that: +:not only is $\triangle A'B'C'$ be the [[Definition:Polar Triangle|polar triangle]] of $\triangle ABC$ +:but also $\triangle ABC$ is the [[Definition:Polar Triangle|polar triangle]] of $\triangle A'B'C'$. +Let $s' = \dfrac {a' + b' + c'} 2$. +We have: +{{begin-eqn}} +{{eqn | l = \sin \dfrac {A'} 2 + | r = \sqrt {\dfrac {\sin \paren {s' - b'} \sin \paren {s' - c'} } {\sin b' \sin c'} } + | c = [[Sine of Half Angle for Spherical Triangles]] +}} +{{eqn | ll= \leadsto + | l = \sin \dfrac {\pi - a} 2 + | r = \sqrt {\dfrac {\map \sin {s' - b'} \, \map \sin {s' - c'} } {\map \sin {\pi - B} \, \map \sin {\pi - C} } } + | c = [[Side of Spherical Triangle is Supplement of Angle of Polar Triangle]] +}} +{{eqn | ll= \leadsto + | l = \map \sin {\dfrac \pi 2 - \dfrac a 2} + | r = \sqrt {\dfrac {\map \sin {s' - b'} \, \map \sin {s' - c'} } {\sin B \sin C} } + | c = [[Sine of Supplementary Angle]] +}} +{{eqn | ll= \leadsto + | l = \cos \dfrac a 2 + | r = \sqrt {\dfrac {\map \sin {s' - b'} \, \map \sin {s' - c'} } {\sin B \sin C} } + | c = [[Sine of Complement equals Cosine]] +}} +{{end-eqn}} +Then: +{{begin-eqn}} +{{eqn | l = s' - b' + | r = \dfrac {\paren {\pi - A} + \paren {\pi - B} + \paren {\pi - C} } 2 - \paren {\pi - B} + | c = [[Side of Spherical Triangle is Supplement of Angle of Polar Triangle]] +}} +{{eqn | r = \dfrac {\pi - \paren {A + B + C} } 2 + B + | c = simplifying +}} +{{eqn | r = \dfrac \pi 2 - \paren {S - B} + | c = where $S = \dfrac {A + B + C} 2$ +}} +{{eqn | ll= \leadsto + | l = \map \sin {s' - b'} + | r = \map \sin {\dfrac \pi 2 - \paren {S - B} } + | c = +}} +{{eqn | r = \map \cos {S - B} + | c = [[Sine of Complement equals Cosine]] +}} +{{end-eqn}} +and similarly: +:$\map \sin {s' - c'} = \map \cos {S - C}$ +The result follows. 
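+As an informal numerical sanity check (not part of the formal argument), both the half-angle and the half-side formulas can be confirmed on a concrete spherical triangle. The sketch below assumes the NumPy library is available, builds a triangle from three arbitrarily chosen unit vectors, and recovers the angles via the [[Spherical Law of Cosines]]; the variable names and the particular vectors are illustrative only.
+<pre>
+import numpy as np
+
+# Vertices of a spherical triangle, as unit vectors from the centre O.
+A_v = np.array([1.0, 0.0, 0.0])
+B_v = np.array([0.1, 1.0, 0.2]) / np.linalg.norm([0.1, 1.0, 0.2])
+C_v = np.array([0.2, 0.3, 1.0]) / np.linalg.norm([0.2, 0.3, 1.0])
+
+def arc(u, v):
+    # A side of the triangle is measured by the angle it subtends at the centre.
+    return np.arccos(np.clip(np.dot(u, v), -1.0, 1.0))
+
+a, b, c = arc(B_v, C_v), arc(C_v, A_v), arc(A_v, B_v)
+
+def angle(a, b, c):
+    # Spherical angle opposite side a, from the Spherical Law of Cosines.
+    return np.arccos((np.cos(a) - np.cos(b) * np.cos(c)) / (np.sin(b) * np.sin(c)))
+
+A, B, C = angle(a, b, c), angle(b, c, a), angle(c, a, b)
+s, S = (a + b + c) / 2, (A + B + C) / 2
+
+# Cosine of Half Angle and Cosine of Half Side formulas
+assert np.isclose(np.cos(A / 2), np.sqrt(np.sin(s) * np.sin(s - a) / (np.sin(b) * np.sin(c))))
+assert np.isclose(np.cos(a / 2), np.sqrt(np.cos(S - B) * np.cos(S - C) / (np.sin(B) * np.sin(C))))
+</pre>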
+{{qed}} +\end{proof}<|endoftext|> +\section{Napier's Rules for Right Angled Spherical Triangles} +Tags: Spherical Trigonometry, Napier's Rules for Right Angled Spherical Triangles + +\begin{theorem} +:[[File:NapiersRules.png|410px]] +Let $\triangle ABC$ be a [[Definition:Spherical Triangle|spherical triangle]] on the surface of a [[Definition:Sphere (Geometry)|sphere]] whose [[Definition:Center of Sphere|center]] is $O$. +Let the [[Definition:Side of Spherical Triangle|sides]] $a, b, c$ of $\triangle ABC$ be measured by the [[Definition:Subtend|angles subtended]] at $O$, where $a, b, c$ are [[Definition:Opposite (in Triangle)|opposite]] $A, B, C$ respectively. +Let either [[Definition:Spherical Angle|angle]] $\angle C$ or [[Definition:Side of Spherical Triangle|side]] $c$ be a [[Definition:Right Angle|right angle]]. +Let the remaining parts of $\triangle ABC$ be arranged in a [[Definition:Circle|circle]] as above: +:for $\angle C$ a [[Definition:Right Angle|right angle]], the '''interior''' +:for $c$ a [[Definition:Right Angle|right angle]], the '''exterior''' +where the [[Definition:Symbol|symbol]] $\Box$ denotes a [[Definition:Right Angle|right angle]]. +Let one of the parts of this [[Definition:Circle|circle]] be called a '''middle part'''. +Let the two neighboring parts of the '''middle part''' be called '''adjacent parts'''. +Let the remaining two parts be called '''opposite parts'''. +\end{theorem}<|endoftext|> +\section{Equation of Circle/Cartesian} +Tags: Equation of Circle + +\begin{theorem} +:$\paren {x - a}^2 + \paren {y - b}^2 = R^2$ +\end{theorem} + +\begin{proof} +Let the [[Definition:Point|point]] $\tuple {x, y}$ satisfy the [[Definition:Equation of Geometric Figure|equation]]: +:$(1): \quad \paren {x - a}^2 + \paren {y - b}^2 = R^2$ +By the [[Distance Formula]], the [[Definition:Distance (Linear Measure)|distance]] between this $\tuple {x, y}$ and $\tuple {a, b}$ is: +:$\sqrt {\paren {x - a}^2 + \paren {y - b}^2}$ +But from equation $(1)$, this quantity equals $R$. +Therefore the [[Definition:Distance (Linear Measure)|distance]] between [[Definition:Point|points]] satisfying the [[Definition:Equation of Geometric Figure|equation]] and the [[Definition:Center of Circle|center]] is [[Definition:Constant|constant]] and equal to the [[Definition:Radius of Circle|radius]]. +Thus $\tuple {x, y}$ lies on the [[Definition:Circumference of Circle|circumference]] of a [[Definition:Circle|circle]] with [[Definition:Radius of Circle|radius]] $R$ and [[Definition:Center of Circle|center]] $\tuple {a, b}$. +Now suppose that $\tuple {x, y}$ does not satisfy the [[Definition:Equation of Geometric Figure|equation]]: +:$\paren {x - a}^2 + \paren {y - b}^2 = R^2$ +Then by the same reasoning as above, the [[Definition:Distance (Linear Measure)|distance]] between $\tuple {x, y}$ and $\tuple {a, b}$ does not equal $R$. +Therefore $\tuple {x, y}$ does not lie on the [[Definition:Circumference of Circle|circumference]] of a [[Definition:Circle|circle]] with [[Definition:Radius of Circle|radius]] $R$ and [[Definition:Center of Circle|center]] $\tuple {a, b}$. +Hence it follows that the [[Definition:Point|points]] satisfying $(1)$ are exactly those [[Definition:Point|points]] which are the [[Definition:Circle|circle]] in question. 
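+The equivalence can also be illustrated numerically (a sanity check only, not part of the proof); the centre, radius and sample angles below are arbitrary, and only the Python standard library is assumed.
+<pre>
+import math
+
+a, b, R = 2.0, -1.0, 3.0                                   # arbitrary centre (a, b) and radius R
+for k in range(8):
+    t = 2 * math.pi * k / 8
+    x, y = a + R * math.cos(t), b + R * math.sin(t)        # a point at distance R from (a, b)
+    assert math.isclose((x - a) ** 2 + (y - b) ** 2, R ** 2)
+
+x, y = a + R + 1.0, b                                      # a point at distance R + 1 from (a, b)
+assert not math.isclose((x - a) ** 2 + (y - b) ** 2, R ** 2)
+</pre>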
+{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Circle/Parametric} +Tags: Equation of Circle + +\begin{theorem} +:$x = a + R \cos t, \ y = b + R \sin t$ +\end{theorem} + +\begin{proof} +Let the [[Definition:Point|point]] $\tuple {x, y}$ satisfy the equations: +:$x = a + R \cos t$ +:$y = b + R \sin t$ +By the [[Distance Formula]], the [[Definition:Distance (Linear Measure)|distance]] between $\tuple {x, y}$ and $\tuple {a, b}$ is: +:$\sqrt {\paren {\paren {a + R \cos t} - a}^2 + \paren {\paren {b + R \sin t} - b}^2}$ +This simplifies to: +:$\sqrt {R^2 \cos^2 t + R^2 \sin^2 t} = R \sqrt {\cos^2 t + \sin^2 t}$ +Then by [[Sum of Squares of Sine and Cosine]], this [[Definition:Distance (Linear Measure)|distance]] equals $R$. +Therefore the [[Definition:Distance (Linear Measure)|distance]] between [[Definition:Point|point]]s satisfying the equation and the [[Definition:Center of Circle|center]] is [[Definition:Constant|constant]] and equal to the [[Definition:Radius of Circle|radius]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Circle/Polar} +Tags: Equation of Circle + +\begin{theorem} +: $r^2 - 2 r r_0 \map \cos {\theta - \varphi} + \paren {r_0}^2 = R^2$ +\end{theorem} + +\begin{proof} +Let the point $\polar {r, \theta}_\text {Polar}$ satisfy the [[Definition:Equation of Geometric Figure|equation]]: +:$r^2 - 2 r r_0 \map \cos {\theta - \varphi} + \paren {r_0}^2 = R^2$ +Let the [[Definition:Point|points]] $\polar {r, \theta}$ and $\polar {r_0, \varphi}$ be rewritten in [[Definition:Cartesian Coordinate System|Cartesian coordinates]]: +:$\polar {r, \theta}_\text {Polar} = \tuple {r \cos \theta, r \sin \theta}_\text{Cartesian}$ +:$\polar {r_0, \varphi}_\text{Polar} = \tuple {r_0 \cos \varphi, r_0 \sin \varphi}_\text{Cartesian}$ +Thus the [[Definition:Distance (Linear Measure)|distance]] between $\polar {r, \theta}_\text {Polar}$ and $\polar {r_0, \varphi}_\text{Polar}$ is: +{{begin-eqn}} +{{eqn | o = + | r = \sqrt {\paren {r \cos \theta - r_0 \cos \varphi}^2 + \paren {r \sin \theta - r_0 \sin \varphi}^2} +}} +{{eqn | r = \sqrt {r^2 \cos^2 \theta + \paren {r_0}^2 \cos^2 \varphi - 2 r r_0 \cos \theta \cos \varphi + r^2 \sin^2 \theta + \paren {r_0}^2 \sin^2 \varphi - 2 r r_0 \sin \theta \sin \varphi} +}} +{{eqn | r = \sqrt {r^2 \paren {\cos^2 \theta + \sin^2 \theta} + \paren {r_0}^2 \paren {\cos^2 \varphi + \sin^2 \varphi} - 2 r r_0 \paren {\cos \theta \cos \varphi + \sin \theta \sin \varphi} } +}} +{{end-eqn}} +{{begin-eqn}} +{{eqn | r = \sqrt {r^2 + \paren {r_0}^2 - 2 r r_0 \map \cos {\theta - \varphi} } + | c = [[Cosine of Difference]] and [[Sum of Squares of Sine and Cosine]] +}} +{{end-eqn}} +But from the equation, this quantity equals $R$. +Therefore the distance between points satisfying the equation and the [[Definition:Center of Circle|center]] is [[Definition:Constant|constant]] and equal to the [[Definition:Radius of Circle|radius]] $R$. 
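+A brief numerical illustration (not part of the proof): for an arbitrary centre $\polar {r_0, \varphi}$ and radius $R > r_0$, solving the equation for $r$ at several values of $\theta$ yields points whose distance from the centre is indeed $R$. The sketch below assumes only the Python standard library; all numerical values are arbitrary.
+<pre>
+import math
+
+r0, phi, R = 1.0, 0.7, 2.5            # centre (r0, phi) in polar coordinates, radius R > r0
+cx, cy = r0 * math.cos(phi), r0 * math.sin(phi)
+
+for k in range(12):
+    theta = 2 * math.pi * k / 12
+    # Solve r^2 - 2 r r0 cos(theta - phi) + r0^2 - R^2 = 0 for its positive root.
+    p = r0 * math.cos(theta - phi)
+    r = p + math.sqrt(p * p - (r0 * r0 - R * R))
+    x, y = r * math.cos(theta), r * math.sin(theta)
+    assert math.isclose(math.hypot(x - cx, y - cy), R)
+</pre>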
+{{qed}} +[[Category:Equation of Circle]] +fl8juwo98qgj9lsefc4c4of07uedhad +\end{proof}<|endoftext|> +\section{Equation of Circle/Cartesian/Corollary 1} +Tags: Equation of Circle + +\begin{theorem} +The [[Definition:Equation of Geometric Figure|equation]]: +:$A \paren {x^2 + y^2} + B x + C y + D = 0$ +is the [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Circle|circle]] with [[Definition:Radius of Circle|radius]] $R$ and [[Definition:Center of Circle|center]] $\tuple {a, b}$, where: +:$R = \dfrac 1 {2 A} \sqrt {B^2 + C^2 - 4 A D}$ +:$\tuple {a, b} = \tuple {\dfrac {-B} {2 A}, \dfrac {-C} {2 A} }$ +provided: +:$A > 0$ +:$B^2 + C^2 \ge 4 A D$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = A \paren {x^2 + y^2} + B x + C y + D + | r = 0 + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = x^2 + y^2 + \frac B A x + \frac C A y + | r = - \frac D A + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = x^2 + 2 \frac B {2 A} x + \frac {B^2} {4 A^2} + y^2 + 2 \frac C {2 A} y + \frac {C^2} {4 A^2} + | r = \frac {B^2} {4 A^2} + \frac {C^2} {4 A^2} - \frac D A + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = \paren {x + \frac B {2 A} }^2 + \paren {y + \frac C {2 A} }^2 + | r = \frac {B^2} {4 A^2} + \frac {C^2} {4 A^2} - \frac D A + | c = +}} +{{eqn | r = \frac {B^2} {4 A^2} + \frac {C^2} {4 A^2} - \frac {4 A D} {4 A^2} + | c = +}} +{{eqn | r = \frac 1 {4 A^2} \paren {B^2 + C^2 - 4 A D} + | c = +}} +{{end-eqn}} +This last expression is [[Definition:Positive Real Number|non-negative]] {{iff}} $B^2 + C^2 \ge 4 A D$. +In such a case $\dfrac 1 {4 A^2} \paren {B^2 + C^2 - 4 A D}$ is in the form $R^2$ and so: +:$\paren {x + \dfrac B {2 A} }^2 + \paren {y + \dfrac C {2 A} }^2 = \dfrac 1 {4 A^2} \paren {B^2 + C^2 - 4 A D}$ +is in the form: +:$\paren {x - a}^2 + \paren {y - b}^2 = R^2$ +Hence the result from [[Equation of Circle/Cartesian|Equation of Circle: Cartesian]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Circle/Cartesian/Corollary 2} +Tags: Equation of Circle + +\begin{theorem} +The [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Circle|circle]] with [[Definition:Radius of Circle|radius]] $R$ whose [[Definition:Center of Circle|center]] is at the [[Definition:Origin|origin]] expressed in [[Definition:Cartesian Coordinate System|Cartesian coordinates]] is: +:$x^2 + y^2 = R^2$ +\end{theorem} + +\begin{proof} +From [[Equation of Circle/Cartesian|Equation of Circle: Cartesian]], the [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Circle|circle]] with [[Definition:Radius of Circle|radius]] $R$ and [[Definition:Center of Circle|center]] $\tuple {a, b}$ expressed in [[Definition:Cartesian Coordinate System|Cartesian coordinates]] is: +:$\paren {x - a}^2 + \paren {y - b}^2 = R^2$ +Setting $a = b = 0$ yields the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Distance Formula/3 Dimensions} +Tags: Euclidean Geometry, Solid Analytic Geometry + +\begin{theorem} +The [[Definition:Distance (Linear Measure)|distance]] $d$ between two [[Definition:Point|points]] $A = \tuple {x_1, y_1, z_1}$ and $B = \tuple {x_2, y_2, z_2}$ in a [[Definition:Cartesian Space|Cartesian space]] of [[Definition:Dimension (Geometry)|3 dimensions]] is: +:$d = \sqrt {\paren {x_1 - x_2}^2 + \paren {y_1 - y_2}^2 + \paren {z_1 - z_2}^2}$ +\end{theorem} + +\begin{proof} +:[[File:DistanceFormula3D.png|600px]] +Let $d$ be the [[Definition:Distance (Linear Measure)|distance]] to be found between $A = \tuple {x_1, y_1, z_1}$ and $B = \tuple {x_2, y_2, z_2}$. 
+Let the [[Definition:Point|points]] $C$ and $D$ be defined as: +:$C = \tuple {x_2, y_1, z_1}$ +:$D = \tuple {x_2, y_2, z_1}$ +Let $d'$ be the [[Definition:Distance (Linear Measure)|distance]] between $A$ and $D$. +From [[Distance Formula]], it can be seen that: +:$d' = \sqrt {\paren {x_1 - x_2}^2 + \paren {y_1 - y_2}^2}$ +We note that $\triangle ADB$ is a [[Definition:Right Triangle|right triangle]]. +Thus by [[Pythagoras's Theorem]]: +:$AB^2 = AD^2 + DB^2$ +Thus: +{{begin-eqn}} +{{eqn | l = d^2 + | r = d'^2 + DB^2 + | c = +}} +{{eqn | r = \paren {x_1 - x_2}^2 + \paren {y_1 - y_2}^2 + \paren {z_1 - z_2}^2 + | c = +}} +{{end-eqn}} +and so: +:$d = \sqrt {\paren {x_1 - x_2}^2 + \paren {y_1 - y_2}^2 + \paren {z_1 - z_2}^2}$ +as it was to be proved. +{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Sphere/Rectangular Coordinates} +Tags: Spheres, Examples of Surfaces + +\begin{theorem} +:$\paren {x - a}^2 + \paren {y - b}^2 + \paren {z - c}^2 = R^2$ +\end{theorem} + +\begin{proof} +Let the [[Definition:Point|point]] $\tuple {x, y, z}$ satisfy the [[Definition:Equation of Geometric Figure|equation]]: +:$(1): \quad \paren {x - a}^2 + \paren {y - b}^2 + \paren {z - c}^2 = R^2$ +By the [[Distance Formula in 3 Dimensions]], the [[Definition:Distance (Linear Measure)|distance]] between this $\tuple {x, y, z}$ and $\tuple {a, b, c}$ is: +:$\sqrt {\paren {x - a}^2 + \paren {y - b}^2 + \paren {z - c}^2}$ +But from equation $(1)$, this quantity equals $R$. +Therefore the [[Definition:Distance (Linear Measure)|distance]] between [[Definition:Point|points]] satisfying the [[Definition:Equation of Geometric Figure|equation]] and the [[Definition:Center of Sphere|center]] is [[Definition:Constant|constant]] and equal to the [[Definition:Radius of Sphere|radius]]. +Thus $\tuple {x, y, z}$ lies on the [[Definition:Surface|surface]] of a [[Definition:Sphere|sphere]] with [[Definition:Radius of Sphere|radius]] $R$ and [[Definition:Center of Sphere|center]] $\tuple {a, b, c}$. +Now suppose that $\tuple {x, y, z}$ does not satisfy the [[Definition:Equation of Geometric Figure|equation]]: +:$\paren {x - a}^2 + \paren {y - b}^2 + \paren {z - c}^2 = R^2$ +Then by the same reasoning as above, the [[Definition:Distance (Linear Measure)|distance]] between $\tuple {x, y, z}$ and $\tuple {a, b, c}$ does not equal $R$. +Therefore $\tuple {x, y, z}$ does not lie on the [[Definition:Surface|surface]] of a [[Definition:Sphere|sphere]] with [[Definition:Radius of Sphere|radius]] $R$ and [[Definition:Center of Sphere|center]] $\tuple {a, b, c}$. +Hence it follows that the [[Definition:Point|points]] satisfying $(1)$ are exactly those [[Definition:Point|points]] which are the [[Definition:Sphere (Geometry)|sphere]] in question. 
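+As in the plane case, the argument can be illustrated numerically (a sanity check only, not part of the proof); the centre, radius and random seed below are arbitrary, and only the Python standard library is assumed.
+<pre>
+import math
+import random
+
+a, b, c, R = 1.0, -2.0, 0.5, 4.0      # arbitrary centre (a, b, c) and radius R
+random.seed(0)
+
+for _ in range(10):
+    # A random direction scaled to length R and translated to the centre lies on the sphere.
+    u = [random.gauss(0.0, 1.0) for _ in range(3)]
+    norm = math.sqrt(sum(t * t for t in u))
+    x, y, z = a + R * u[0] / norm, b + R * u[1] / norm, c + R * u[2] / norm
+    assert math.isclose((x - a) ** 2 + (y - b) ** 2 + (z - c) ** 2, R ** 2)
+
+# A point at distance R + 1 from the centre does not satisfy the equation.
+x, y, z = a + R + 1.0, b, c
+assert not math.isclose((x - a) ** 2 + (y - b) ** 2 + (z - c) ** 2, R ** 2)
+</pre>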
+{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Sphere/Rectangular Coordinates/Corollary} +Tags: Spheres, Examples of Surfaces + +\begin{theorem} +The [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Sphere (Geometry)|sphere]] with [[Definition:Radius of Sphere|radius]] $R$ whose [[Definition:Center of Sphere|center]] is at the [[Definition:Origin|origin]] expressed in [[Definition:Cartesian Coordinate System|Cartesian coordinates]] is: +:$x^2 + y^2 + z^2 = R^2$ +\end{theorem} + +\begin{proof} +From [[Equation of Sphere in Rectangular Coordinates]], the [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Sphere (Geometry)|sphere]] with [[Definition:Radius of Sphere|radius]] $R$ and [[Definition:Center of Sphere|center]] $\tuple {a, b, c}$ expressed in [[Definition:Cartesian Coordinate System|Cartesian coordinates]] is: +:$\paren {x - a}^2 + \paren {y - b}^2 + \paren {z - c}^2 = R^2$ +Setting $a = b = c = 0$ yields the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Conic Section} +Tags: Conic Sections + +\begin{theorem} +The general [[Definition:Conic Section|conic section]] can be expressed in [[Definition:Cartesian Coordinate System|Cartesian coordinates]] in the form: +:$a x^2 + b x y + c y^2 + d x + e y + f = 0$ +for some $a, b, c, d, e, f \in \R$. +\end{theorem} + +\begin{proof} +By definition, a [[Definition:Conic Section|conic section]] is the [[Definition:Set|set]] of [[Definition:Point|points]] of [[Definition:Intersection (Geometry)|intersection]] between a [[Definition:Cone|cone]] and a [[Definition:Plane|plane]]. +Let $P = \tuple {\alpha, \beta, \gamma}$ be the [[Definition:Apex of Cone|apex]] of the [[Definition:Cone|cone]]. +Let $Q = \tuple {x, y, z}$ be a point of [[Definition:Intersection (Geometry)|intersection]] between the [[Definition:Plane|plane]] and the [[Definition:Cone|cone]]. +From [[Equation of Right Circular Cone]], we have: +:$(1): \quad \paren {x - \alpha}^2 + \paren {y - \beta}^2 = \eta \paren {z - \gamma}^2$ +for some [[Definition:Strictly Positive Real Number|(strictly) positive real number]] $\eta$. +From [[Equation of Plane]], we have: +$A x + B y + C z + D = 0$ +for some [[Definition:Real Number|real numbers]] $A, B, C, D$. +Let $C \ne 0$. +Then: +:$z = - D - \dfrac {A x + B y} C$ +Hence, plugging in $z$ into $(1)$: +:$\paren {x - \alpha}^2 + \paren {y - \beta}^2 = \eta \paren {\dfrac {A x + B y} C + D + \gamma}^2$ +Opening up the brackets and making the corresponding notation of [[Definition:Constant|constants]], we get the desired result. +{{qed|lemma}} +Let $C = 0$. +Then: +:$A x + B y + D = 0$ +In order for the [[Definition:Plane|plane]] to be well-defined, $A$ or $B$ must be non-zero. +{{WLOG}}, since the cone equation $(1)$ is symmetric in $x$ and $y$, we can consider $B \ne 0$ (otherwise interchange coordinates). +Then: +:$y = - D - \dfrac {A x} B$ +Hence, plugging $y$ into $(1)$: +:$\paren {x - \alpha}^2 + \paren {-D - \dfrac {A x} B - \beta}^2 = \eta \paren {z - \gamma}^2$ +Opening up the brackets and making the corresponding notation of constants and coordinates ($z \leftrightarrow y$), we get the desired result. 
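+The case $C \ne 0$ can be illustrated symbolically (a sketch under arbitrarily chosen constants, not part of the proof): substituting $z$ from the plane equation into the cone equation leaves a polynomial of total degree $2$ in $x$ and $y$, which is exactly the stated general form. The sketch assumes the SymPy library; the values chosen for $\alpha, \beta, \gamma, \eta, A, B, C, D$ are arbitrary.
+<pre>
+import sympy as sp
+
+x, y, z = sp.symbols('x y z')
+alpha, beta, gamma, eta = 1, -2, 3, sp.Rational(1, 2)    # apex (alpha, beta, gamma), opening constant eta > 0
+A, B, C, D = 2, -1, 4, 5                                 # plane A x + B y + C z + D = 0 with C != 0
+
+cone = (x - alpha) ** 2 + (y - beta) ** 2 - eta * (z - gamma) ** 2
+plane_z = sp.solve(A * x + B * y + C * z + D, z)[0]      # z expressed from the plane equation
+
+conic = sp.expand(cone.subs(z, plane_z))
+# The intersection is described by a polynomial of total degree 2 in x and y,
+# that is, an equation of the form a x^2 + b x y + c y^2 + d x + e y + f = 0.
+assert sp.Poly(conic, x, y).total_degree() == 2
+</pre>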
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Graph of Quadratic describes Parabola}
+Tags: Parabolas, Quadratic Equations, Graph of Quadratic describes Parabola
+
+\begin{theorem}
+The [[Definition:Locus|locus]] of the [[Definition:Equation of Geometric Figure|equation]] defining a [[Definition:Quadratic Equation|quadratic]]:
+:$y = a x^2 + b x + c$
+describes a [[Definition:Parabola|parabola]].
+\end{theorem}
+
+\begin{proof}
+Consider the [[Definition:Parabola/Focus-Directrix|focus-directrix property]] of a [[Definition:Parabola|parabola]] $P$.
+Let the [[Definition:Focus of Parabola|focus]] of $P$ be the point $\tuple {k, f}$ on a [[Definition:Cartesian Plane|Cartesian plane]].
+Let the [[Definition:Directrix of Parabola|directrix]] of $P$ be the [[Definition:Straight Line|straight line]] $y = -d$.
+Let $\tuple {x, y}$ be an arbitrary [[Definition:Point|point]] on $P$.
+Then by the [[Definition:Parabola/Focus-Directrix|focus-directrix property]]:
+:$y + d = \sqrt {\paren {x - k}^2 + \paren {y - f}^2}$
+where:
+:$y + d$ is the [[Definition:Distance (Linear Measure)|distance]] from $\tuple {x, y}$ to the [[Definition:Straight Line|straight line]] $y = -d$
+:$\sqrt {\paren {x - k}^2 + \paren {y - f}^2}$ is the [[Definition:Distance (Linear Measure)|distance]] from $\tuple {x, y}$ to the [[Definition:Point|point]] $\tuple {k, f}$ by the [[Distance Formula]].
+Hence:
+{{begin-eqn}}
+{{eqn | l = \paren {y + d}^2
+ | r = \paren {x - k}^2 + \paren {y - f}^2
+ | c = 
+}}
+{{eqn | ll= \leadsto
+ | l = y^2 + 2 y d + d^2
+ | r = x^2 - 2 k x + k^2 + y^2 - 2 f y + f^2
+ | c = 
+}}
+{{eqn | ll= \leadsto
+ | l = 2 y \paren {f + d}
+ | r = x^2 - 2 k x + f^2 + k^2 - d^2
+ | c = 
+}}
+{{eqn | ll= \leadsto
+ | l = y
+ | r = \frac 1 {2 \paren {f + d} } x^2 - \frac k {\paren {f + d} } x + \frac {f^2 + k^2 - d^2} {2 \paren {f + d} }
+ | c = 
+}}
+{{end-eqn}}
+This is in the form $y = a x^2 + b x + c$.
+By setting $k$, $f$ and $d$ appropriately in terms of $a$, $b$ and $c$, the specific [[Definition:Focus of Parabola|focus]] and [[Definition:Directrix of Parabola|directrix]] can be appropriately positioned.
+{{qed}}
+[[Category:Parabolas]]
+[[Category:Quadratic Equations]]
+[[Category:Graph of Quadratic describes Parabola]]
+\end{proof}<|endoftext|>
+\section{Graph of Quadratic describes Parabola/Corollary 1}
+Tags: Graph of Quadratic describes Parabola
+
+\begin{theorem}
+The [[Definition:Locus|locus]] of the [[Definition:Equation of Geometric Figure|equation]] of the [[Definition:Square (Algebra)|square function]]:
+:$y = x^2$
+describes a [[Definition:Parabola|parabola]].
+\end{theorem}
+
+\begin{proof}
+This is a particular instance of [[Graph of Quadratic describes Parabola]], where:
+:$y = a x^2 + b x + c$
+is the [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Parabola|parabola]].
+The result follows by setting $a = 1, b = 0, c = 0$.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Graph of Quadratic describes Parabola/Corollary 2}
+Tags: Graph of Quadratic describes Parabola
+
+\begin{theorem}
+The [[Definition:Locus|locus]] of the [[Definition:Equation of Geometric Figure|equation]] of the [[Definition:Square Root|square root function]] on the [[Definition:Positive Real Number|non-negative reals]]:
+:$\forall x \in \R_{\ge 0}: \map f x = \sqrt x$
+describes half of a [[Definition:Parabola|parabola]].
+\end{theorem} + +\begin{proof} +From [[Graph of Quadratic describes Parabola/Corollary 1|Graph of Quadratic describes Parabola: Corollary 1]], where: +:$y = x^2$ +is the [[Definition:Equation of Geometric Figure|equation]] of a [[Definition:Parabola|parabola]]. +Let $f: \R \to \R$ be the [[Definition:Real Function|real function]] defined as: +:$\map f x = x^2$ +From [[Square of Real Number is Non-Negative]], the [[Definition:Image of Mapping|image]] of $f$ is $\R_{\ge 0}$. +Also we have from [[Positive Real Number has Two Square Roots]]: +:$\forall x \in \R: \paren {-x}^2 = x^2$ +Thus it is necessary to apply a [[Definition:Bijective Restriction|bijective restriction]] upon $f$. +Let $g: \R_{\ge 0} \to \R_{\ge 0}$ be the [[Definition:Bijective Restriction|bijective restriction]] of $f$ to $\R_{\ge 0} \times \R_{\ge 0}$: +:$\forall x \in \R_{\ge 0}: \map g x = x^2$ +From [[Inverse of Bijection is Bijection]], $g^{-1}: \R_{\ge 0} \to \R_{\ge 0}$ is also a [[Definition:Bijection|bijection]]. +By definition: +:$\forall x \in \R_{\ge 0}: \map {g^{-1} } x = +\sqrt x$ +Then from [[Graph of Inverse Mapping]], the [[Definition:Graph of Mapping|graph]] of $g^{-1}$ is the same as the [[Definition:Graph of Mapping|graph]] of $g$, reflected in the line $x = y$. +As the [[Definition:Graph of Mapping|graph]] of $f$ is a [[Definition:Parabola|parabola]], the [[Definition:Graph of Mapping|graph]] of $g$ is also a [[Definition:Parabola|parabola]], but because of the [[Definition:Bijective Restriction|restriction]] to $\R_{\ge 0}$, just half of it. +Thus the [[Definition:Graph of Mapping|graph]] of $g^{-1}$ is also half a [[Definition:Parabola|parabola]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Natural Numbers under Addition form Commutative Semigroup} +Tags: Natural Numbers, Examples of Semigroups + +\begin{theorem} +The [[Definition:Algebraic Structure|algebraic structure]] $\left({\N, +}\right)$ consisting of the [[Definition:Set|set]] of [[Definition:Natural Numbers|natural numbers]] $\N$ under [[Definition:Natural Number Addition|addition]] $+$ is a [[Definition:Commutative Semigroup|commutative semigroup]]. +\end{theorem} + +\begin{proof} +Consider the [[Definition:Natural Numbers|natural numbers]] $\N$ defined as the [[Definition:Naturally Ordered Semigroup|naturally ordered semigroup]]. +From the definition of the [[Definition:Naturally Ordered Semigroup|naturally ordered semigroup]], it follows that $\left ({\N, +}\right)$ is a [[Definition:Commutative Semigroup|commutative semigroup]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Gödel's Incompleteness Theorems/First/Corollary} +Tags: Gödel's Incompleteness Theorems + +\begin{theorem} +If $T$ is both [[Definition:Consistent (Logic)|consistent]] and [[Definition:Complete Theory|complete]], it does not contain [[Definition:Minimal Arithmetic|minimal arithmetic]]. +\end{theorem} + +\begin{proof} +This is simply the [[Definition:Contrapositive Statement|contrapositive]] of [[Gödel's First Incompleteness Theorem]]. +{{qed}} +{{Namedfor|Kurt Friedrich Gödel}} +\end{proof}<|endoftext|> +\section{Monomorphism that is Split Epimorphism is Split Monomorphism} +Tags: Category Theory + +\begin{theorem} +Let $\mathbf C$ be a [[Definition:Metacategory|metacategory]]. +Let $f: C \to D$ be a morphism in $\mathbf C$ such that $f$ is a [[Definition:Monomorphism (Category Theory)|monomorphism]] and a [[Definition:Split Epimorphism|split epimorphism]]. +Then $f: C \to D$ is a [[Definition:Split Monomorphism|split monomorphism]]. 
+{{explain|What are $C$ and $D$?}} +\end{theorem} + +\begin{proof} +Let $g: D \to C$ be the [[Definition:Right Inverse (Category Theory)|right inverse]] of $f$: +:$f \circ g = \operatorname{id}_D$ +which is guaranteed to exist by definition of [[Definition:Split Epimorphism|split epimorphism]]. +Therefore: +:$f \circ g \circ f = \operatorname{id}_D \circ f = f \circ \operatorname{id}_C$ +by the property of the [[Definition:Identity Morphism|identity morphism]]. +Since $f$ is [[Definition:Left Cancellable Element|left cancellable]], by the definition of [[Definition:Monomorphism (Category Theory)|monomorphism]], we have: +:$g \circ f = \operatorname{id}_C$ +Hence $f$ is a [[Definition:Split Monomorphism|split monomorphism]] with [[Definition:Left Inverse (Category Theory)|left inverse]] $g$. +{{qed}} +[[Category:Category Theory]] +etbcg72blm50bar942619desslok0uv +\end{proof}<|endoftext|> +\section{Epimorphism that is Split Monomorphism is Split Epimorphism} +Tags: Category Theory + +\begin{theorem} +Let $\mathbf C$ be a [[Definition:Metacategory|metacategory]]. +Let $f: C \to D$ be a [[Definition:Epimorphism (Category Theory)|epimorphism]] and a [[Definition:Split Monomorphism|split monomorphism]]. +Then $f: C \to D$ is a [[Definition:Split Epimorphism|split epimorphism]]. +\end{theorem} + +\begin{proof} +{{explain|What is a Dual proof}} +Dual proof of [[Monomorphism that is Split Epimorphism is Split Monomorphism]]. +{{qed}} +[[Category:Category Theory]] +peimi58rmzw3im8toqfkj41g1bmf24k +\end{proof}<|endoftext|> +\section{Westwood's Puzzle} +Tags: Euclidean Geometry + +\begin{theorem} +:[[File:WestwoodsPuzzle.png|500px]] +Take any [[Definition:Rectangle|rectangle]] $ABCD$ and draw the [[Definition:Diagonal of Quadrilateral|diagonal]] $AC$. +[[Definition:Incircle of Triangle|Inscribe]] a [[Definition:Circle|circle]] $GFJ$ in one of the resulting [[Definition:Triangle (Geometry)|triangles]] $\triangle ABC$. +Drop [[Definition:Perpendicular|perpendiculars]] $IEF$ and $HEJ$ from the [[Definition:Incenter of Triangle|center of this incircle]] $E$ to the [[Definition:Side of Polygon|sides]] of the [[Definition:Rectangle|rectangle]]. +Then the [[Definition:Area|area]] of the [[Definition:Rectangle|rectangle]] $DHEI$ equals half the [[Definition:Area|area]] of the [[Definition:Rectangle|rectangle]] $ABCD$. +\end{theorem} + +\begin{proof} +[[Perpendicular through Given Point|Construct the perpendicular]] from $E$ to $AC$, and call its foot $G$. +Let $K$ be the [[Definition:Intersection (Geometry)|intersection]] of $IE$ and $AC$. +Let $L$ be the [[Definition:Intersection (Geometry)|intersection]] of $EH$ and $AC$. 
+:[[File:Westwood's Puzzle Proof.png|500px]] +First we have: +{{begin-eqn}} +{{eqn | n = 1 + | l = \angle CKI + | r = \angle EKG + | c = [[Two Straight Lines make Equal Opposite Angles]] +}} +{{eqn | l = \angle EGK + | r = \text {Right Angle} + | c = [[Tangent to Circle is Perpendicular to Radius]] +}} +{{eqn | l = \angle KIC + | r = \text {Right Angle} + | c = as $IF \perp CD$ +}} +{{eqn | n = 2 + | ll= \therefore + | l = \angle EGK + | r = \angle KIC + | c = [[Axiom:Euclid's Fourth Postulate|Euclid's Fourth Postulate]] +}} +{{eqn | l = IC + | r = EJ + | c = [[Opposite Sides and Angles of Parallelogram are Equal]] +}} +{{eqn | l = EJ + | r = EG + | c = as both are [[Definition:Radius of Circle|radii]] of the same [[Definition:Circle|circle]] +}} +{{eqn | n = 3 + | ll= \therefore + | l = IC + | r = EG + | c = [[Axiom:Euclid's Common Notion 1|Euclid's First Common Notion]] +}} +{{eqn | ll= \therefore + | l = \Area \triangle IKC + | r = \Area \triangle GKE + | c = [[Triangle Angle-Angle-Side Equality]]: $(1)$, $(2)$ and $(3)$ +}} +{{end-eqn}} +Similarly: +{{begin-eqn}} +{{eqn | n = 4 + | l = \angle HLA + | r = \angle GLE + | c = [[Two Straight Lines make Equal Opposite Angles]] +}} +{{eqn | l = \angle EGL + | r = \text {Right Angle} + | c = [[Tangent to Circle is Perpendicular to Radius]] +}} +{{eqn | l = \angle AHL + | r = \text {Right Angle} + | c = as $HJ \perp AD$ +}} +{{eqn | n = 5 + | ll= \therefore + | l = \angle EGL + | r = \angle AHL + | c = [[Axiom:Euclid's Fourth Postulate|Euclid's Fourth Postulate]] +}} +{{eqn | l = HA + | r = EF + | c = [[Opposite Sides and Angles of Parallelogram are Equal]] +}} +{{eqn | l = EF + | r = EG + | c = as both are [[Definition:Radius of Circle|radii]] of the same [[Definition:Circle|circle]] +}} +{{eqn | n = 6 + | ll= \therefore + | l = HA + | r = EG + | c = [[Axiom:Euclid's Common Notion 1|Euclid's First Common Notion]] +}} +{{eqn | ll= \therefore + | l = \Area \triangle HAL + | r = \Area \triangle GEL + | c = [[Triangle Angle-Angle-Side Equality]]: $(4)$, $(5)$ and $(6)$ +}} +{{end-eqn}} +Finally: +{{begin-eqn}} +{{eqn | l = \frac {\Area \Box ABCD} 2 + | r = \frac {AD \cdot CD} 2 + | c = [[Area of Parallelogram]] +}} +{{eqn | r = \Area \triangle ADC + | c = [[Area of Triangle in Terms of Side and Altitude]] +}} +{{eqn | r = \Area \triangle HAL + \Area \triangle IKC + \Area \Box DHLKI +}} +{{eqn | r = \Area \triangle GEL + \Area \triangle GKE + \Area \Box DHLKI +}} +{{eqn | r = \Area \Box DHEI +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +The crucial geometric truth to note is that: +:$CJ = CG, AG = AF, BF = BJ$ +This follows from the fact that: +:$\triangle CEJ \cong \triangle CEG$, $\triangle AEF \cong \triangle AEG$ and $\triangle BEF \cong \triangle BEJ$ +This is a direct consequence of the point $E$ being the [[Definition:Center of Circle|center]] of the [[Definition:Incircle of Triangle|incircle]] of $\triangle ABC$. +Then it is just a matter of algebra. +Let $AF = a, FB = b, CJ = c$. 
+{{begin-eqn}} +{{eqn | l = \paren {a + b}^2 + \paren {b + c}^2 + | r = \paren {a + c}^2 + | c = [[Pythagoras's Theorem]] +}} +{{eqn | ll= \leadsto + | l = a^2 + 2 a b + b^2 + b^2 + 2 b c + c^2 + | r = a^2 + 2 a c + c^2 + | c = +}} +{{eqn | ll= \leadsto + | l = a b + b^2 + b c + | r = a c + | c = +}} +{{eqn | ll= \leadsto + | l = a b + b^2 + b c + a c + | r = 2 a c + | c = +}} +{{eqn | ll= \leadsto + | l = \paren {a + b} \paren {b + c} + | r = 2 a c + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Westwood's Puzzle/Proof 1} +Tags: Euclidean Geometry + +\begin{theorem} +:[[File:WestwoodsPuzzle.png|500px]] +Take any [[Definition:Rectangle|rectangle]] $ABCD$ and draw the [[Definition:Diagonal of Quadrilateral|diagonal]] $AC$. +[[Definition:Incircle of Triangle|Inscribe]] a [[Definition:Circle|circle]] $GFJ$ in one of the resulting [[Definition:Triangle (Geometry)|triangles]] $\triangle ABC$. +Drop [[Definition:Perpendicular|perpendiculars]] $IEF$ and $HEJ$ from the [[Definition:Incenter of Triangle|center of this incircle]] $E$ to the [[Definition:Side of Polygon|sides]] of the [[Definition:Rectangle|rectangle]]. +Then the [[Definition:Area|area]] of the [[Definition:Rectangle|rectangle]] $DHEI$ equals half the [[Definition:Area|area]] of the [[Definition:Rectangle|rectangle]] $ABCD$. +\end{theorem} + +\begin{proof} +[[Perpendicular through Given Point|Construct the perpendicular]] from $E$ to $AC$, and call its foot $G$. +Let $K$ be the [[Definition:Intersection (Geometry)|intersection]] of $IE$ and $AC$. +Let $L$ be the [[Definition:Intersection (Geometry)|intersection]] of $EH$ and $AC$. +:[[File:Westwood's Puzzle Proof.png|500px]] +First we have: +{{begin-eqn}} +{{eqn | n = 1 + | l = \angle CKI + | r = \angle EKG + | c = [[Two Straight Lines make Equal Opposite Angles]] +}} +{{eqn | l = \angle EGK + | r = \text {Right Angle} + | c = [[Tangent to Circle is Perpendicular to Radius]] +}} +{{eqn | l = \angle KIC + | r = \text {Right Angle} + | c = as $IF \perp CD$ +}} +{{eqn | n = 2 + | ll= \therefore + | l = \angle EGK + | r = \angle KIC + | c = [[Axiom:Euclid's Fourth Postulate|Euclid's Fourth Postulate]] +}} +{{eqn | l = IC + | r = EJ + | c = [[Opposite Sides and Angles of Parallelogram are Equal]] +}} +{{eqn | l = EJ + | r = EG + | c = as both are [[Definition:Radius of Circle|radii]] of the same [[Definition:Circle|circle]] +}} +{{eqn | n = 3 + | ll= \therefore + | l = IC + | r = EG + | c = [[Axiom:Euclid's Common Notion 1|Euclid's First Common Notion]] +}} +{{eqn | ll= \therefore + | l = \Area \triangle IKC + | r = \Area \triangle GKE + | c = [[Triangle Angle-Angle-Side Equality]]: $(1)$, $(2)$ and $(3)$ +}} +{{end-eqn}} +Similarly: +{{begin-eqn}} +{{eqn | n = 4 + | l = \angle HLA + | r = \angle GLE + | c = [[Two Straight Lines make Equal Opposite Angles]] +}} +{{eqn | l = \angle EGL + | r = \text {Right Angle} + | c = [[Tangent to Circle is Perpendicular to Radius]] +}} +{{eqn | l = \angle AHL + | r = \text {Right Angle} + | c = as $HJ \perp AD$ +}} +{{eqn | n = 5 + | ll= \therefore + | l = \angle EGL + | r = \angle AHL + | c = [[Axiom:Euclid's Fourth Postulate|Euclid's Fourth Postulate]] +}} +{{eqn | l = HA + | r = EF + | c = [[Opposite Sides and Angles of Parallelogram are Equal]] +}} +{{eqn | l = EF + | r = EG + | c = as both are [[Definition:Radius of Circle|radii]] of the same [[Definition:Circle|circle]] +}} +{{eqn | n = 6 + | ll= \therefore + | l = HA + | r = EG + | c = [[Axiom:Euclid's Common Notion 1|Euclid's First Common Notion]] +}} +{{eqn | ll= 
\therefore + | l = \Area \triangle HAL + | r = \Area \triangle GEL + | c = [[Triangle Angle-Angle-Side Equality]]: $(4)$, $(5)$ and $(6)$ +}} +{{end-eqn}} +Finally: +{{begin-eqn}} +{{eqn | l = \frac {\Area \Box ABCD} 2 + | r = \frac {AD \cdot CD} 2 + | c = [[Area of Parallelogram]] +}} +{{eqn | r = \Area \triangle ADC + | c = [[Area of Triangle in Terms of Side and Altitude]] +}} +{{eqn | r = \Area \triangle HAL + \Area \triangle IKC + \Area \Box DHLKI +}} +{{eqn | r = \Area \triangle GEL + \Area \triangle GKE + \Area \Box DHLKI +}} +{{eqn | r = \Area \Box DHEI +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Westwood's Puzzle/Proof 2} +Tags: Euclidean Geometry + +\begin{theorem} +:[[File:WestwoodsPuzzle.png|500px]] +Take any [[Definition:Rectangle|rectangle]] $ABCD$ and draw the [[Definition:Diagonal of Quadrilateral|diagonal]] $AC$. +[[Definition:Incircle of Triangle|Inscribe]] a [[Definition:Circle|circle]] $GFJ$ in one of the resulting [[Definition:Triangle (Geometry)|triangles]] $\triangle ABC$. +Drop [[Definition:Perpendicular|perpendiculars]] $IEF$ and $HEJ$ from the [[Definition:Incenter of Triangle|center of this incircle]] $E$ to the [[Definition:Side of Polygon|sides]] of the [[Definition:Rectangle|rectangle]]. +Then the [[Definition:Area|area]] of the [[Definition:Rectangle|rectangle]] $DHEI$ equals half the [[Definition:Area|area]] of the [[Definition:Rectangle|rectangle]] $ABCD$. +\end{theorem} + +\begin{proof} +The crucial geometric truth to note is that: +:$CJ = CG, AG = AF, BF = BJ$ +This follows from the fact that: +:$\triangle CEJ \cong \triangle CEG$, $\triangle AEF \cong \triangle AEG$ and $\triangle BEF \cong \triangle BEJ$ +This is a direct consequence of the point $E$ being the [[Definition:Center of Circle|center]] of the [[Definition:Incircle of Triangle|incircle]] of $\triangle ABC$. +Then it is just a matter of algebra. +Let $AF = a, FB = b, CJ = c$. +{{begin-eqn}} +{{eqn | l = \paren {a + b}^2 + \paren {b + c}^2 + | r = \paren {a + c}^2 + | c = [[Pythagoras's Theorem]] +}} +{{eqn | ll= \leadsto + | l = a^2 + 2 a b + b^2 + b^2 + 2 b c + c^2 + | r = a^2 + 2 a c + c^2 + | c = +}} +{{eqn | ll= \leadsto + | l = a b + b^2 + b c + | r = a c + | c = +}} +{{eqn | ll= \leadsto + | l = a b + b^2 + b c + a c + | r = 2 a c + | c = +}} +{{eqn | ll= \leadsto + | l = \paren {a + b} \paren {b + c} + | r = 2 a c + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Vector Cross Product Operator is Bilinear} +Tags: Vector Cross Product + +\begin{theorem} +Let $\mathbf u$, $\mathbf v$ and $\mathbf w$ be [[Definition:Vector (Linear Algebra)|vectors]] in a [[Definition:Vector Space|vector space]] $\mathbf V$ of [[Definition:Dimension of Vector Space|$3$ dimensions]]: +{{begin-eqn}} +{{eqn | l = \mathbf u + | r = u_i \mathbf i + u_j \mathbf j + u_k \mathbf k +}} +{{eqn | l = \mathbf v + | r = v_i \mathbf i + v_j \mathbf j + v_k \mathbf k +}} +{{eqn | l = \mathbf w + | r = w_i \mathbf i + w_j \mathbf j + w_k \mathbf k +}} +{{end-eqn}} +where $\left({\mathbf i, \mathbf j, \mathbf k}\right)$ is the [[Definition:Standard Ordered Basis on Vector Space|standard ordered basis]] of $\mathbf V$. +Let $c$ be a [[Definition:Real Number|real number]]. 
+Then:
+: $\left({c \mathbf u + \mathbf v}\right) \times \mathbf w = c \left({ \mathbf u \times \mathbf w}\right) + \mathbf v \times \mathbf w$
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = \left({c \mathbf u + \mathbf v}\right) \times \mathbf w
+ | r = \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ c u_i + v_i & c u_j + v_j & c u_k + v_k \\ w_i & w_j & w_k \end{vmatrix}
+ | c = {{Defof|Vector Cross Product}}
+}}
+{{eqn | r = \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ c u_i & c u_j & c u_k \\ w_i & w_j & w_k \end{vmatrix} + \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ v_i & v_j & v_k \\ w_i & w_j & w_k \end{vmatrix}
+ | c = [[Determinant as Sum of Determinants]]
+}}
+{{eqn | r = c \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ u_i & u_j & u_k \\ w_i & w_j & w_k \end{vmatrix} + \begin{vmatrix} \mathbf i & \mathbf j & \mathbf k \\ v_i & v_j & v_k \\ w_i & w_j & w_k \end{vmatrix}
+ | c = [[Determinant with Row Multiplied by Constant]]
+}}
+{{eqn | r = c \left({\mathbf u \times \mathbf w}\right) + \mathbf v \times \mathbf w
+ | c = {{Defof|Vector Cross Product}}
+}}
+{{end-eqn}}
+{{qed}}
+[[Category:Vector Cross Product]]
+\end{proof}<|endoftext|>
+\section{Natural Numbers have No Proper Zero Divisors}
+Tags: Natural Numbers
+
+\begin{theorem}
+Let $\N$ be the [[Definition:Natural Numbers|natural numbers]].
+Then for all $m, n \in \N$:
+:$m \times n = 0 \iff m = 0 \lor n = 0$
+That is, $\N$ has no [[Definition:Proper Zero Divisor|proper zero divisors]].
+\end{theorem}

+\begin{proof}
+=== Sufficient Condition ===
+Suppose that $n = 0$ or $m = 0$.
+Then from [[Zero is Zero Element for Natural Number Multiplication]]:
+:$m \times n = 0$
+{{qed|lemma}}
+=== Necessary Condition ===
+Let $m \times n = 0$.
+Suppose [[Definition:WLOG|WLOG]] that $n \ne 0$.
+{{begin-eqn}}
+{{eqn | l = n
+ | o = \ne
+ | r = 0
+ | c = 
+}}
+{{eqn | ll= \implies
+ | l = 1
+ | o = \le
+ | r = n
+ | c = Definition of [[Definition:One|One]]
+}}
+{{eqn | ll= \implies
+ | l = m \times n
+ | r = m \times \left({\left({n - 1}\right) + 1}\right)
+ | c = Definition of [[Definition:Difference (Natural Numbers)|Difference]]
+}}
+{{eqn | r = m \times \left({n - 1}\right) + m
+ | c = [[Natural Number Multiplication Distributes over Addition]]
+}}
+{{eqn | ll= \implies
+ | l = 0 \le m
+ | o = \le
+ | r = m \times \left({n - 1}\right) + m
+ | c = 
+}}
+{{end-eqn}}
+But as:
+: $m \times \left({n - 1}\right) + m = m \times n = 0$
+it follows that:
+:$0 \le m \le 0$
+and so as $\le$ is [[Definition:Antisymmetric Relation|antisymmetric]], it follows that $m = 0$.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Diagonals of Rhombus Bisect Angles}
+Tags: Euclidean Geometry, Vector Algebra, Parallelograms, Diagonals of Rhombus Bisect Angles
+
+\begin{theorem}
+Let $OABC$ be a [[Definition:Rhombus|rhombus]].
+Then:
+:$(1): \quad OB$ [[Definition:Angle Bisector|bisects]] $\angle AOC$ and $\angle ABC$
+:$(2): \quad AC$ [[Definition:Angle Bisector|bisects]] $\angle OAB$ and $\angle OCB$
+[[File:RhombusBisectAngles.png|400px]]
+\end{theorem}
+
+\begin{proof}
+{{WLOG}}, we will only prove $OB$ [[Definition:Angle Bisector|bisects]] $\angle AOC$.
+We have: +{{begin-eqn}} +{{eqn | l = OA + | r = OC + | c = {{Defof|Rhombus}} +}} +{{eqn | l = BA + | r = BC + | c = {{Defof|Rhombus}} +}} +{{eqn | l = OB + | r = OB + | c = Common Side +}} +{{eqn | ll = \leadsto + | l = \triangle OAB + | o = \cong + | r = \triangle OCB + | c = [[SSS]] +}} +{{end-eqn}} +Comparing corresponding angles gives: +:$\angle AOB = \angle COB$ +hence $OB$ [[Definition:Angle Bisector|bisects]] $\angle AOC$. +{{qed}} +\end{proof} + +\begin{proof} +{{WLOG}}, we will only prove $OB$ [[Definition:Angle Bisector|bisects]] $\angle AOC$. +Let the [[Definition:Vector (Euclidean Space)|position vector]] of $A$, $B$ and $C$ with respect to $O$ be $\mathbf a$, $\mathbf b$ and $\mathbf c$ respectively. +By definition of [[Definition:Rhombus|rhombus]], we have: +{{begin-eqn}} +{{eqn | n = a + | l = \mathbf a + \mathbf c + | r = \mathbf b + | c = [[Parallelogram Law]] +}} +{{eqn | n = b + | l = \norm {\mathbf a} + | r = \norm {\mathbf c} + | c = +}} +{{end-eqn}} +From the above we have: +{{begin-eqn}} +{{eqn | l = \cos \angle \mathbf a, \mathbf b + | r = \frac {\mathbf a \cdot \mathbf b} {\norm {\mathbf a} \norm {\mathbf b} } + | c = {{Defof|Dot Product|index = 2}} +}} +{{eqn | r = \frac {\mathbf a \cdot \paren {\mathbf a + \mathbf c} } {\norm {\mathbf a} \norm {\mathbf b} } + | c = from $(a)$ above: $\mathbf b = \mathbf a + \mathbf c$ +}} +{{eqn | r = \frac {\mathbf a \cdot \mathbf a + \mathbf a \cdot \mathbf c} {\norm {\mathbf a} \norm {\mathbf b} } + | c = [[Dot Product Distributes over Addition]] +}} +{{eqn | r = \frac { {\norm {\mathbf a} }^2 + \mathbf a \cdot \mathbf c} {\norm {\mathbf a} \norm {\mathbf b} } + | c = [[Dot Product of Vector with Itself]] +}} +{{eqn | r = \frac { {\norm {\mathbf c} }^2 + \mathbf a \cdot \mathbf c} {\norm {\mathbf c} \norm {\mathbf b} } + | c = from $(b)$ above: $\norm {\mathbf a} = \norm {\mathbf c}$ +}} +{{eqn | r = \frac {\mathbf c \cdot \mathbf c + \mathbf a \cdot \mathbf c} {\norm {\mathbf c} \norm {\mathbf b} } + | c = [[Dot Product of Vector with Itself]] +}} +{{eqn | r = \frac {\mathbf c \cdot \left({\mathbf a + \mathbf c}\right)} {\norm {\mathbf c} \norm {\mathbf b} } + | c = [[Dot Product Distributes over Addition]] +}} +{{eqn | r = \frac {\mathbf c \cdot \mathbf b} {\norm {\mathbf c} \norm {\mathbf b} } + | c = from $(a)$ above: $\mathbf b = \mathbf a + \mathbf c$ +}} +{{eqn | r = \cos \angle \mathbf c, \mathbf b + | c = {{Defof|Dot Product|index = 2}} +}} +{{end-eqn}} +By definition of [[Definition:Dot Product/Definition 2|dot product]], the angle between the vectors is between $0$ and $\pi$. +From [[Shape of Cosine Function]], [[Definition:Cosine|cosine]] is [[Definition:Injection|injective]] on this interval. +Hence: +:$\angle \mathbf a, \mathbf b = \angle \mathbf c, \mathbf b$ +The result follows. +{{qed}} +\end{proof}<|endoftext|> +\section{Summation Formula for Polygonal Numbers} +Tags: Polygonal Numbers + +\begin{theorem} +Let $P \left({k, n}\right)$ be the $n$th [[Definition:Polygonal Number|$k$-gonal number]]. 
+Then: +: $\displaystyle P \left({k, n}\right) = \sum_{j \mathop = 1}^n \left({\left({k - 2}\right) \left({j - 1}\right) + 1}\right)$ +\end{theorem} + +\begin{proof} +We have that: +$P \left({k, n}\right) = \begin{cases} +0 & : n = 0 \\ +P \left({k, n - 1}\right) + \left({k - 2}\right) \left({n - 1}\right) + 1 & : n > 0 +\end{cases}$ +Proof by [[Principle of Mathematical Induction|induction]]: +For all $n \in \N_{>0}$, let $\Pi \left({n}\right)$ be the [[Definition:Proposition|proposition]]: +: $\displaystyle P \left({k, n}\right) = \sum_{j \mathop = 1}^n \left({\left({k - 2}\right) \left({j - 1}\right) + 1}\right)$ +=== Basis for the Induction === +$\Pi(1)$ is the statement that $P \left({k, 1}\right) = 1$. +This follows directly from: +{{begin-eqn}} +{{eqn | l = P \left({k, 1}\right) + | r = P \left({k, 0}\right) + \left({k - 2}\right) \left({0}\right) + 1 +}} +{{eqn | r = 1 +}} +{{end-eqn}} +This is our [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now we need to show that, if $\Pi \left({r}\right)$ is true, where $r \ge 1$, then it logically follows that $\Pi \left({r + 1}\right)$ is true. +So this is our [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$\displaystyle P \left({k, r}\right) = \sum_{j \mathop = 1}^r \left({\left({k - 2}\right) \left({j - 1}\right) + 1}\right)$ +Then we need to show: +:$\displaystyle P \left({k, r + 1}\right) = \sum_{j \mathop = 1}^{r + 1} \left({\left({k - 2}\right) \left({j - 1}\right) + 1}\right)$ +=== Induction Step === +This is our [[Principle of Mathematical Induction#Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = P \left({k, r + 1}\right) + | r = P \left({k, r}\right) + \left({k - 2}\right) r + 1 +}} +{{eqn | r = \sum_{j \mathop = 1}^r \left({\left({k - 2}\right) \left({j - 1}\right) + 1}\right) + \left({k - 2}\right) r + 1 + | c = from the [[Summation Formula for Polygonal Numbers#Induction Hypothesis|induction hypothesis]] +}} +{{eqn | r = \sum_{j \mathop = 1}^{r + 1} \left({\left({k - 2}\right)\left({j - 1}\right) + 1}\right) +}} +{{end-eqn}} +So $\Pi \left({r}\right) \implies \Pi \left({r + 1}\right)$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +: $\displaystyle \forall n \in \N: P \left({k, n}\right) = \sum_{j \mathop = 1}^n \left({\left({k - 2}\right)\left({j - 1}\right) + 1}\right)$ +{{qed}} +[[Category:Polygonal Numbers]] +p2eohhczq1q05egn2ayjve0pcuzimhw +\end{proof}<|endoftext|> +\section{Divergent Sequence with Finite Number of Terms Deleted is Divergent} +Tags: Series, Sequences, Convergence Tests + +\begin{theorem} +Let $\left({X, d}\right)$ be a [[Definition:Metric Space|metric space]]. +Let $\left \langle {x_k} \right \rangle$ be a [[Definition:Sequence|sequence in $X$]]. +Let $\left \langle {x_k} \right \rangle$ be [[Definition:Divergent Sequence (Metric Space)|divergent]]. +Let a [[Definition:Finite Set|finite]] number of [[Definition:Term of Sequence|terms]] be deleted from $\left \langle {x_k} \right \rangle$. +Then the resulting [[Definition:Subsequence|subsequence]] is [[Definition:Divergent Sequence (Metric Space)|divergent]]. +\end{theorem}<|endoftext|> +\section{Relative Prime Modulo Tensor is Zero} +Tags: Tensor Algebra + +\begin{theorem} +Let $p \in \Z_{>0}$ and $q \in \Z_{>0}$ be [[Definition:Strictly Positive Integer|positive]] [[Definition:Coprime Integers|coprime integers]]. 
+Let [[Definition:Ring of Integers Modulo m|$\Z / p \Z$]] and $\Z / q \Z$ be $\Z$-[[Definition:Module|modules]]. +{{explain|It is not a good idea to use the same notation for both a ring and a module. Either $\Z / p \Z$ is a ring or it is a module. Please consider taking the advice in the explain template at the bottom of this page.}} +Then: +:$\Z / p \Z \otimes_\Z \Z / q\Z = 0$ +where $\otimes_\Z$ denotes [[Definition:Tensor Product of Modules|tensor product]] over integers. +\end{theorem} + +\begin{proof} +By [[Bézout's Lemma]] there exists $a, b \in \Z$ such that $a p + b q = 1$. +Then for $s \otimes_\Z t \in \Z / p \Z \otimes \Z / q \Z$: +{{begin-eqn}} +{{eqn | l = s \otimes t + | r = (s \left({a p + b q}\right)) \otimes t + | c = $s = s \cdot 1$ +}} +{{eqn | r = (s a p + s b q) \otimes t + | c = By [[Definition:Module|module axiom 2]] +}} +{{eqn | r = s b q \otimes t + s a p \otimes t + | c = By equality in [[Definition:Tensor Product of Modules|tensor product]] +}} +{{eqn | r = s b \otimes q t + s a p \otimes t + | c = By equality in [[Definition:Tensor Product of Modules|tensor product]] +}} +{{eqn | r = 0 + | c = by [[Tensor with Zero Element is Zero in Tensor]] and the fact that $qt = 0$ in $\Z_q$ and $sap=0$ in $\Z_p$ +}} +{{end-eqn}} +{{qed}} +{{explain|Notation needs to be tightened up. When the fundamental stuff like this is being addressed, it is important to explain exactly which operation of which component of the module is being used: the scalar product, or the group operation, or whichever of the ring operations. It is inadequate to use mere concatenation, as it is never completely clear which elements of which structure each one is. As for the rest of this proof, it is still impenetrably vague. The reason for the last line is a complete mystery -- the actual definition of $0$ also needs to be stated.}} +\end{proof}<|endoftext|> +\section{Tensor Product is Module} +Tags: Tensor Algebra, Module Theory + +\begin{theorem} +Let $R$ be a [[Definition:Ring (Abstract Algebra)|ring]]. +Let $M$ be a $R$-[[Definition:Right Module|right module]]. +Let $N$ be a $R$-[[Definition:Left Module|left module]]. +Then: +:$T = \displaystyle \bigoplus_{s \mathop \in M \mathop \times N} R s$ +is a [[Definition:Left Module|left module]]. +\end{theorem} + +\begin{proof} +=== Axiom 1 === +Let $x, y \in T$ with [[Definition:Module Direct Product|$x = (s_i)_{i\in I}$ and $y = (t_i)_{i\in I}$.]] +Let $\lambda\in R$. +Then: +{{begin-eqn}} +{{eqn | l = \lambda \circ (x + y) + | r = \lambda \circ ((s_i)_{i\in I} + (t_i)_{i\in I}) + | c = By definition of elements in [[Definition:Module Direct Sum|direct sum]] +}} +{{eqn | r = \lambda \circ (s_i + t_i)_{i\in I} + | c = By addition in [[Definition:Module Direct Sum|direct sum]] +}} +{{eqn | r = (\lambda \circ s_i + \lambda \circ t_i)_{i\in I} + | c = By $R$-action in [[Definition:Module Direct Sum|direct sum]] +}} +{{eqn | r = (\lambda \circ s_i)_{i\in I} + (\lambda \circ t_i)_{i\in I} + | c = By addition in [[Definition:Module Direct Sum|direct sum]] +}} +{{eqn | r = \lambda \circ (s_i)_{i\in I} + \lambda \circ (t_i)_{i\in I} + | c = By $R$-action in [[Definition:Module Direct Sum|direct sum]] +}} +{{eqn | r = \lambda \circ x + \lambda \circ y + | c = By definition of elements in [[Definition:Module Direct Sum|direct sum]] +}} +{{end-eqn}} +{{qed|lemma}} +=== Axiom 2 === +Let $x \in T$ with $x = (s_i)_{i\in I}$ +Let $\lambda, \mu \in R$. 
+Then: +{{begin-eqn}} +{{eqn | l = \left({\lambda + \mu}\right) \circ x + | r = (\lambda+\mu) \circ (s_i)_{i\in I} + | c = By definition of elements in [[Definition:Module Direct Sum|direct sum]] +}} +{{eqn | r = ((\lambda + \mu) \circ s_i)_{i\in I} + | c = By definition or $R$-action in direct sum +}} +{{eqn | r = (\lambda \circ s_i + \mu\circ s_i)_{i\in I} + | c = By definition or $R$-action in modules +}} +{{eqn | r = (\lambda \circ s_i)_{i\in I} + (\mu\circ s_i)_{i\in I} + | c = By definition or sum in direct sum +}} +{{eqn | r = \lambda \circ (s_i)_{i\in I} + \mu\circ (s_i)_{i\in I} + | c = By definition of $R$-action on direct sum +}} +{{eqn | r = \lambda \circ x + \mu\circ x + | c = By original equality +}} +{{end-eqn}} +{{qed|lemma}} +=== Axiom 3 === +Let $x\in T$ with $x = (s_i)_{i\in I}$. +Let $\lambda, \mu \in R$. +Then: +{{begin-eqn}} +{{eqn | l = (\lambda \times \mu) \circ x + | r = (\lambda \times \mu) \circ (s_i)_{i\in I} + | c = By original equality +}} +{{eqn | r = ((\lambda \times\mu) \circ s_i)_{i\in I} + | c = By Definition of $R$-action on direct sum +}} +{{eqn | r = (\lambda \circ (\mu\circ s_i))_{i\in I} + | c = By definition of modules +}} +{{eqn | r = \lambda \circ (\mu \circ s_i)_{i\in I} + | c = Definition of $R$-action on direct sum +}} +{{eqn | r = \lambda \circ \left({\mu \circ x}\right) + | c = By original equality +}} +{{end-eqn}} +{{qed}} +[[Category:Tensor Algebra]] +[[Category:Module Theory]] +dzo7uolnglmq24k5wog3smx85vt5h3o +\end{proof}<|endoftext|> +\section{Supremum of Subset of Real Numbers is Arbitrarily Close} +Tags: Real Analysis + +\begin{theorem} +Let $A \subseteq \R$ be a [[Definition:Subset|subset]] of the [[Definition:Real Number|real numbers]]. +Let $b$ be a [[Definition:Supremum of Subset of Real Numbers|supremum]] of $A$. +Let $\epsilon \in \R_{>0}$. +Then: +:$\exists x \in A: b − x < \epsilon$ +\end{theorem} + +\begin{proof} +Note that $A$ is [[Definition:Non-Empty Set|non-empty]] as the [[Definition:Empty Set|empty set]] does not admit a [[Definition:Supremum of Subset of Real Numbers|supremum]] (in $\R$). +Suppose $\epsilon \in \R_{>0}$ such that: +:$\forall x \in A: b − x \ge \epsilon$ +Then: +:$\forall x \in A: b − \epsilon \ge x$ +and so $b − \epsilon$ would be an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] of $A$ which is less than $b$. +But since $b$ is a [[Definition:Supremum of Subset of Real Numbers|supremum]] of $A$ there can be no such $b − \epsilon$. +From that [[Proof by Contradiction|contradiction]] it follows that: +:$\exists x \in A: b − x < \epsilon$ +{{qed}} +\end{proof}<|endoftext|> +\section{Tensor with Zero Element is Zero in Tensor} +Tags: Tensor Algebra, Homological Algebra + +\begin{theorem} +Let $R$ be a [[Definition:Ring (Abstract Algebra)|ring]]. +Let $M$ be a [[Definition:Right Module|right $R$-module]]. +Let $N$ be a [[Definition:Left Module|left $R$-module]]. +Let $M \otimes_R N$ denote their [[Definition:Tensor Product of Modules|tensor product]]. +Then: +:$0\otimes_R n = m \otimes_R 0 = 0 \otimes_R 0$ +is the [[Definition:Zero of Tensor Product|zero]] in $M \otimes_R N$. 
+\end{theorem}
+
+\begin{proof}
+Let $m \in M$ and $n \in N$.
+Then:
+{{begin-eqn}}
+{{eqn | l = m \otimes_R n
+ | r = \paren {m + 0} \otimes_R n
+ | c = {{GroupAxiom|2}}
+}}
+{{eqn | r = m \otimes_R n + 0 \otimes_R n
+ | c = {{Defof|Tensor Equality}}
+}}
+{{end-eqn}}
+The tensor product $M \otimes_R N$ is an [[Definition:Abelian Group|abelian group]] under $+$.
+Hence, adding the [[Definition:Inverse Element|inverse]] of $m \otimes_R n$ to both sides:
+:$0 \otimes_R n = 0$
+where the $0$ on the right hand side denotes the [[Definition:Identity Element|identity element]] of $M \otimes_R N$.
+Similarly:
+{{begin-eqn}}
+{{eqn | l = m \otimes_R n
+ | r = m \otimes_R \paren {n + 0}
+ | c = {{GroupAxiom|2}}
+}}
+{{eqn | r = m \otimes_R n + m \otimes_R 0
+ | c = {{Defof|Tensor Equality}}
+}}
+{{end-eqn}}
+and so:
+:$m \otimes_R 0 = 0$
+Finally, taking $n = 0$ in $0 \otimes_R n = 0$ gives:
+:$0 \otimes_R 0 = 0$
+Hence $0 \otimes_R n$, $m \otimes_R 0$ and $0 \otimes_R 0$ are all equal to the [[Definition:Zero of Tensor Product|zero]] of $M \otimes_R N$.
+{{qed}}
+[[Category:Tensor Algebra]]
+[[Category:Homological Algebra]]
+\end{proof}<|endoftext|>
+\section{Primitive of Arcsecant of x over a/Formulation 1}
+Tags: Primitives involving Inverse Secant Function
+
+\begin{theorem}
+:$\displaystyle \int \operatorname{arcsec} \frac x a \ \mathrm d x = \begin{cases}
+\displaystyle x \operatorname{arcsec} \frac x a - a \ln \left({x + \sqrt {x^2 - a^2} }\right) + C & : 0 < \operatorname{arcsec} \dfrac x a < \dfrac \pi 2 \\
+\displaystyle x \operatorname{arcsec} \frac x a + a \ln \left({x + \sqrt {x^2 - a^2} }\right) + C & : \dfrac \pi 2 < \operatorname{arcsec} \dfrac x a < \pi \\
+\end{cases}$
+\end{theorem}
+
+\begin{proof}
+With a view to expressing the [[Definition:Primitive (Calculus)|primitive]] in the form:
+:$\displaystyle \int u \frac {\mathrm d v}{\mathrm d x} \ \mathrm d x = u v - \int v \frac {\mathrm d u}{\mathrm d x} \ \mathrm d x$
+let:
+{{begin-eqn}}
+{{eqn | l = u
+ | r = \operatorname{arcsec} \frac x a
+ | c = 
+}}
+{{eqn | ll= \implies
+ | l = \frac {\mathrm d u} {\mathrm d x}
+ | r = \begin{cases} \dfrac a {x \sqrt {x^2 - a^2} } & : 0 < \operatorname{arcsec} \dfrac x a < \dfrac \pi 2 \\
+\dfrac {-a} {x \sqrt {x^2 - a^2} } & : \dfrac \pi 2 < \operatorname{arcsec} \dfrac x a < \pi \\
+\end{cases}
+ | c = [[Derivative of Arcsecant of x over a|Derivative of $\operatorname{arcsec} \dfrac x a$]]
+}}
+{{end-eqn}}
+and let:
+{{begin-eqn}}
+{{eqn | l = \frac {\mathrm d v} {\mathrm d x}
+ | r = 1
+ | c = 
+}}
+{{eqn | ll= \implies
+ | l = v
+ | r = x
+ | c = [[Primitive of Constant]]
+}}
+{{end-eqn}}
+First let $\operatorname{arcsec} \dfrac x a$ be in the [[Definition:Open Real Interval|interval]] $\left({0 \,.\,.\,\dfrac \pi 2}\right)$.
+Then:
+{{begin-eqn}}
+{{eqn | l = \int \operatorname{arcsec} \frac x a \ \mathrm d x
+ | r = x \operatorname{arcsec} \frac x a - \int x \left({\frac a {x \sqrt {x^2 - a^2} } }\right) \ \mathrm d x + C
+ | c = [[Integration by Parts]]
+}}
+{{eqn | r = x \operatorname{arcsec} \frac x a - a \int \frac {\mathrm d x} {\sqrt {x^2 - a^2} } + C
+ | c = [[Primitive of Constant Multiple of Function]]
+}}
+{{eqn | r = x \operatorname{arcsec} \frac x a - a \ln \left({x + \sqrt {x^2 - a^2} }\right) + C
+ | c = [[Primitive of Reciprocal of Root of x squared minus a squared/Logarithm Form|Primitive of $\dfrac 1 {\sqrt {x^2 - a^2} }$]]
+}}
+{{end-eqn}}
+Similarly, let $\operatorname{arcsec} \dfrac x a$ be in the [[Definition:Open Real Interval|interval]] $\left({\dfrac \pi 2 \,.\,.\, \pi}\right)$.
+Then:
+{{begin-eqn}}
+{{eqn | l = \int \operatorname{arcsec} \frac x a \ \mathrm d x
+ | r = x \operatorname{arcsec} \frac x a - \int x \left({\frac {-a} {x \sqrt {x^2 - a^2} } }\right) \ \mathrm d x + C
+ | c = [[Integration by Parts]]
+}}
+{{eqn | r = x \operatorname{arcsec} \frac x a + a \int \frac {\mathrm d x} {\sqrt {x^2 - a^2} } + C
+ | c = [[Primitive of Constant Multiple of Function]]
+}}
+{{eqn | r = x \operatorname{arcsec} \frac x a + a \ln \left({x + \sqrt {x^2 - a^2} }\right) + C
+ | c = [[Primitive of Reciprocal of Root of x squared minus a squared/Logarithm Form|Primitive of $\dfrac 1 {\sqrt {x^2 - a^2} }$]]
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Primitive of Arcsecant of x over a/Formulation 2}
+Tags: Primitives involving Inverse Secant Function
+
+\begin{theorem}
+:$\displaystyle \int \arcsec \frac x a \rd x = x \arcsec \frac x a - a \ln \size {x + \sqrt {x^2 - a^2} } + C$
+for $x^2 > a^2$.
+$\displaystyle \arcsec \frac x a$ is undefined on the [[Definition:Real Numbers|real numbers]] for $x^2 < a^2$.
+\end{theorem}
+
+\begin{proof}
+With a view to expressing the [[Definition:Primitive (Calculus)|primitive]] in the form:
+:$\displaystyle \int u \frac {\d v} {\d x} \rd x = u v - \int v \frac {\d u} {\d x} \rd x$
+let:
+{{begin-eqn}}
+{{eqn | l = u
+ | r = \arcsec \frac x a
+ | c = 
+}}
+{{eqn | ll= \leadsto
+ | l = \frac {\d u} {\d x}
+ | r = \dfrac a {\size x \sqrt {x^2 - a^2} }
+ | c = [[Derivative of Arcsecant of x over a|Derivative of $\arcsec \dfrac x a$]]
+}}
+{{end-eqn}}
+and let:
+{{begin-eqn}}
+{{eqn | l = \frac {\d v} {\d x}
+ | r = 1
+ | c = 
+}}
+{{eqn | ll= \leadsto
+ | l = v
+ | r = x
+ | c = [[Primitive of Constant]]
+}}
+{{end-eqn}}
+We then have:
+{{begin-eqn}}
+{{eqn | l = \int \arcsec \frac x a \rd x
+ | r = x \arcsec \frac x a - \int x \paren {\dfrac a {\size x \sqrt {x^2 - a^2} } } \rd x + C
+ | c = [[Integration by Parts]]
+}}
+{{eqn | n = 1
+ | r = x \arcsec \frac x a - \int \frac x {\size x} \paren {\frac a {\sqrt {x^2 - a^2} } } \rd x + C
+ | c = rearrangement
+}}
+{{end-eqn}}
+Let $x > 0$.
+Then:
+{{begin-eqn}}
+{{eqn | l = \int \frac x {\size x} \paren {\frac a {\sqrt {x^2 - a^2} } } \rd x
+ | r = \int \frac a { {\sqrt {x^2 - a^2} } } \rd x
+ | c = {{Defof|Absolute Value}}
+}}
+{{eqn | r = a \int \frac 1 { {\sqrt {x^2 - a^2} } } \rd x
+ | c = [[Primitive of Constant Multiple of Function]]
+}}
+{{eqn | r = a \size {\ln \size {x + \sqrt {x^2 - a^2} } } + C
+ | c = [[Integral of One Over Square Root of Binomial]]
+}}
+{{eqn | r = a \ln \size {x + \sqrt {x^2 - a^2} } + C
+ | c = as argument of [[Definition:Natural Logarithm|logarithm]] is [[Definition:Strictly Positive Real Number|positive]]
+}}
+{{end-eqn}}
+Similarly, let $x < 0$.
+Then: +{{begin-eqn}} +{{eqn | l = \int \frac x {\size x} \paren {\frac a {\sqrt {x^2 - a^2} } } \rd x + | r = \int \paren {-1} \frac a { {\sqrt{x^2 - a^2} } } \rd x + | c = {{Defof|Absolute Value}} +}} +{{eqn | r = -a \int \frac 1 { {\sqrt{x^2 - a^2} } } \rd x + | c = [[Primitive of Constant Multiple of Function]] +}} +{{eqn | r = -a \size {\ln \size {x + \sqrt {x^2 - a^2} } } + C + | c = [[Integral of One Over Square Root of Binomial]] +}} +{{eqn | r = -a \paren {-\ln \size {x + \sqrt {x^2 - a^2} } } + C + | c = as argument of [[Definition:Natural Logarithm|logarithm]] is [[Definition:Strictly Negative Real Number|negative]] +}} +{{eqn | r = a \ln \size {x + \sqrt {x^2 - a^2} } + C + | c = as argument of [[Definition:Natural Logarithm|logarithm]] is [[Definition:Strictly Negative Real Number|negative]] +}} +{{end-eqn}} +{{qed}} +[[Category:Primitives involving Inverse Secant Function]] +lyfdo47tn7b8vs8a82zr6p4vqsqu1eh +\end{proof}<|endoftext|> +\section{Cross Product of Vector with Itself is Zero} +Tags: Vector Cross Product + +\begin{theorem} +Let $\mathbf x$ be a [[Definition:Vector (Linear Algebra)|vector]] in a [[Definition:Vector Space|vector space]] of [[Definition:Dimension of Vector Space|$3$ dimensions]]: +: $\mathbf x = x_i \mathbf i + x_j \mathbf j + x_k \mathbf k$ +Then: +:$\mathbf x \times \mathbf x = \mathbf 0$ +where $\times$ denotes [[Definition:Vector Cross Product|vector cross product]]. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \mathbf x \times \mathbf x + | r = \begin{vmatrix} +\mathbf i & \mathbf j & \mathbf k \\ +x_i & x_j & x_k \\ +x_i & x_j & x_k \\ +\end{vmatrix} + | c = {{Defof|Vector Cross Product}} +}} +{{eqn | r = \mathbf 0 + | c = [[Square Matrix with Duplicate Rows has Zero Determinant]] +}} +{{end-eqn}} +{{qed}} +[[Category:Vector Cross Product]] +1bl292w88tnbfskcppb6mc83yd4zye2 +\end{proof}<|endoftext|> +\section{Reciprocal of Riemann Zeta Function} +Tags: Riemann Zeta Function + +\begin{theorem} +For $\Re \left({z}\right) > 1$: +:$\displaystyle \frac 1 {\zeta \left({z}\right)} = \sum_{k \mathop = 1}^\infty \frac{\mu \left({k}\right)} {k^z}$ +where: +: $\zeta$ is the [[Definition:Riemann Zeta Function|Riemann zeta function]] +: $\mu$ is the [[Definition:Möbius Function|Möbius function]]. +\end{theorem} + +\begin{proof} +By definition of the [[Definition:Riemann Zeta Function/Definition 2|Riemann zeta function]]: +{{begin-eqn}} +{{eqn | l = \frac 1 {\zeta \left({z}\right)} + | r = \prod_{p \text{ prime} } \left({1 - p^{-z} }\right) + | c = +}} +{{eqn | r = \left({1 - \frac 1 {2^z} }\right) \left({1 - \frac 1 {3^z} }\right) \left({1 - \frac 1 {5^z} }\right) \left({1 - \frac 1 {7^z} }\right) \left({1 - \frac 1 {11^z} }\right) \cdots + | c = +}} +{{end-eqn}} +The expansion of this product will be: +:$\displaystyle 1 + \sum_{n \text{ prime}} \left({\frac{-1} {n^z} }\right) + \sum_{n \mathop = p_1 p_2} \left({ \frac{-1}{p_1^z} \frac{-1} {p_2^z} }\right) + \sum_{n \mathop = p_1 p_2 p_3} \left({ \frac {-1} {p_1^z} \frac {-1} {p_2^z} \frac{-1}{p_3^z} }\right) + \cdots$ +which is precisely: +:$\displaystyle \sum_{n \mathop = 1}^\infty \frac{\mu \left({n}\right)} {n^z}$ +as desired. 
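+As an informal numerical cross-check only (not part of the proof), the two sides can be compared for a sample value of $z$; in the sketch below the choices $z = 2$ and the truncation point $N$ are arbitrary, and the Möbius function is hand-rolled by trial division:
+```python
+def mobius(n: int) -> int:
+    """Moebius function via trial division."""
+    result, p = 1, 2
+    while p * p <= n:
+        if n % p == 0:
+            n //= p
+            if n % p == 0:   # square factor present
+                return 0
+            result = -result
+        p += 1
+    return -result if n > 1 else result
+
+z, N = 2.0, 20000
+zeta_trunc = sum(1 / k ** z for k in range(1, N + 1))
+mobius_trunc = sum(mobius(k) / k ** z for k in range(1, N + 1))
+print(1 / zeta_trunc, mobius_trunc)   # both approximately 6 / pi^2 = 0.6079...
+```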
+{{handwaving|get this from [[Dirichlet Series of Inverse of Arithmetic Function]] instead}} +{{qed}} +[[Category:Riemann Zeta Function]] +5s8vvcoogyqnqlpso596vxiumn1kgd1 +\end{proof}<|endoftext|> +\section{Condition for Agreement of Family of Mappings} +Tags: Mapping Theory, Set Union + +\begin{theorem} +Let $\left({A_i}\right)_{i \mathop \in I}, \left({B_i}\right)_{i \mathop \in I}$ be families of non empty sets. +Let $\left({f_i}\right)_{i \mathop \in I}$ be a family of [[Definition:Mapping|mappings]] such that: +:$\forall i \in I: f_i \in \mathcal F \left({A_i, B_i}\right)$ +{{explain|Clarify: what is $\mathcal F \left({A_i, B_i}\right)$? From the context it can be understood as being the set of all [[Definition:Mapping|mappings]] $f_i: A_i \to B_i$ but this is just a guess. The domain and range of each of the $f_i$ could help with being explicitly defined.
Note also that I have taken the liberty of exchanging the difficult-to-read $\mathscr F$ with the more eye-friendly $\mathcal F$.}} +We have that: +:$\displaystyle \bigcup_{i \mathop \in I} f_i \in \mathcal F \left({\bigcup_{i \mathop \in I} A_i, \bigcup_{i \mathop \in I} B_i }\right)$ +{{iff}}: +:$\displaystyle \forall i, j \in I: \operatorname{Dom} f_i \cap \operatorname{Dom} f_j \ne \varnothing \implies \left({\forall a \in \left({\operatorname{Dom} f_i \cap \operatorname{Dom} f_j}\right), \left({a, b}\right) \in f_i \implies \left({a, b}\right) \in f_j}\right)$ +\end{theorem} + +\begin{proof} +Let $\left({A_i}\right)_{i \mathop \in I}, \left({B_i}\right)_{i \mathop \in I}$ be families of non empty sets. +Let $\left({f_i}\right)_{i \mathop \in I}$ be a family of [[Definition:Mapping|mappings]] such that: +:$\forall i \in I: f_i \in \mathcal F \left({A_i, B_i}\right)$ +=== Sufficient Condition === +Let: +:$\displaystyle \bigcup_{i \mathop \in I} f_i \in \mathcal F \left({\bigcup_{i \mathop \in I} A_i, \bigcup_{i \mathop \in I} B_i}\right)$ +Let $i, j \in I$ be such that: +:$\operatorname{Dom} f_i \cap \operatorname{Dom} f_j \ne \varnothing$ +Let $a \in \left({\operatorname{Dom} f_i \cap \operatorname{Dom} f_j}\right)$ +Let $\displaystyle b \in \bigcup_{i \mathop \in I} B_i$ be such that: +:$\left({a, b}\right) \in f_i$ +Aiming for a [[Definition:Contradiction|contradiction]], suppose: +:$\left({a, b}\right) \notin f_j$ +As $a \in \left({\operatorname{Dom} f_i \cap \operatorname{Dom} f_j} \right)$: +:$\displaystyle \exists c \in \bigcup_{i \mathop \in I} B_i: \left({a, c}\right) \in f_j$ +As $\left({a, b}\right) \in f_i$: +:$\displaystyle \left({a, b}\right) \in \bigcup_{i \mathop \in I} f_i$ +Thus: +:$\displaystyle \left({a, b}\right), \left({a, c}\right) \in \bigcup_{i \mathop \in I} f_i$ +such that $b \ne c$ and $\displaystyle \bigcup_{i \mathop \in I} f_i$ is a [[Definition:Mapping|mapping]]. +This is a [[Definition:Contradiction|contradiction]]. +Thus the supposition that the fact $\left({a, b}\right) \notin f_j$ was false. +So: +:$\left({a, b}\right) \in f_j$ +{{qed|lemma}} +=== Necessary Condition === +Let: +:$\forall i, j \in I: \operatorname{Dom} f_i \cap \operatorname{Dom} f_j \ne \varnothing \implies \left({\forall a \in \left({\operatorname{Dom} f_i \cap \operatorname{Dom} f_j}\right), \left({a, b}\right) \in f_i \implies \left({a, b}\right) \in f_j}\right)$ +Let $\displaystyle a \in \bigcup_{i \mathop \in I} A_i$. +Hence: +:$\exists k \in I: a \in A_k$ +Let $k \in I$. +Thus: +:$a \in \operatorname{Dom} f_k$ +Let $l = f_k \left({a}\right)$ +It follows that: +:$\left({a, l}\right) \in f_k$ +and so: +:$\displaystyle \left({a, l}\right) \in \bigcup_{i \mathop \in I} f_i$ +Aiming for a [[Definition:Contradiction|contradiction]], suppose: +:$\displaystyle \exists m \in \bigcup_{i \mathop \in I} B_i: \left({\left({a, m}\right) \in \bigcup_{i \mathop \in I} f_i \land m \ne l}\right)$ +Let $\displaystyle m \in \bigcup_{i \mathop \in I} B_i$. +Let $j \in I$ be such that: +:$\left({a, m}\right) \in f_j$ +We have: +:$a \in \left({\operatorname{Dom} f_k \cap \operatorname{Dom} f_j}\right)$ +As $\left({a, l}\right) \in f_k$: +:$\left(a,l \right) \in f_j$. +Therefore: +:$\left({a, m}\right), \left({a, l}\right) \in f_j$ +where $f_j \in \mathcal F \left({A_j, B_j}\right)$ and $m \ne l$. +This [[Definition:Contradiction|contradicts]] the definition of [[Definition:Mapping|mapping]]. 
+So: +:$\displaystyle \nexists m \in \bigcup_{i \mathop \in I} B_i: \left({\left({a, m}\right) \in \bigcup_{i \mathop \in I} f_i \land m \ne l}\right)$ +and so: +:$\displaystyle \bigcup_{i \mathop \in I} f_i \in \mathcal F \left({\bigcup_{i \mathop \in I} A_i, \bigcup_{i \mathop \in I} B_i}\right)$ +{{qed}} +[[Category:Mapping Theory]] +[[Category:Set Union]] +4742h2h4m9o7qykrnfpt0mgy6tfbfrx +\end{proof}<|endoftext|> +\section{Supremum of Set of Real Numbers is at least Supremum of Subset} +Tags: Real Analysis, Supremum of Set of Real Numbers is at least Supremum of Subset + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]] of [[Definition:Real Number|real numbers]]. +Let $S$ have a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +Let $T$ be a [[Definition:Non-Empty Set|non-empty]] [[Definition:Subset|subset]] of $S$. +Then $\sup T$ exists and: +:$\sup T \le \sup S$ +\end{theorem} + +\begin{proof} +The [[Definition:Real Number|number]] $\sup S$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $S$. +Therefore, $\sup S$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $T$ as $T$ is a [[Definition:Non-Empty Set|non-empty]] [[Definition:Subset|subset]] of $S$. +Accordingly, $T$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]] by the [[Continuum Property]]. +The [[Definition:Real Number|number]] $\sup S$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $T$. +Therefore, $\sup S$ is greater than or equal to $\sup T$ as $\sup T$ is the [[Definition:Supremum of Subset of Real Numbers|least upper bound]] of $T$. +{{qed}} +\end{proof} + +\begin{proof} +By the [[Continuum Property]], $T$ admits a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +It follows from [[Supremum of Subset]] that $\sup T \le \sup S$. +{{qed}} +\end{proof} + +\begin{proof} +$S$ is [[Definition:Bounded Above Set|bounded above]] as $S$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +Therefore, $T$ is [[Definition:Bounded Above Set|bounded above]] as $T$ is a [[Definition:Subset|subset]] of $S$. +Accordingly, $T$ admits a [[Definition:Supremum of Subset of Real Numbers|supremum]] by the [[Continuum Property]] as $T$ is [[Definition:Non-Empty Set|non-empty]]. +We know that $\sup T$ and $\sup S$ exist. +Therefore by [[Suprema of two Real Sets]]: +:$\forall \epsilon \in \R_{>0}: \forall t \in T: \exists s \in S: t < s + \epsilon \iff \sup T \le \sup S$ +We have: +{{begin-eqn}} +{{eqn | l = \forall \epsilon + | o = \in + | r = \R_{>0}: 0 < \epsilon +}} +{{eqn | ll= \leadsto + | l = \forall \epsilon + | o = \in + | r = \R_{>0}: \forall t \in T: t < t + \epsilon +}} +{{eqn | ll= \leadsto + | l = \forall \epsilon + | o = \in + | r = \R_{>0}: \forall t \in T: t < s + \epsilon \land s = t +}} +{{eqn | ll= \leadsto + | l = \forall \epsilon + | o = \in + | r = \R_{>0}: \forall t \in T: \exists s \in S: t < s + \epsilon + | c = as $T \subseteq S$ +}} +{{eqn | ll= \leadsto + | l = \sup T + | o = \le + | r = \sup S +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +By definition $\sup S$ is an [[Definition:Upper Bound|upper bound]] for $S$. +Thus: +:$\forall x \in S: x \le \sup S$ +As $T \subseteq S$ we have by definition of [[Definition:Subset|subset]] that: +:$\forall x \in T: x \in S$ +Hence: +:$\forall x \in T: x \le \sup S$ +So by definition $\sup S$ is an [[Definition:Upper Bound|upper bound]] for $T$. 
+So $\sup S$ is at least as big as the [[Definition:Smallest Element|smallest]] [[Definition:Upper Bound|upper bound]] for $T$ +Thus by definition of [[Definition:Supremum|supremum]]: +:$\sup T \le \sup S$ +{{qed}} +\end{proof}<|endoftext|> +\section{Supremum of Subset of Union Equals Supremum of Union} +Tags: Real Analysis + +\begin{theorem} +Let $S$ be a [[Definition:Non-Empty Set|non-empty]] [[Definition:Real Number|real set]]. +Let $S$ have a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +Let $\set {S_i: i \in \set {1, 2, \ldots, n} }$, $n \in \N_{>0}$, be a set of [[Definition:Non-Empty Set|non-empty]] [[Definition:Subset|subsets]] of $S$. +Let $\bigcup S_i = S$. +Then there exists a $j$ in $\set {1, 2, \ldots, n}$ such that: +:$\sup S_j = \sup S$ +\end{theorem} + +\begin{proof} +If $S$ equals $S_j$ for a $j$ in $\set {1, 2, \ldots, n}$, it is trivially true that $\sup S = \sup S_j$. +Now assume that $S$ is unequal to $S_i$ for every $i$ in $\left\{{1, 2, \ldots, n}\right\}$. +By [[Supremum of Set of Real Numbers is at least Supremum of Subset]], $\sup S \ge \sup S_i$ for every $i$ in $\set{1, 2, \ldots, n}$. +There are two alternatives; either: +:$\sup S > \sup S_i$ for every $i$ in $\set {1, 2, \ldots, n}$ +or: +:$\sup S = \sup S_j$ for at least one $j$ in $\set {1, 2, \ldots, n}$. +Suppose that: +:$\sup S > \sup S_i$ for every $i$ in $\set {1, 2, \ldots, n}$ +Let $\epsilon = \sup S - \map \max {\sup S_1, \sup S_2, \ldots, \sup S_n}$. +We note that $\epsilon > 0$. +By [[Supremum of Subset of Real Numbers is Arbitrarily Close]], $S$ has an [[Definition:Element|element]] $x$ that satisfies: +:$x > \sup S - \epsilon$ +We have: +{{begin-eqn}} +{{eqn | o = > + | l = x + | r = \sup S - \epsilon +}} +{{eqn | r = \sup S - \paren {\sup S - \map \max {\sup S_1, \sup S_2, \ldots, \sup S_n} } + | c = definition of $\epsilon$ +}} +{{eqn | r = \map \max {\sup S_1, \sup S_2, \ldots, \sup S_n} +}} +{{end-eqn}} +Therefore: +:$x > \map \max {\sup S_1, \sup S_2, \ldots, \sup S_n}$ +This means that $x > \sup S_i$ for every $i$ in $\set {1, 2, \ldots, n}$. +However, $x$ must be an [[Definition:Element|element]] of $S_j$ for some $j$ in $\set {1, 2, \ldots, n}$ as $x \in S$ and $S = \bigcup S_i$. +Accordingly, it is not true that $\sup S > \sup S_i$ for every $i$ in $\set {1, 2, \ldots, n}$. +We just concluded that the alternative: +:$\sup S > \sup S_i$ for every $i$ in $\set {1, 2, \ldots, n}$ +is not true. +Therefore, the other alternative: +:$\sup S = \sup S_j$ for a $j$ in $\set {1, 2, \ldots, n}$ +is true. +{{qed}} +[[Category:Real Analysis]] +2p2kel5xg9ahcfw856r2xlxijud240n +\end{proof}<|endoftext|> +\section{Condition for Ideal to be Total Ring} +Tags: Ideal Theory + +\begin{theorem} +Let $\left({A, +, \circ}\right)$ be a [[Definition:Commutative and Unitary Ring|commutative ring with unity]]. +Let $I$ be an [[Definition:Ideal of Ring|ideal]] of $A$ such that the [[Definition:Quotient Ring|quotient ring]] $A / I$ is a [[Definition:Field (Abstract Algebra)|field]]. +Let $J$ be an [[Definition:Ideal of Ring|ideal]] of $A$ such that $I \subsetneq J$. +Then: +:$A = J$ +\end{theorem} + +\begin{proof} +Let $A$ be a [[Definition:Commutative and Unitary Ring|commutative ring with unity]]. +Let $I$ be an [[Definition:Ideal of Ring|ideal]] of $A$ such that the [[Definition:Quotient Ring|quotient ring]] $A / I$ is a [[Definition:Field (Abstract Algebra)|field]]. +Let $J$ be an [[Definition:Ideal of Ring|ideal]] of $A$ such that $I \subsetneq J$. 
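+Before the main argument, a toy instance may help fix ideas (purely illustrative, and not part of the proof): take $A = \Z$ and $I = 5 \Z$, so that $A / I = \Z / 5 \Z$ is a [[Definition:Field (Abstract Algebra)|field]]; the choice of $5$ is arbitrary. The ideals of $\Z$ containing $5 \Z$ are exactly the $d \Z$ with $d$ a divisor of $5$, so the only ideal strictly containing $5 \Z$ is $\Z$ itself:
+```python
+# Toy check in A = Z with I = 5Z (so A/I is a field): list the ideals dZ that
+# contain 5Z, i.e. those with d dividing 5, and keep the strictly larger ones.
+p = 5
+containing = [d for d in range(1, p + 1) if p % d == 0]   # dZ contains 5Z iff d divides 5
+strictly_larger = [d for d in containing if d != p]
+print(strictly_larger)   # [1] -> the only such ideal is 1Z = Z, as the theorem predicts
+```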
+From [[Ideal is Subring]]: +:$J \subseteq A$ +It remains to be proved that that $A \subseteq J$. +Let $a \in A$. +As $I \subsetneq J$, it follows from definition of [[Definition:Proper Subset|proper subset]] that: +:$\exists j \in J: j \notin I$ +Consider the [[Definition:Coset|coset]] $j + I \in A / I$. +As $A / I$ is a [[Definition:Field (Abstract Algebra)|field]]: +:$\exists C \in A / I: \left({j + I}\right) \circ C = 1 + I$ +Let $j' \in A$ be such that $C = j' + I$. +Then: +{{begin-eqn}} +{{eqn | l = \left({j + I}\right) \circ \left({j' + I}\right) + | r = 1 + I + | c = +}} +{{eqn | ll= \implies + | l = j \circ j' + I + | r = 1 + I + | c = +}} +{{eqn | ll= \implies + | l = a \circ \left({j \circ j'}\right) + I + | r = a + I + | c = +}} +{{eqn | ll= \implies + | lo= \exists i, i' \in I: + | l = a \circ \left({j \circ j'}\right) + i + | r = a + i' + | c = +}} +{{eqn | ll= \implies + | l = a + | r = a \circ j \circ j' + i - i' + | c = +}} +{{eqn | ll= \implies + | l = a \circ j + | o = \in + | r = J + | c = {{Defof|Ideal of Ring}} +}} +{{eqn | ll= \implies + | l = a \circ j \circ j' + | o = \in + | r = J + | c = +}} +{{eqn | ll= \implies + | l = a \cdot j \cdot j' + i - i' + | o = \in + | r = J + | c = {{Defof|Ideal of Ring}} and $I \subsetneq J$ +}} +{{eqn | ll= \implies + | l = a + | o = \in + | r = J + | c = {{Defof|Ideal of Ring}} and $I \subsetneq J$ +}} +{{end-eqn}} +By definition of [[Definition:Subset|subset]]: +:$A \subseteq J$ +Thus $A = J$ follows by definition of [[Definition:Set Equality|set equality]]. +{{qed}} +{{improve|Might be able to get there quicker by using [[Ideals of Field]] on the [[Definition:Quotient Ring|quotient ring]] $A / I$.}} +[[Category:Ideal Theory]] +7q3s92y3hxf8h7dz1s77i9bae1pexhv +\end{proof}<|endoftext|> +\section{Area between Two Non-Intersecting Chords} +Tags: Circles + +\begin{theorem} +Let $AB$ and $CD$ be two [[Definition:Chord of Circle|chords]] of a [[Definition:Circle|circle]] whose [[Definition:Center of Circle|center]] is at $O$ and whose [[Definition:Radius of Circle|radius]] is $r$. +:[[File:Circle with chords and area.png|400px]] +:[[File:Circle with chords and area 3.1.png|400px]] +Let $\alpha$ and $\theta$ be respectively the measures in [[Definition:Radian|radians]] of the [[Definition:Angle|angles]] $\angle COD$ and $\angle AOB$. +Then the [[Definition:Area|area]] $\mathcal A$ between the two [[Definition:Chord of Circle|chords]] is given by: +: $\mathcal A = \dfrac {r^2} 2 \left({\theta - \sin \theta - \alpha + \sin \alpha}\right)$ +if $O$ is not included in the [[Definition:Area|area]], and: +: $\mathcal A = r^2 \left({\pi - \dfrac 1 2 \left({\theta - \sin \theta + \alpha - \sin \alpha}\right)}\right)$ +if $O$ is included in the [[Definition:Area|area]]. +\end{theorem} + +\begin{proof} +Let $\mathcal S_\alpha$ be the [[Definition:Area|area]] of the [[Definition:Segment of Circle|segment]] whose [[Definition:Base of Segment|base]] [[Definition:Subtend|subtends]] $\alpha$. +Let $\mathcal S_\theta$ be the [[Definition:Area|area]] of the [[Definition:Segment of Circle|segment]] whose [[Definition:Base of Segment|base]] [[Definition:Subtend|subtends]] $\theta$. +=== Case $(1)$: Center included in Area === +Let the [[Definition:Center of Circle|center]] $O$ be included in the [[Definition:Area|area]]. 
+The [[Definition:Area|area]] between the two [[Definition:Chord of Circle|chords]] is given by: +:the [[Definition:Area|area]] of the whole [[Definition:Circle|circle]] +minus: +:the [[Definition:Area|areas]] of the [[Definition:Segment of Circle|segments]] $\mathcal S_\alpha$ and $\mathcal S_\theta$ . +Thus: +{{begin-eqn}} +{{eqn | l = \mathcal A + | r = \pi r^2 - \mathcal S_\alpha - \mathcal S_\theta + | c = [[Area of Circle]]: $\pi r^2$ +}} +{{eqn | r = \pi r^2 - \frac 1 2 r^2 \left({\theta - \sin \theta}\right) - \frac 1 2 r^2 \left({\alpha - \sin \alpha}\right) + | c = [[Area of Segment of Circle]] +}} +{{eqn | r = r^2 \left({\pi - \frac 1 2 \left({\theta - \sin \theta + \alpha - \sin \alpha}\right)}\right) + | c = rearranging +}} +{{end-eqn}} +{{qed|lemma}} +=== Case $(2)$: Center not included in Area === +Let $\theta \ge \alpha$. +The [[Definition:Area|area]] between the two [[Definition:Chord of Circle|chords]] is given by: +:the [[Definition:Area|area]] of the [[Definition:Segment of Circle|segment]] $\mathcal S_\theta$ +minus: +:the [[Definition:Area|area]] of the [[Definition:Segment of Circle|segment]] $\mathcal S_\alpha$. +Thus: +{{begin-eqn}} +{{eqn | l = \mathcal A + | r = \mathcal S_\theta - \mathcal S_\alpha +}} +{{eqn | r = \frac 1 2 r^2 \left({\theta - \sin \theta}\right) - \frac 1 2 r^2 \left({\alpha - \sin \alpha}\right) + | c = [[Area of Segment of Circle]] +}} +{{eqn | r = \frac {r^2} 2 \left({\theta - \sin \theta - \alpha + \sin \alpha}\right) + | c = rearranging +}} +{{end-eqn}} +{{qed}} +[[Category:Circles]] +tpt20mes3aa6vungexvygtvr7phgzp5 +\end{proof}<|endoftext|> +\section{Supremum of Absolute Value of Difference equals Supremum of Difference} +Tags: Real Analysis + +\begin{theorem} +Let $S$ be a [[Definition:Non-Empty Set|non-empty]] [[Definition:Real Number|real set]]. +Let $\displaystyle \sup_{x, y \mathop \in S} \paren {x - y}$ exist. +Then $\displaystyle \sup_{x, y \mathop \in S} \size {x - y}$ exists and: +:$\displaystyle \sup_{x, y \mathop \in S} \size {x - y} = \sup_{x, y \mathop \in S} \paren {x - y}$ +\end{theorem} + +\begin{proof} +Consider the [[Definition:Set|set]] $\set {x - y: x, y \in S, x - y \le 0}$. +There is a [[Definition:Real Number|number]] $x'$ in $S$ as $S$ is [[Definition:Non-Empty Set|non-empty]]. +Therefore, $0 \in \set {x - y: x, y \in S, x - y \le 0}$ as $x = y = x'$ implies that $x - y = 0$, $x, y \in S$, and $x - y \le 0$. +Also, $0$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $\set {x - y: x, y \in S, x - y \le 0}$ by definition. +Accordingly: +:$\displaystyle \sup_{x, y \mathop \in S, x − y \mathop \le 0} \paren {x - y} = 0$ +Consider the [[Definition:Set|set]] $\left\{{x - y: x, y \in S, x - y \ge 0}\right\}$. +There is a [[Definition:Real Number|number]] $x'$ in $S$ as $S$ is [[Definition:Non-Empty Set|non-empty]]. +Therefore, $0 \in \left\{{x - y: x, y \in S, x - y \ge 0}\right\}$ as $x = y = x'$ implies that $x - y = 0$, $x, y \in S$, $x - y \ge 0$. +Accordingly: +:$\displaystyle \sup_{x, y \mathop \in S, x − y \mathop \ge 0} \paren {x - y} \ge 0$ +{{improve|I can't immediately think of how it would be done, but it would be good if we could devise a neater and more compact notation that what is used here. All the complicated mathematics is being done in the underscript, which makes it not easy to follow. (Improved Dec. 
2016.)}} +{{begin-eqn}} +{{eqn | l = \sup_{x, y \mathop \in S} \paren {x - y} + | r = \sup_{x, y \mathop \in S, x − y \mathop \ge 0 \text { or } x − y \mathop \le 0} \paren {x - y} + | c = as ($x - y \ge 0$ or $x - y \le 0$) is true +}} +{{eqn | r = \max \set {\sup_{x, y \mathop \in S, x − y \mathop \ge 0} \paren {x - y}, \sup_{x, y \mathop \in S, x − y \mathop \le 0} \paren {x - y} } + | c = by [[Supremum of Set Equals Maximum of Suprema of Subsets]] +}} +{{eqn | r = \max \set {\sup_{x, y \mathop \in S, x − y \mathop \ge 0} \paren {x - y}, 0} + | c = as $\displaystyle \sup_{x, y \mathop \in S, x − y \mathop \le 0} \paren {x - y} = 0$ +}} +{{eqn | r = \sup_{x, y \mathop \in S, x − y \mathop \ge 0} \paren {x - y} + | c = as $\displaystyle \sup_{x, y \mathop \in S, x − y \mathop \ge 0} \paren {x - y} \ge 0$ +}} +{{eqn | r = \sup_{x, y \mathop \in S, x − y \mathop \ge 0} \size {x - y} + | c = as $\size {x − y} = x − y$ since $x − y \ge 0$ +}} +{{eqn | r = \max \set {\sup_{x, y \mathop \in S, x − y \mathop \ge 0} \size {x - y}, \sup_{x, y \mathop \in S, x − y \mathop \ge 0} \size {x - y} } + | c = as the two arguments of max are equal +}} +{{eqn | r = \max \set {\sup_{x, y \mathop \in S, x − y \mathop \ge 0} \size {x - y}, \sup_{y, x \mathop \in S, y − x \mathop \ge 0} \size {y - x} } + | c = by renaming variables $x \leftrightarrow y$ +}} +{{eqn | r = \max \set {\sup_{x, y \mathop \in S, x − y \mathop \ge 0} \size {x - y}, \sup_{x, y \mathop \in S, x − y \mathop \le 0} \size {x - y} } + | c = +}} +{{eqn | r = \sup_{x, y \mathop \in S, x − y \mathop \ge 0 \text { or } x − y \mathop \le 0} \size {x - y} + | c = by [[Supremum of Set Equals Maximum of Suprema of Subsets]] +}} +{{eqn | r = \sup_{x, y \mathop \in S} \size {x - y} + | c = as ($x - y \ge 0$ or $x - y \le 0$) is true +}} +{{end-eqn}} +{{qed}} +[[Category:Real Analysis]] +pku05d5dpl8yk89v4yqaxaa6u3fbnjn +\end{proof}<|endoftext|> +\section{Supremum of Sum equals Sum of Suprema} +Tags: Real Analysis + +\begin{theorem} +Let $A$ and $B$ be [[Definition:Non-Empty Set|non-empty]] [[Definition:Set|sets]] of [[Definition:Real Number|real numbers]]. +Let $A + B$ be $\set {x + y: x \in A, y \in B}$. +Let either $A$ and $B$ have [[Definition:Supremum of Subset of Real Numbers|suprema]] or $A + B$ have a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +Then all $\sup A$, $\sup B$, and $\sup \paren {A + B}$ exist and: +:$\sup \paren {A + B} = \sup A + \sup B$ +\end{theorem} + +\begin{proof} +Assume first that $A$ and $B$ have [[Definition:Supremum of Subset of Real Numbers|suprema]]. +We have: +:$x \le \sup A$ for an arbitrary $x$ in $A$ +:$y \le \sup B$ for an arbitrary $y$ in $B$ +Adding these inequalities, we get: +:$x + y \le \sup A + \sup B$ +The number $x + y$ is an arbitrary [[Definition:Element|element]] of $A + B$ as $x$ and $y$ are arbitrary [[Definition:Element|elements]] of $A$ and $B$ respectively. +Therefore, $\sup A + \sup B$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $A + B$. +$A + B$ is [[Definition:Non-Empty Set|non-empty]] as $A$ and $B$ are [[Definition:Non-Empty Set|non-empty]]. +Accordingly, $A + B$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]] by the [[Continuum Property]]. +Next, assume that $\sup \paren {A + B}$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +We need to prove that $A$ and $B$ have [[Definition:Supremum of Subset of Real Numbers|suprema]]. +Let $y$ be a point in $B$. 
+We have: +{{begin-eqn}} +{{eqn | l = x + y + | o = \le + | r = \sup \paren {A + B} + | c = for every $x$ in $A$ as $x + y$ is a point in $A + B$ +}} +{{eqn | ll = \iff + | l = x + | o = \le + | r = \sup \paren {A + B} - y +}} +{{end-eqn}} +Therefore, $A$ has an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] as $x$ is an arbitrary point in $A$. +Also, we have that $A$ is [[Definition:Non-Empty Set|non-empty]]. +Accordingly, $A$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]] by the [[Continuum Property]]. +A similar argument gives that $B$ has a [[Definition:Supremum of Subset of Real Numbers|supremum]]. +So, we have shown that all $\sup A$, $\sup B$, and $\sup \paren {A + B}$ exist. +We proceed to show that $\sup \paren {A + B} = \sup A + \sup B$. +We have $\sup \paren {A + B} \le \sup A + \sup B$ as $\sup A + \sup B$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $A + B$. +Accordingly, either: +:$\sup \paren {A + B} < \sup A + \sup B$ +or: +:$\sup \paren {A + B} = \sup A + \sup B$. +Suppose that: +:$\sup \paren {A + B} < \sup A + \sup B$. +Let $\epsilon = \sup A + \sup B - \sup \paren {A + B}$. +We note that $\epsilon > 0$. +Since $\sup A$ is the [[Definition:Supremum of Subset of Real Numbers|least upper bound]] of $A$, there is an [[Definition:Element|element]] $x$ in $A$ such that: +:$x > \sup A - \dfrac \epsilon 2$ by [[Supremum of Subset of Real Numbers is Arbitrarily Close]] +Since $\sup B$ is the [[Definition:Supremum of Subset of Real Numbers|least upper bound]] of $B$, there is an [[Definition:Element|element]] $y$ in $B$ such that: +:$y > \sup B - \dfrac \epsilon 2$ by [[Supremum of Subset of Real Numbers is Arbitrarily Close]] +Adding these inequalities, we get: +{{begin-eqn}} +{{eqn | l = x + y + | o = > + | r = \sup A - \frac \epsilon 2 + \sup B - \frac \epsilon 2 +}} +{{eqn | ll = \iff + | l = x + y + | o = > + | r = \sup A + \sup B - \epsilon +}} +{{eqn | ll = \iff + | l = x + y + | o = > + | r = \sup A + \sup B - \paren {\sup A + \sup B - \sup \paren {A + B} } + | c = definition of $\epsilon$ +}} +{{eqn | ll = \iff + | l = x + y + | o = > + | r = \sup \paren {A + B} +}} +{{end-eqn}} +which is impossible since the [[Definition:Real Number|number]] $x + y$ is an [[Definition:Element|element]] of $A + B$ as $x \in A$ and $y \in B$. +We have found that: +:$\sup \paren {A + B} < \sup A + \sup B$ is not true. +Therefore: +:$\sup \paren {A + B} = \sup A + \sup B$ as $\sup \paren {A + B} \le \sup A + \sup B$. +{{qed}} +[[Category:Real Analysis]] +0vhqmkayoxmgy72m9hooj17zbybrm9s +\end{proof}<|endoftext|> +\section{Edge is Bridge iff in All Spanning Trees} +Tags: Graph Theory + +\begin{theorem} +Let $G$ be a [[Definition:Simple Graph|simple graph]]. +Let $e$ be an [[Definition:Edge of Graph|edge]] of $G$. +Then $e$ is a [[Definition:Bridge|bridge]] in $G$ {{iff}} $e$ belongs to every [[Definition:Spanning Tree|spanning tree]] for $G$. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Let $e$ be a [[Definition:Bridge|bridge]]. +That is, suppose the [[Definition:Edge Deletion|edge deletion]] $G - e$ is [[Definition:Disconnected Graph|disconnected]]. +Let $T$ be an arbitrary [[Definition:Spanning Tree|spanning tree]] for $G$. +By definition $T$ is a [[Definition:Connected Graph|connected]] [[Definition:Subgraph|subgraph]] of $G$. +If $T$ did not contain $e$, then it would also be a [[Definition:Subgraph|subgraph]] of $G - e$. 
+This contradicts the fact that $G - e$ is [[Definition:Disconnected Graph|disconnected]]. +Therefore $e$ is in $T$. +{{qed|lemma}} +=== Sufficient Condition === +Suppose $T$ is a [[Definition:Spanning Tree|spanning tree]] for $G$. +Suppose that $T$ does not contain $e$. +Then $T$ is a [[Definition:Subgraph|subgraph]] of the [[Definition:Edge Deletion|edge deletion]] $G - e$. +Since $T$ is by definition [[Definition:Connected Graph|connected]], so is $G - e$. +Hence $e$ is not a [[Definition:Bridge|bridge]]. +{{qed}} +[[Category:Graph Theory]] +g7xleezy3dx5oqvng31hcp2096mefwl +\end{proof}<|endoftext|> +\section{Edge is Minimum Weight Bridge iff in All Minimum Spanning Trees} +Tags: Network Theory + +\begin{theorem} +Let $G$ be an [[Definition:Undirected Network|undirected network]]. +Let every [[Definition:Edge of Graph|edge]] of $G$ have a [[Definition:Unique|unique]] [[Definition:Weight (Network Theory)|weight]]. +Let $e$ be an [[Definition:Edge of Graph|edge]] of $G$. +Then $e$ is a [[Definition:Bridge|bridge]] of minimum [[Definition:Weight (Network Theory)|weight]] in $G$ {{iff}} $e$ belongs to every [[Definition:Minimum Spanning Tree|minimum spanning tree]] of $G$. +\end{theorem} + +\begin{proof} +=== Necessary Condition === +Aiming for a [[Definition:Contradiction|contradiction]], suppose $e$ is a [[Definition:Bridge|bridge]] of minimum [[Definition:Weight (Network Theory)|weight]] that does not belong to some [[Definition:Minimum Spanning Tree|minimum spanning tree]] $Q$. +Let $e$ be added to $Q$ to make $Q'$. +Then $e$ forms part of a unique cycle $C$ in $Q$. +Thus there exists an [[Definition:Edge of Graph|edge]] $f \in C$ such that $w \left({Q}\right) < w \left({Q + e - f}\right)$. +This [[Proof by Contradiction|contradicts]] the minimality of $Q$. +{{qed|lemma}} +{{handwaving|The above argument (cleaned up considerably from the original, which was appallingly inadequate) needs to be made rigorous.}} +=== Sufficient Condition === +{{proof wanted}} +[[Category:Network Theory]] +8ouniob19cjt6260sxp6crbsrpbobgu +\end{proof}<|endoftext|> +\section{Maximum Weight Edge in all Minimum Spanning Trees is Bridge} +Tags: Network Theory + +\begin{theorem} +Let $G$ be an [[Definition:Undirected Network|undirected network]]. +Let every [[Definition:Edge of Graph|edge]] of $G$ have a [[Definition:Unique|unique]] [[Definition:Weight (Network Theory)|weight]]. +Let $e$ be an [[Definition:Edge of Graph|edge]] of $G$ that belongs to every [[Definition:Minimum Spanning Tree|minimum spanning tree]] of $G$. +Let $e$ have maximum [[Definition:Weight (Network Theory)|weight]] in $G$. +Then $e$ is a [[Definition:Bridge|bridge]] in $G$. +\end{theorem} + +\begin{proof} +{{proof wanted}} +[[Category:Network Theory]] +rbho87mbmp0tpm8pjc64ugr83gkxxjx +\end{proof}<|endoftext|> +\section{Scaling Property of Dirac Delta Function} +Tags: Dirac Delta Function + +\begin{theorem} +Let $\delta \left({t}\right)$ be the [[Definition:Dirac Delta Function|Dirac delta function]]. +Let $a$ be a non zero [[Definition:Constant|constant]] [[Definition:Real Numbers|real number]]. +Then: +:$\delta \left({a t}\right) = \dfrac {\delta \left({t}\right)} {\left \vert a \right \vert}$ +\end{theorem} + +\begin{proof} +The equation can be rearranged as: +:$\left\vert a \right\vert \delta \left({a t}\right) = \delta \left({t}\right)$ +We will check the [[Definition:Dirac Delta Function|definition]] of Dirac delta function in turn. 
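+(As an informal illustration only, before the formal check: replacing $\delta$ by a narrow Gaussian approximation, the scaling behaviour can be seen numerically. The width and the sample value of $a$ below are arbitrary choices, and the identity only holds in the limit as the width tends to $0$.)
+```python
+import numpy as np
+
+# Approximate delta(t) by a Gaussian of standard deviation eps and compare the
+# integrals of delta(a t) and delta(t) / |a| over [-1, 1].  Illustrative only.
+eps, a = 1e-3, -3.0
+
+def delta_approx(t):
+    return np.exp(-t ** 2 / (2 * eps ** 2)) / (eps * np.sqrt(2 * np.pi))
+
+t = np.linspace(-1.0, 1.0, 400_001)
+dt = t[1] - t[0]
+lhs = np.sum(delta_approx(a * t)) * dt       # integral of delta(a t) dt
+rhs = np.sum(delta_approx(t)) * dt / abs(a)  # integral of delta(t) dt, divided by |a|
+print(lhs, rhs)   # both approximately 1/3
+```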
+Definition of Dirac delta function: +:$\left({1}\right):\delta \left({t}\right) = \begin{cases} ++\infty & : t = 0 \\ +0 & : \text{otherwise} +\end{cases}$ +:$\left({2}\right):\displaystyle \int_{-\infty}^{+\infty} \delta \left({t}\right) \rd t = 1$ +$\left({1}\right):$ +{{begin-eqn}} +{{eqn | l = \left\vert a \right\vert \delta \left({a t}\right) + | r = \begin{cases} \left({\left\vert a \right\vert}\right) \left({+\infty}\right) & : at = 0 \\ +\left({\left\vert a \right\vert}\right) 0 & : \text{otherwise} \end{cases} + | c = Definition of [[Definition:Dirac Delta Function|Dirac delta function]] +}} +{{eqn | ll= \iff + | l = \left\vert a \right\vert \delta \left({a t}\right) + | r = \begin{cases} ++\infty & : t = 0 \\ +0 & : \text{otherwise} +\end{cases} + | c = simplifying +}} +{{end-eqn}} +$\left({2}\right):$ +The proof of this part will be split into two parts, one for [[Definition:Positive Real Number|positive]] $a$ and one for [[Definition:Negative Real Number|negative]] $a$. +For $a>0$: +{{begin-eqn}} +{{eqn | l = \int_{-\infty}^{+\infty} \left\vert a \right\vert \delta \left({at}\right) \rd t + | r = \int_{-\infty}^{+\infty} \left\vert a \right\vert \delta \left({t}\right) \dfrac {\rd t} a + | c = Substitute $t \mapsto \dfrac t a$ +}} +{{eqn | r = \dfrac {\left\vert a \right\vert} a \int_{-\infty}^{+\infty} \delta \left({t}\right) \rd t + | c = Simplifying +}} +{{eqn | r = \dfrac a a \int_{-\infty}^{+\infty} \delta \left({t}\right) \rd t + | c = $a > 0$ +}} +{{eqn | r = 1 + | c = {{Defof|Dirac Delta Function}} +}} +{{end-eqn}} +{{qed|lemma}} +For $a<0$: +{{begin-eqn}} +{{eqn | l = \int_{-\infty}^{+\infty} \left\vert a \right\vert \delta \left({at}\right) \rd t + | r = \int_{+\infty}^{-\infty} \left\vert a \right\vert \delta \left({t}\right) \dfrac {\rd t} a + | c = Substitute $t \mapsto \dfrac t a$ +}} +{{eqn | r = \dfrac {\left\vert a \right\vert} a \int_{+\infty}^{-\infty} \delta \left({t}\right) \rd t + | c = Simplifying +}} +{{eqn | r = \dfrac {-\left\vert a \right\vert} a \int_{-\infty}^{+\infty} \delta \left({t}\right) \rd t + | c = [[Reversal of Limits of Definite Integral]] +}} +{{eqn | r = \dfrac a a \int_{-\infty}^{+\infty} \delta \left({t}\right) \rd t + | c = $a < 0$ +}} +{{eqn | r = 1 + | c = {{Defof|Dirac Delta Function}} +}} +{{end-eqn}} +{{qed|lemma}} +Therefore, by definition, $\left\vert a \right\vert \delta \left({a t}\right) = \delta \left({t}\right)$. +The result follows after rearrangement. +{{qed}} +{{explain|Sorry, I'm going to have to ask the awkward question: can we link to a proof that: $\int_a^b f \left({x}\right) \rd x {{=}} \int_a^b g \left({x}\right) \rd x \implies f {{=}} g$?}} +[[Category:Dirac Delta Function]] +8l8omeuh20gft0p93m703ed0jhstc7c +\end{proof}<|endoftext|> +\section{Supremum of Function is less than Supremum of Greater Function} +Tags: Real Analysis + +\begin{theorem} +Let $f$ and $g$ be [[Definition:Real Function|real functions]]. +Let $S$ be a [[Definition:Subset|subset]] of $\operatorname{Dom} \left({f}\right) \cap \operatorname{Dom} \left({g}\right)$. +Let $f \left({x}\right) \le g \left({x}\right)$ for every $x \in S$. +Let $\displaystyle \sup_{x \mathop \in S} g \left({x}\right)$ exist. +Then $\displaystyle \sup_{x \mathop \in S} f \left({x}\right)$ exists and: +:$\displaystyle \sup_{x \mathop \in S} f \left({x}\right) \le \sup_{x \mathop \in S} g \left({x}\right)$. 
+\end{theorem}
+
+\begin{proof}
+Let $\displaystyle M = \sup_{x \mathop \in S} g \left({x}\right)$, which exists by hypothesis.
+By definition of [[Definition:Supremum of Subset of Real Numbers|supremum]], $M$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $\left\{ {g \left({x}\right): x \in S}\right\}$.
+Hence:
+:$\forall x \in S: f \left({x}\right) \le g \left({x}\right) \le M$
+That is, $M$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for $\left\{ {f \left({x}\right): x \in S}\right\}$.
+As $\displaystyle \sup_{x \mathop \in S} g \left({x}\right)$ exists, the [[Definition:Set|set]] $\left\{ {g \left({x}\right): x \in S}\right\}$ is [[Definition:Non-Empty Set|non-empty]], and so $S$ and $\left\{ {f \left({x}\right): x \in S}\right\}$ are [[Definition:Non-Empty Set|non-empty]] as well.
+Hence by the [[Continuum Property]], $\displaystyle \sup_{x \mathop \in S} f \left({x}\right)$ exists.
+As $\displaystyle \sup_{x \mathop \in S} f \left({x}\right)$ is the [[Definition:Supremum of Subset of Real Numbers|least upper bound]] of $\left\{ {f \left({x}\right): x \in S}\right\}$, and $M$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] for that set:
+:$\displaystyle \sup_{x \mathop \in S} f \left({x}\right) \le M = \sup_{x \mathop \in S} g \left({x}\right)$
+{{qed}}
+[[Category:Real Analysis]]
+\end{proof}<|endoftext|>
+\section{Characterization of Boundary by Open Sets}
+Tags: Boundaries
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]].
+Let $A$ be a [[Definition:Subset|subset]] of $T$.
+Let $x$ be a point of $T$.
+Then $x \in \operatorname{Fr} A$ {{iff}}:
+:for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$:
+::if $x \in U$
+::then $A \cap U \ne \varnothing$ and $\complement_S \left({A}\right) \cap U \ne \varnothing$
+where:
+:$\complement_S \left({A}\right) = S \setminus A$ denotes the [[Definition:Relative Complement|complement]] of $A$ in $S$
+:$\operatorname{Fr} A$ denotes the [[Definition:Boundary (Topology)|boundary]] of $A$.
+\end{theorem}
+
+\begin{proof}
+=== Sufficient Condition ===
+Let $x \in \operatorname{Fr} A$.
+Then by [[Boundary is Intersection of Closure with Closure of Complement]]:
+:$x \in \left({\complement_S \left({A}\right)}\right)^-$ and $x \in A^-$
+where $A^-$ denotes the [[Definition:Closure (Topology)|closure]] of $A$.
+Hence by [[Condition for Point being in Closure]], for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$:
+:$x \in U \implies A \cap U \ne \varnothing$
+and:
+:$x \in U \implies \complement_S \left({A}\right) \cap U \ne \varnothing$
+=== Necessary Condition ===
+Let $x$ be such that for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$:
+:if $x \in U$
+:then $A \cap U \ne \varnothing$ and $\complement_S \left({A}\right) \cap U \ne \varnothing$.
+Then by [[Condition for Point being in Closure]]:
+:$x \in \left({\complement_S \left({A}\right)}\right)^-$ and $x \in A^-$.
+Hence by [[Boundary is Intersection of Closure with Closure of Complement]]:
+:$x \in \operatorname{Fr} A$
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Characterization of Closure by Open Sets}
+Tags: Set Closures
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]].
+Let $A$ be a [[Definition:Subset|subset]] of $S$.
+Let $x$ be a point of $T$.
+Let $A^-$ denote the [[Definition:Closure (Topology)|closure]] of $A$.
+Then $x \in A^-$ {{iff}}: +:for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$: +::$x \in U \implies A \cap U \ne \varnothing$ +\end{theorem} + +\begin{proof} +=== Sufficient Condition === +Let $x \in A^-$. +{{AimForCont}} there exists an [[Definition:Open Set (Topology)|open set]] $U$ of $T$ such that: +:$x \in U$ and $A \cap U = \varnothing$ +We have that $U$ is [[Definition:Open Set (Topology)|open]] in $T$. +So by definition of [[Definition:Closed Set (Topology)|closed set]], $\complement_S \left({U}\right)$ is [[Definition:Closed Set (Topology)|closed]] in $T$. +Then: +{{begin-eqn}} +{{eqn | l = A \cap U + | r = \varnothing + | c = +}} +{{eqn | ll= \implies + | l = A + | o = \subseteq + | r = \complement_S \left({U}\right) + | c = [[Empty Intersection iff Subset of Complement]] +}} +{{eqn | ll= \implies + | l = A^- + | o = \subseteq + | r = \complement_S \left({U}\right) + | c = Definition of [[Definition:Closure (Topology)/Definition 3|Set Closure]]: $\complement_S \left({U}\right)$ is [[Definition:Closed Set (Topology)|closed]] +}} +{{eqn | ll= \implies + | l = A^- \cap U + | r = \varnothing + | c = [[Empty Intersection iff Subset of Complement]] +}} +{{end-eqn}} +But we have: +:$x \in A^-$ +and also: +:$x \in U$ +and thus by definition of [[Definition:Set Intersection|set intersection]]: +:$x \in A^- \cap U$ +This [[Definition:Contradiction|contradicts]] $A^- \cap U = \varnothing$ +Hence by [[Proof by Contradiction]] the assumption that there exists an [[Definition:Open Set (Topology)|open set]] $U$ of $T$ such that $x \in U$ and $A \cap U = \varnothing$ was false. +So for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$: +:$x \in U \implies A \cap U \ne \varnothing$ +{{qed|lemma}} +=== Necessary Condition === +Let $x$ be such that for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$: +:$x \in U \implies A \cap U \ne \varnothing$ +{{AimForCont}} $x \notin A^-$. +Then: +:$x \in \complement_S \left({A^-}\right)$ +Then by assumption: +:$A \cap \complement_S \left({A^-}\right) \ne \varnothing$ +as $\complement_S \left({A^-}\right)$ is [[Definition:Open Set (Topology)|open]]. +By definition of [[Definition:Relative Complement|complement]]: +:$A \cap \complement_S \left({A}\right) = \varnothing$ +So by [[Empty Intersection iff Subset of Complement]]: +:$A \not \subseteq A^-$ +From [[Set is Subset of its Topological Closure]]: +:$A \subseteq A^-$ +But from [[Set Complement inverts Subsets]]: +:$\complement_S \left({A^-}\right) \subseteq \complement_S \left({A}\right)$ +from which by [[Empty Intersection iff Subset of Complement]]: +:$A \cap \complement_S \left({A^-}\right) = \varnothing$ +Hence by [[Proof by Contradiction]] the assumption that $x \notin A^-$ was false. +So $x \in A^-$. +{{qed}} +\end{proof}<|endoftext|> +\section{Summation is Linear/Sum of Summations} +Tags: Numbers, Proofs by Induction + +\begin{theorem} +:$\displaystyle \sum_{i \mathop = 1}^n x_i + \sum_{i \mathop = 1}^n y_i = \sum_{i \mathop = 1}^n \paren {x_i + y_i}$ +\end{theorem} + +\begin{proof} +The proof proceeds by [[Principle of Mathematical Induction|mathematical induction]]. 
+For all $n \in \N_{> 0}$, let $\map P n$ be the [[Definition:Proposition|proposition]]: +:$\displaystyle \sum_{i \mathop = 1}^n x_i + \sum_{i \mathop = 1}^n y_i = \sum_{i \mathop = 1}^n \paren {x_i + y_i}$ +=== Basis for the Induction === +$\map P 1$ is the case: +{{begin-eqn}} +{{eqn | l = \sum_{i \mathop = 1}^1 x_i + \sum_{i \mathop = 1}^1 y_i + | r = x_1 + y_1 +}} +{{eqn | r = \sum_{i \mathop = 1}^1 \paren {x_i + y_i} +}} +{{end-eqn}} +This is the [[Definition:Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now it needs to be shown that, if $\map P k$ is true, where $k \ge 1$, then it logically follows that $\map P {k + 1}$ is true. +So this is the [[Definition:Induction Hypothesis|induction hypothesis]]: +:$\displaystyle \sum_{i \mathop = 1}^k x_i + \sum_{i \mathop = 1}^k y_i = \sum_{i \mathop = 1}^k \paren {x_i + y_i}$ +from which it is to be shown that: +:$\displaystyle \sum_{i \mathop = 1}^{k + 1} x_i + \sum_{i \mathop = 1}^{k + 1} y_i = \sum_{i \mathop = 1}^{k + 1} \paren {x_i + y_i}$ +=== Induction Step === +This is the [[Definition:Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = \sum_{i \mathop = 1}^{k + 1} x_i + \sum_{i \mathop = 1}^{k + 1} y_i + | r = \paren {\sum_{i \mathop = 1}^k x_i + x_{k + 1} } + \paren {\sum_{i \mathop = 1}^k y_i + y_{k + 1} } + | c = {{Defof|Summation}} +}} +{{eqn | r = \left({\sum_{i \mathop = 1}^k x_i + \sum_{i \mathop = 1}^k y_i}\right) + \paren {x_{k + 1} + y_{k + 1} } + | c = [[Commutative Law of Addition]] and [[Associative Law of Addition|Associative]] +}} +{{eqn | r = \sum_{i \mathop = 1}^k \paren {x_i + y_i} + \paren {x_{k + 1} + y_{k + 1} } + | c = [[Summation is Linear/Sum of Summations#Induction Hypothesis|Induction Hypothesis]] +}} +{{eqn | r = \sum_{i \mathop = 1}^{k + 1} \paren {x_i + y_i} + | c = {{Defof|Summation}} +}} +{{end-eqn}} +So $\map P k \implies \map P {k + 1}$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$\displaystyle \forall n \in \N_{> 0}: \sum_{i \mathop = 1}^n x_i + \sum_{i \mathop = 1}^n y_i = \sum_{i \mathop = 1}^n \paren {x_i + y_i}$ +{{qed}} +[[Category:Numbers]] +[[Category:Proofs by Induction]] +ibolccjs5zc8erhwzq2a6bct6mfwewf +\end{proof}<|endoftext|> +\section{Summation is Linear/Scaling of Summations} +Tags: Numbers, Proofs by Induction + +\begin{theorem} +:$\displaystyle \lambda \sum_{i \mathop = 1}^n x_i = \sum_{i \mathop = 1}^n \lambda x_i$ +\end{theorem} + +\begin{proof} +For all $n \in \N_{> 0}$, let $P \left({n}\right)$ be the [[Definition:Proposition|proposition]]: +:$\displaystyle \lambda \sum_{i \mathop = 1}^n x_i = \sum_{i \mathop = 1}^n \lambda x_i$ +=== Basis for the Induction === +$P \left({1}\right)$ is the case: +{{begin-eqn}} +{{eqn | l = \lambda \sum_{i \mathop = 1}^1 x_i + | r = \lambda x_1 +}} +{{eqn | r = \sum_{i \mathop = 1}^1 \lambda x_i +}} +{{end-eqn}} +This is the [[Principle of Mathematical Induction#Basis for the Induction|basis for the induction]]. +=== Induction Hypothesis === +Now it needs to be shown that, if $P \left({k}\right)$ is true, where $k \ge 2$, then it logically follows that $P \left({k + 1}\right)$ is true. 
+So this is the [[Principle of Mathematical Induction#Induction Hypothesis|induction hypothesis]]: +:$\displaystyle \lambda \sum_{i \mathop = 1}^k x_i = \sum_{i \mathop = 1}^k \lambda x_i$ +from which it is to be shown that: +:$\displaystyle \lambda \sum_{i \mathop = 1}^{k + 1} x_i = \sum_{i \mathop = 1}^{k + 1} \lambda x_i$ +=== Induction Step === +This is the [[Principle of Mathematical Induction#Induction Step|induction step]]: +{{begin-eqn}} +{{eqn | l = \lambda \sum_{i \mathop = 1}^{k + 1} x_i + | r = \lambda \left({\sum_{i \mathop = 1}^k x_i + x_{k + 1} }\right) + | c = Definition of [[Definition:Summation|Summation]] +}} +{{eqn | r = \lambda \sum_{i \mathop = 1}^k x_i + \lambda x_{k + 1} + | c = [[Multiplication of Numbers Distributes over Addition]] +}} +{{eqn | r = \sum_{i \mathop = 1}^k \lambda x_i + \lambda x_{k + 1} + | c = [[Summation is Linear/Scaling of Summations#Induction Hypothesis|Induction Hypothesis]] +}} +{{eqn | r = \sum_{i \mathop = 1}^{k + 1} \lambda x_i + | c = Definition of [[Definition:Summation|Summation]] +}} +{{end-eqn}} +So $P \left({k}\right) \implies P \left({k + 1}\right)$ and the result follows by the [[Principle of Mathematical Induction]]. +Therefore: +:$\displaystyle \forall n \in \N_{> 0}: \lambda \sum_{i \mathop = 1}^n x_i = \sum_{i \mathop = 1}^n \lambda x_i$ +{{qed}} +[[Category:Numbers]] +[[Category:Proofs by Induction]] +s6gnp5lzhthp0s5hmwo7dcinjytg7f9 +\end{proof}<|endoftext|> +\section{Supremum of Absolute Value of Difference equals Difference between Supremum and Infimum} +Tags: Real Analysis + +\begin{theorem} +Let $f$ be a [[Definition:Real Function|real function]]. +Let $S$ be a [[Definition:Subset|subset]] of the [[Definition:Domain (Set Theory)/Mapping|domain]] of $f$. +Let $\displaystyle \sup_{x \mathop \in S} \set {\map f x}$ and $\displaystyle \inf_{x \mathop \in S} \set {\map f x}$ exist. +Then $\displaystyle \sup_{x, y \mathop \in S} \set {\size {\map f x - \map f y} }$ exists and: +:$\displaystyle \sup_{x, y \mathop \in S} \set {\size {\map f x - \map f y} } = \sup_{x \mathop \in S} \set {\map f x} - \inf_{x \mathop \in S} \set {\map f x}$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \sup_{x \mathop \in S} \set {\map f x} - \inf_{x \mathop \in S} \set {\map f x} + | r = \sup_{x \mathop \in S} \set {\map f x} + \sup_{x \mathop \in S} \set {-\map f x} + | c = [[Negative of Infimum is Supremum of Negatives]] +}} +{{eqn | r = \sup_{x, y \mathop \in S} \set {\map f x + \paren {-\map f y} } + | c = [[Supremum of Sum equals Sum of Suprema]] +}} +{{eqn | r = \sup_{x, y \mathop \in S} \set {\map f x - \map f y} +}} +{{eqn | r = \sup_{x, y \mathop \in S} \set {\size {\map f x - \map f y} } + | c = [[Supremum of Absolute Value of Difference equals Supremum of Difference]] +}} +{{end-eqn}} +{{qed}} +[[Category:Real Analysis]] +kcdheyhqqm4yvk2a0xwcnftr9zctwmt +\end{proof}<|endoftext|> +\section{Diaconescu-Goodman-Myhill Theorem} +Tags: Set Theory, Axiom of Choice + +\begin{theorem} +The [[Axiom:Axiom of Choice|axiom of choice]] implies the [[Law of Excluded Middle|law of excluded middle]]. +\end{theorem} + +\begin{proof} +Let $\mathbb B = \set {0, 1}$. +Let $p$ be a [[Definition:Proposition|proposition]]. +Let the following two [[Definition:Set|sets]] be defined: +:$A = \set {x \in \mathbb B: x = 0 \lor p}$ +:$B = \set {x \in \mathbb B: x = 1 \lor p}$ +where $\lor$ denotes the [[Definition:Disjunction|disjunction]] operator. 
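+(As a purely classical illustration of how $A$ and $B$ behave, one can model $p$ by a Boolean value; this presupposes the law of the excluded middle, so it carries no constructive content and is not part of the proof.)
+```python
+# Model the sets A and B of the proof over the ambient set {0, 1}, for each
+# classical truth value of p, and exhibit one possible choice function.
+AMBIENT = {0, 1}
+
+def build_sets(p: bool):
+    A = {x for x in AMBIENT if x == 0 or p}
+    B = {x for x in AMBIENT if x == 1 or p}
+    return A, B
+
+for p in (True, False):
+    A, B = build_sets(p)
+    print(p, A, B, {"A": min(A), "B": min(B)})
+# p = True  -> A = B = {0, 1}; a choice function may pick the same element from both
+# p = False -> A = {0}, B = {1}; every choice function picks 0 from A and 1 from B
+```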
+We have that: +:$0 \in A$ +and: +:$1 \in B$ +so both $A$ and $B$ are [[Definition:Non-Empty Set|non-empty]] +Then the [[Definition:Set|set]]: +:$X = \set {A, B}$ +is a [[Definition:Set|set]] of [[Definition:Non-Empty Set|non-empty sets]]: +By the [[Axiom:Axiom of Choice|axiom of choice]], there exists a [[Definition:Choice Function|choice function]]: +:$f: X \to \mathbb B$ +since $\displaystyle \bigcup X = \mathbb B$. +There are four cases: +:$(1): \quad \map f A = \map f B = 0$ +This means that $0 \in B$. +But for that to happen, $\paren {0 = 1} \vee p$ must be true. +So by [[Disjunctive Syllogism]], $p$ is true. +:$(2): \quad \map f A = \map f B = 1$ +This means that $1 \in A$. +Arguing similarly to case $(1)$, it follows that $p$ is true in this case also. +:$(3): \quad \map f A = 1 \ne \map f B = 0$ +This means that $A \ne B$ (or otherwise $f$ would pick the same [[Definition:Element|element]]). +But if $p$ is true, that means: +:$A = B = \mathbb B$ +which is a [[Definition:Contradiction|contradiction]]. +Therefore in this case: +:$\neg p$ +:$(4): \quad \map f A = 0 \ne \map f B = 1$ +Using the same reasoning as in case $(3)$, it is seen that in this case: +:$\neg p$ +So by [[Proof by Cases]]: +:$\paren {p \vee \neg p}$ +That is the [[Law of Excluded Middle]]. +{{qed}} +{{Namedfor|Radu Diaconescu|name2 = Noah D. Goodman|name3 = John R. Myhill|cat = Diaconescu|cat2 = Goodman|cat3 = Myhill}} +\end{proof}<|endoftext|> +\section{Characterization of Boundary by Basis} +Tags: Boundaries + +\begin{theorem} +Let $T = \struct {S, \tau}$ be a [[definition:Topological Space|topological space]]. +Let $\BB \subseteq \tau$ be a [[Definition:Analytic Basis|basis]]. +Let $A$ be a [[Definition:Subset|subset]] of $T$. +Let $x$ be a point of $T$. +Then $x \in \partial A$ {{iff}}: +:for every $U \in \BB$: +::if $x \in U$ +::then $A \cap U \ne \O$ and $\relcomp S A \cap U \ne \O$ +where: +:$\relcomp S A = S \setminus A$ denotes the [[Definition:Relative Complement|complement]] of $A$ in $S$ +:$\partial A$ denotes the [[Definition:Boundary (Topology)|boundary]] of $A$ in $T$. +\end{theorem} + +\begin{proof} +=== Sufficient Condition === +Let $x \in \partial A$. +Let $U \in \BB$. +By definition of [[Definition:Analytic Basis|basis]], $U$ is an [[Definition:Open Set (Topology)|open set of $T$]]. +Thus from [[Characterization of Boundary by Open Sets]]: +:if $x \in U$ +::then $A \cap U \ne \O$ and $\relcomp S A \cap U \ne \O$. +{{qed|lemma}} +=== Necessary Condition === +Let $x$ be such that for every $U \in \BB$: +:if $x \in U$ +:then $A \cap U \ne \O$ and $\relcomp S A \cap U \ne \O$. +By [[Characterization of Boundary by Open Sets]], to prove that $x \in \partial A$ it is enough to prove +that: +:for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$: +::if $x \in U$ then $A \cap U \ne \O$ and $\relcomp S A \cap U \ne \O$. +Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $T$. +Let $x \in U$. +By definition of [[Definition:Analytic Basis|(analytic) basis]], there exists $V \in \BB$ such that: +:$x \in V \subseteq U$ +By assumption: +:$A \cap V \ne \O$ +and: +:$\relcomp S A \cap V \ne \O$ +From the [[Set Intersection Preserves Subsets/Corollary|corollary to Set Intersection Preserves Subsets]]: +:$A \cap V \subseteq A \cap U$ +and: +:$\relcomp S A \cap V \subseteq \relcomp S A \cap U$ +So: +:$A \cap U \ne \O$ and $\relcomp S A \cap U \ne \O$ +and hence the result. 
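+As a small finite sanity check of this characterization (illustrative only; the space, topology, basis and subset below are arbitrary choices), the boundary computed from closures agrees with the boundary computed from the basis criterion:
+```python
+# Finite check: boundary via closures versus boundary via the basis criterion.
+S = frozenset({'a', 'b', 'c'})
+tau = [frozenset(), frozenset({'a'}), frozenset({'a', 'b'}), S]   # a topology on S
+BB = [frozenset({'a'}), frozenset({'a', 'b'}), S]                 # a basis for tau
+A = frozenset({'b'})
+comp_A = S - A
+
+def closure(X):
+    closed_sets = [S - U for U in tau]
+    return frozenset.intersection(*[C for C in closed_sets if X <= C])
+
+boundary = closure(A) & closure(comp_A)
+via_basis = {x for x in S
+             if all((A & U) and (comp_A & U) for U in BB if x in U)}
+print(sorted(boundary), sorted(via_basis))   # ['b', 'c'] both times
+```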
+{{qed}} +\end{proof}<|endoftext|> +\section{Union of Interiors and Boundary Equals Whole Space} +Tags: Boundaries, Set Interiors + +\begin{theorem} +Let $T = \struct {S, \tau}$ be a [[Definition:Topological Space|topological space]]. +Let $A$ be a [[Definition:Subset|subset]] of $T$. +Then: +:$S = \Int A \cup \partial A \cup \Int {A'}$ +where: +:$A' = S \setminus A$ denotes the [[Definition:Relative Complement|complement of $A$ relative to $S$]] +:$\Int A$ denotes the [[Definition:Interior (Topology)|interior]] of $A$ +:$\partial A$ denotes the [[Definition:Boundary (Topology)|boundary]] of $A$. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \Int A \cup \partial A \cup \Int {A'} + | r = \Int A \cup \Int {A'} \cup \partial A + | c = [[Union is Associative]], [[Union is Commutative]] +}} +{{eqn | r = \Int A \cup \Int {A'} \cup \paren {\map \cl A \cap \map \cl {A'} } + | c = [[Boundary is Intersection of Closure with Closure of Complement]] +}} +{{eqn | r = \paren {\Int A \cup \Int {A'} \cup \map \cl A} \cap \paren {\Int A \cup \Int {A'} \cup \map \cl {A'} } + | c = [[Intersection Distributes over Union]] +}} +{{eqn | r = \paren {\Int A \cup \paren {\map \cl A}' \cup \map \cl A} \cap \paren {\Int A \cup \Int {A'} \cup \map \cl {A'} } + | c = [[Complement of Interior equals Closure of Complement]] +}} +{{eqn | r = \paren {\Int A \cup \paren {\paren {\map \cl A}' \cup \map \cl A} } \cap \paren {\Int A \cup \Int {A'} \cup \map \cl {A'} } + | c = [[Union is Associative]] +}} +{{eqn | r = \paren {\Int A \cup S} \cap \paren {\Int A \cup \Int {A'} \cup \map \cl {A'} } + | c = [[Union with Relative Complement]] +}} +{{eqn | r = \paren {\Int A \cup S} \cap \paren {\Int A \cup \Int {A'} \cup \paren {\Int A}'} + | c = [[Complement of Interior equals Closure of Complement]] +}} +{{eqn | r = \paren {\Int A \cup S} \cap \paren {\Int A \cup \paren {\Int A' \cup \Int {A'} } } + | c = [[Union is Associative]], [[Union is Commutative]] +}} +{{eqn | r = \paren {\Int A \cup S} \cap \paren {S \cup \Int {A'} } + | c = [[Union with Relative Complement]] +}} +{{eqn | r = S \cap \paren {S \cup \Int {A'} } + | c = [[Union with Superset is Superset]] +}} +{{eqn | r = S \cap S + | c = [[Union with Superset is Superset]] +}} +{{eqn | r = S + | c = [[Intersection is Idempotent]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Characterization of Derivative by Open Sets} +Tags: Set Derivatives + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $A$ be a [[Definition:Subset|subset]] of $T$. +Let $x$ be a point of $T$. +Then +:$x \in A'$ +{{iff}}: +:for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$: +::if $x \in U$ +::then there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$ +where +:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$. +\end{theorem} + +\begin{proof} +=== Sufficient Condition === +Let $x \in A'$. +Then $x$ is an [[Definition:Accumulation Point of Set|accumulation point]] of $A$ by [[Definition:Set Derivative]]. +Then by definition of [[Definition:Accumulation Point of Set|accumulation point]]: +:$(1): \quad x \in \left({A \setminus \left\{ {x}\right\} }\right)^-$ +where $A^-$ denotes the [[Definition:Closure (Topology)|closure]] of $A$. +Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $T$. +Let $x \in U$. 
+Then by $(1)$ and [[Condition for Point being in Closure]]:
+:$\left({A \setminus \left\{ {x}\right\}}\right) \cap U \ne \varnothing$
+Then there exists a point $y$ such that:
+:$(2): \quad y \in A \setminus \left\{ {x}\right\}$ and $y \in U$.
+Then $y \in A$ and $y \notin \left\{ {x}\right\}$ by definition of [[Definition:Set Difference|set difference]].
+Hence by $(2)$ and the definitions of [[Definition:Set Intersection|set intersection]] and [[Definition:Singleton|singleton]]:
+:$y \in A \cap U$ and $x \ne y$
+{{qed|lemma}}
+=== Necessary Condition ===
+Let $x$ be such that:
+:$(3): \quad$ for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$: if $x \in U$, then there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$.
+It will be proved that:
+:for every [[Definition:Open Set (Topology)|open set]] $G$ of $T$:
+::if $x \in G$
+::then $\left({A \setminus \left\{ {x}\right\} }\right) \cap G \ne \varnothing$.
+Let $G$ be an [[Definition:Open Set (Topology)|open set]] of $T$ such that $x \in G$.
+Then by $(3)$ there exists a point $y$ of $T$ such that:
+:$(4): \quad y \in A \cap G$ and $x \ne y$
+Then by the definitions of [[Definition:Set Intersection|set intersection]] and [[Definition:Singleton|singleton]]:
+:$y \in A$ and $y \notin \left\{ {x}\right\}$
+By definition of [[Definition:Set Difference|set difference]]:
+:$y \in A \setminus \left\{ {x}\right\}$
+By $(4)$ and the definition of [[Definition:Set Intersection|set intersection]]:
+:$y \in G$
+Hence:
+:$\left({A \setminus \left\{ {x}\right\} }\right) \cap G \ne \varnothing$
+{{qed|lemma}}
+Then by [[Condition for Point being in Closure]]:
+:$x \in \left({A \setminus \left\{ {x}\right\} }\right)^-$
+Then, by definition, $x$ is an [[Definition:Accumulation Point of Set|accumulation point]] of $A$.
+Hence by definition of [[Definition:Set Derivative|set derivative]]:
+:$x \in A'$
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Characterization of Derivative by Local Basis}
+Tags: Set Derivatives
+
+\begin{theorem}
+Let $T = \struct {S, \tau}$ be a [[Definition:Topological Space|topological space]].
+Let $A$ be a [[Definition:Subset|subset]] of $S$.
+Let $x$ be a point of $T$.
+Let $\BB \subseteq \tau$ be a [[Definition:Local Basis|local basis]] at $x$.
+Then
+:$x \in A'$
+{{iff}}:
+:for every $U \in \BB$, there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$
+where:
+:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$.
+\end{theorem}
+
+\begin{proof}
+=== Sufficient Condition ===
+Let $x \in A'$.
+By [[Characterization of Derivative by Open Sets]]:
+For every [[Definition:Open Set (Topology)|open set]] $U$ of $T$:
+:if $x \in U$
+:then there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$
+As the [[Definition:Element|elements]] of $\BB$ are all [[Definition:Open Set (Topology)|open sets]], it follows that:
+For every [[Definition:Open Set (Topology)|open set]] $U \in \BB$:
+:if $x \in U$
+:then there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$
+{{qed|lemma}}
+=== Necessary Condition ===
+Let $x$ be such that:
+$(1): \quad$ for every subset $U \in \BB$, there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$.
+By [[Characterization of Derivative by Open Sets]], to prove that $x \in A'$ it is enough to prove:
+:for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$:
+::if $x \in U$
+::then there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$.
+Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $T$.
+Let $x \in U$.
+Then by definition of [[Definition:Local Basis|local basis]]:
+:there exists $V \in \BB$ such that:
+::$x \in V \subseteq U$
+By $(1)$:
+:there exists a point $y$ of $T$ such that $y \in A \cap V$ and $x \ne y$.
+By [[Set Intersection Preserves Subsets/Corollary|the corollary to Set Intersection Preserves Subsets]]:
+:$A \cap V \subseteq A \cap U$
+and so:
+:$y \in A \cap V \implies y \in A \cap U$
+and so $y$ fulfils the conditions of the hypothesis.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Derivative is Included in Closure}
+Tags: Set Derivatives, Set Closures
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]].
+Let $A$ be a [[Definition:Subset|subset]] of $S$.
+Then
+:$A' \subseteq A^-$
+where
+:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$
+:$A^-$ denotes the [[Definition:Closure (Topology)|closure]] of $A$.
+\end{theorem}
+
+\begin{proof}
+Let $x \in A'$.
+By [[Condition for Point being in Closure]] it is enough to prove that:
+:for every [[Definition:Open Set (Topology)|open set]] $G$ of $T$:
+:: if $x \in G$
+:: then $A \cap G \ne \varnothing$.
+Let $G$ be an [[Definition:Open Set (Topology)|open set]] of $T$.
+Let $x \in G$.
+Then by [[Characterization of Derivative by Open Sets]]:
+:there exists a point $y \in S$ such that $y \in A \cap G$ and $x \ne y$.
+Hence:
+:$A \cap G \ne \varnothing$
+Hence the result.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Closure Equals Union with Derivative}
+Tags: Set Derivatives, Set Closures
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]].
+Let $A$ be a [[Definition:Subset|subset]] of $S$.
+Then:
+:$A^- = A \cup A'$
+where
+:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$
+:$A^-$ denotes the [[Definition:Closure (Topology)|closure]] of $A$.
+\end{theorem}
+
+\begin{proof}
+=== Closure Subset of Union ===
+It is to be proved that:
+:$A^- \subseteq A \cup A'$
+Let $x \in A^-$.
+In the case where $x \in A$ then $x \in A \cup A'$ by definition of [[Definition:Set Union|set union]].
+Let:
+:$(1): \quad x \notin A$
+From [[Characterization of Derivative by Open Sets]], to prove $x \in A'$ it is enough to show that:
+:for every [[Definition:Open Set (Topology)|open set]] $U$ of $T$:
+::if $x \in U$
+::then there exists a point $y$ of $T$ such that $y \in A \cap U$ and $x \ne y$.
+Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $T$.
+Let $x \in U$.
+Then by [[Condition for Point being in Closure]]: +:$A \cap U \ne \varnothing$ +Then by definition of [[Definition:Empty Set|empty set]]: +:$\exists y \in S: y \in A \cap U$ +By definition of [[Definition:Set Intersection|set intersection]]: +:$y \in A$ and $y \in U$ +But as $x \notin A$ it follows by definition of [[Definition:Set Intersection|set intersection]] that: +:$x \notin A \cap U$ +So by $(1)$: +:$x \ne y$ +Thus $y$ fulfils the conditions of the hypothesis, and so: +:$x \in A'$ +Hence by definition of [[Definition:Set Union|set union]]: +:$x \in A \cup A'$ +Thus in all cases: +:$x \in A^- \implies x \in A \cup A'$ +and so: +:$A^- \subseteq A \cup A'$ +{{qed|lemma}} +=== Union Subset of Closure === +It is to be proved that: +:$A \cup A' \subseteq A^-$ +By [[Set is Subset of its Topological Closure]]: +:$A \subseteq A^-$ +By [[Derivative is Included in Closure]]: +:$A' \subseteq A^-$ + +Hence by [[Union of Subsets is Subset]]: +:$A \cup A' \subseteq A^-$ +{{qed|lemma}} +Hence by definition of [[Definition:Set Equality|set equality]]: +:$A^- = A \cup A'$ +\end{proof}<|endoftext|> +\section{Derivative of Subset is Subset of Derivative} +Tags: Set Derivatives + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $A$, $B$ be [[Definition:Subset|subsets]] of $S$. +Then +:$A \subseteq B \implies A' \subseteq B'$ +where $A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$ in $T$. +\end{theorem} + +\begin{proof} +Let $A \subseteq B$. +Let $x \in A'$. +By [[Characterization of Derivative by Open Sets]] it is enough to prove that: +:for every [[Definition:Open Set (Topology)|open set]] $G$ of $T$: +::if $x \in G$ +::then there exists $y$ such that $y \in B \cap G$ and $x \ne y$. +Let $G$ be an [[Definition:Open Set (Topology)|open set]] of $T$. +Let $x \in G$. +Then by [[Characterization of Derivative by Open Sets]]: +:there exists a point $y$ of $T$ such that $y \in A \cap G$ and $x \ne y$. +By [[Set Intersection Preserves Subsets/Corollary|the corollary to Set Intersection Preserves Subsets]]: +:$A \cap G \subseteq B \cap G$ +Hence: +:$y \in B \cap G$ and $x \ne y$. +The conditions of the hypothesis are thus fulfilled, and: +:$x \in B'$ +Thus by definition of [[Definition:Subset|subset]]: +:$A' \subseteq B'$ +{{qed}} +\end{proof}<|endoftext|> +\section{Derivative of Union is Union of Derivatives} +Tags: Set Derivatives + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $A$, $B$ be [[Definition:Subset|subsets]] of $S$. +Then +:$\left({A \cup B}\right)' = A' \cup B\,'$ +where +:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$. +\end{theorem} + +\begin{proof} +=== Derivative of Union subset of Union of Derivatives === +It will be shown that: +:$\left({A \cup B}\right)' \subseteq A' \cup B\,'$ +Let $x \in \left({A \cup B}\right)'$. +By definition of [[Definition:Set Derivative|set derivative]]: +:$x$ is an [[Definition:Accumulation Point of Set|accumulation point]] of $A \cup B$. + +Then by definition of [[Definition:Accumulation Point of Set|accumulation point of set]]: +:$(1): \quad x \in \left({\left({A \cup B}\right) \setminus \left\{ {x}\right\} }\right)^-$ +where $A^-$ denotes the [[Definition:Closure (Topology)|closure]] of $A$. 
+By [[Union Distributes over Difference]]: +:$\left({A \cup B}\right) \setminus \left\{ {x}\right\} = \left({A \setminus \left\{ {x}\right\} }\right) \cup \left({B \setminus \left\{ {x}\right\} }\right)$ +Then by [[Closure of Finite Union equals Union of Closures]]: +:$\left({\left({A \cup B}\right) \setminus \left\{ {x}\right\} }\right)^- = \left({A \setminus \left\{ {x}\right\} }\right)^- \cup \left({B \setminus \left\{ {x}\right\} }\right)^-$ +Then by $(1)$ and definition of [[Definition:Set Union|set union]]: +:$x \in \left({A \setminus \left\{ {x}\right\} }\right)^-$ or $x \in \left({B \setminus \left\{ {x}\right\} }\right)^-$ +Then by definition of [[Definition:Accumulation Point of Set|accumulation point of set]]: +:$x$ is an accumulation point of $A$ or $x$ is an accumulation point of $B$. +Then by definition of [[Definition:Set Derivative|set derivative]]: +:$x \in A'$ or $x \in B\,'$ +Hence by definition of [[Definition:Set Union|set union]]: +:$x \in A' \cup B\,'$ +=== Union of Derivatives subset of Derivative of Union === +By [[Set is Subset of Union]]: +:$A \subseteq A \cup B$ and $B \subseteq A \cup B$ +Then by [[Derivative of Subset is Subset of Derivative]]: +:$A' \subseteq \left({A \cup B}\right)'$ and $B\,' \subseteq \left({A \cup B}\right)'$ +Hence by [[Union of Subsets is Subset]]: +:$A' \cup B\,' \subseteq \left({A \cup B}\right)'$ +{{qed}} +\end{proof}<|endoftext|> +\section{Derivative of Derivative is Subset of Derivative in T1 Space} +Tags: Set Derivatives, T1 Spaces + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:T1 Space|$T_1$ topological space]]. +Let $A$ be a [[Definition:Subset|subset]] of $S$. +Then +:$A'' \subseteq A'$ +where +:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$ +\end{theorem} + +\begin{proof} +Let: +: $(1): \quad x \in A''$. +Aiming for a [[Definition:Contradiction|contradiction]], suppose that $x \notin A'$. +Then by [[Characterization of Derivative by Open Sets]] +there exists an open subset $G$ of $T$ such that: +: $(2): \quad x \in G$ and +: $(3): \quad \lnot \exists y: y \in A \cap G \land x \ne y$. +By definition of [[Definition:Fréchet Space (Topology)/Definition 2|$T_1$ space]]: +:$\left\{{x}\right\}$ is [[Definition:Closed Set (Topology)|closed]]. +Then by [[Open Set minus Closed Set is Open]]: +:$(4): \quad G \setminus \{x\}$ is [[Definition:Open Set (Topology)|open]]. +By $(1)$, $(2)$, and [[Characterization of Derivative by Open Sets]] there exists a point $y$ of $T$ such that: +:$(5): \quad y \in A' \cap G$ and +:$(6): \quad x \ne y$. +Then by definition of [[Definition:Set Intersection|intersection]]: +: $y \in A'$. +Then by $(6)$ and definition of [[Definition:Set Difference|set difference]]: +:$(7): \quad y \in A' \setminus \{x\}$. +By definition of [[Definition:Set Intersection|intersection]] and $(5)$: +: $y \in G$. +By $(6)$ and definition of [[Definition:Singleton|singleton]]: +:$y \notin \left\{{x}\right\}$ +Then by definition of [[Definition:Set Difference|set difference]]: +:$(8): \quad y \in G \setminus \{x\}$ +We will prove: +:$(9): \quad G \cap \left({A \setminus \{x\}}\right) = \varnothing$ +:Aiming for a [[Definition:Contradiction|contradiction]] suppose that: +:: $G \cap \left({A \setminus \left\{{x}\right\}}\right) \ne \varnothing$. 
+:Then by definition of the [[Definition:Empty Set|empty set]] there exists $g$ such that:
+::$g \in G \cap \left({A \setminus \left\{{x}\right\}}\right)$
+:Hence by definition of [[Definition:Set Intersection|intersection]]:
+:: $g \in G$ and
+:: $g \in A \setminus \left\{{x}\right\}$.
+:Then by definition of [[Definition:Set Difference|set difference]]:
+::$g \in A$
+:Hence by definition of [[Definition:Set Intersection|intersection]]:
+:: $g \in A \cap G$
+:Then by $(3)$:
+:: $x = g$
+:But by definition of [[Definition:Set Difference|set difference]] and of [[Definition:Singleton|singleton]], $g \in A \setminus \left\{{x}\right\}$ means $g \ne x$, which is a [[Definition:Contradiction|contradiction]].
+:Thus $G \cap \left({A \setminus \left\{{x}\right\}}\right) = \varnothing$.
+Let $U = G \setminus \{x\}$, which is [[Definition:Open Set (Topology)|open]] by $(4)$.
+By $(5)$ and definition of [[Definition:Set Intersection|intersection]]:
+:$y \in A'$
+Then by $(8)$ and [[Characterization of Derivative by Open Sets]] there exists a [[Definition:Element|point]] $q$ of $T$ such that
+:$(10): \quad q \in A \cap U$ and
+:$(11): \quad y \ne q$.
+By $(10)$ and definition of [[Definition:Set Intersection|intersection]]:
+: $q \in A$
+By $(11)$ and definition of [[Definition:Singleton|singleton]]:
+:$q \notin \left\{{y}\right\}$
+Then by definition of [[Definition:Set Difference|set difference]]:
+:$(12): \quad q \in A \setminus \left\{{y}\right\}$.
+By definition of [[Definition:Set Intersection|intersection]]:
+: $q \in U$.
+Then by $(12)$, $q \in U$ and the definition of [[Definition:Set Difference|set difference]]:
+:$q \ne x$ and $q \in A$.
+Then by definition of [[Definition:Set Difference|set difference]]:
+: $q \in A \setminus \{x\}$
+and
+:$q \in G$.
+Hence, by definition of [[Definition:Set Intersection|intersection]], this contradicts $(9)$.
+Thus the result by [[Proof by Contradiction]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Closure of Derivative is Derivative in T1 Space}
+Tags: Set Derivatives, Set Closures, T1 Spaces
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:T1 Space|$T_1$]] [[Definition:Topological Space|topological space]].
+Let $A$ be a [[Definition:Subset|subset]] of $S$.
+Then
+:$A'^- = A'$
+where
+:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$
+:$A^-$ denotes the [[Definition:Closure (Topology)|closure]] of $A$.
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = A'^-
+ | r = A' \cup A''
+ | c = [[Closure Equals Union with Derivative]]
+}}
+{{eqn | o = \subseteq
+ | r = A' \cup A'
+ | c = $A'' \subseteq A'$ by [[Derivative of Derivative is Subset of Derivative in T1 Space]]
+}}
+{{eqn | r = A'
+ | c = [[Union is Idempotent]]
+}}
+{{end-eqn}}
+So:
+:$A'^- \subseteq A'$
+Then by definition of [[Definition:Closure (Topology)/Definition 3|closure]]:
+:$A' \subseteq A'^-$
+Hence the result by definition of [[Definition:Set Equality|set equality]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Union of Derivatives is Subset of Derivative of Union}
+Tags: Set Derivatives
+
+\begin{theorem}
+Let $T = \struct {S, \tau}$ be a [[Definition:Topological Space|topological space]].
+Let:
+:$\FF \subseteq \powerset S$ be a [[Definition:Set of Sets|set of subsets]] of $S$
+where $\powerset S$ denotes the [[Definition:Power Set|power set]] of $S$.
+Then:
+:$\ds \bigcup_{A \mathop \in \FF} A' \subseteq \paren {\bigcup_{A \mathop \in \FF} A}'$
+where $A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$.
+\end{theorem}
+
+\begin{proof}
+Let $\ds x \in \bigcup_{A \mathop \in \FF} A'$.
+Then by definition of [[Definition:Union of Family|union]] there exists $A \in \FF$ such that:
+:$(1): \quad x \in A'$
+By [[Set is Subset of Union/Set of Sets|Set is Subset of Union]]:
+:$\ds A \subseteq \bigcup_{A \mathop \in \FF} A$
+Then by [[Derivative of Subset is Subset of Derivative]]:
+:$\ds A' \subseteq \paren {\bigcup_{A \mathop \in \FF} A}'$
+Hence by $(1)$ the result:
+:$\ds x \in \paren {\bigcup_{A \mathop \in \FF} A}'$
+follows by definition of [[Definition:Subset|subset]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Point is Isolated iff not Accumulation Point}
+Tags: Isolated Points, Accumulation Points
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]].
+Let $H \subseteq S$.
+Let $x \in H$.
+Then:
+:$x$ is an [[Definition:Isolated Point (Topology)|isolated point]] in $H$
+{{iff}}:
+:$x$ is not an [[Definition:Accumulation Point of Set|accumulation point]] of $H$
+\end{theorem}

+\begin{proof}
+=== Sufficient Condition ===
+Let $x \in H$ be an [[Definition:Isolated Point (Topology)|isolated point]] in $H$.
+Then by definition of [[Definition:Isolated Point (Topology)|isolated point]]:
+:$\exists U \in \tau: H \cap U = \left\{ {x}\right\}$
+That is, by definition of [[Definition:Unique|uniqueness]]:
+:$\lnot \forall U \in \tau: \left({x \in U \implies \exists y \in S: \left({y \in H \cap U \land x \ne y}\right)}\right)$
+Hence by [[Characterization of Derivative by Open Sets]]:
+:$x \notin H'$
+where $H'$ denotes the [[Definition:Set Derivative|derivative]] of $H$.
+Thus by definition of [[Definition:Set Derivative|derivative]]:
+:$x$ is not an [[Definition:Accumulation Point of Set|accumulation point]] of $H$.
+{{qed|lemma}}
+=== Necessary Condition ===
+Let $x \in H$ not be an [[Definition:Accumulation Point of Set|accumulation point]] of $H$.
+Thus by definition of [[Definition:Set Derivative|derivative]]:
+:$x \notin H'$
+Hence:
+{{begin-eqn}}
+{{eqn | l = \lnot \forall U \in \tau
+ | o = :
+ | r = \left({x \in U \implies \exists y \in S: \left({y \in H \cap U \land x \ne y}\right)}\right)
+ | c = [[Characterization of Derivative by Open Sets]]
+}}
+{{eqn | l = \exists U \in \tau
+ | o = :
+ | r = \lnot \left({x \in U \implies \exists y \in S: \left({y \in H \cap U \land x \ne y}\right)}\right)
+ | c = [[Denial of Universality]]
+}}
+{{eqn | l = \exists U \in \tau
+ | o = :
+ | r = \left({x \in U \land \lnot \exists y \in S: \left({y \in H \cap U \land x \ne y}\right)}\right)
+ | c = [[Conjunction with Negative Equivalent to Negation of Implication]]
+}}
+{{eqn | l = \exists U \in \tau
+ | o = :
+ | r = \left({x \in U \land \forall y \in S: \lnot \left({y \in H \cap U \land x \ne y}\right)}\right)
+ | c = [[Denial of Existence]]
+}}
+{{eqn | l = \exists U \in \tau
+ | o = :
+ | r = \left({x \in U \land \forall y \in S: \left({y \in H \cap U \implies x = y}\right)}\right)
+ | c = [[Implication Equivalent to Negation of Conjunction with Negative]]
+}}
+{{eqn | l = \exists U \in \tau
+ | o = :
+ | r = H \cap U = \left\{ {x}\right\}
+ | c = Definition of [[Definition:Unique|Uniqueness]], and $x \in H$
+}}
+{{end-eqn}}
+Thus by definition of [[Definition:Isolated Point (Topology)|isolated point]]:
+:$x$ is an isolated point in $H$.
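+To illustrate with a concrete example, take $\R$ under the usual topology and let $H = \set 0 \cup \set {x \in \R: 1 \le x \le 2}$.
+Taking $U = \left({-\dfrac 1 2, \dfrac 1 2}\right)$ gives $H \cap U = \set 0$, so $0$ is an [[Definition:Isolated Point (Topology)|isolated point]] in $H$, and correspondingly $0$ is not an [[Definition:Accumulation Point of Set|accumulation point]] of $H$.
+By contrast, every [[Definition:Open Set (Topology)|open set]] containing $1$ contains points of $H$ other than $1$, so $1$ is an [[Definition:Accumulation Point of Set|accumulation point]] of $H$ and hence not an [[Definition:Isolated Point (Topology)|isolated point]] in $H$.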
+{{qed}} +[[Category:Isolated Points]] +[[Category:Accumulation Points]] +grmf8cl1o8om1tbe6spc1mrqkf15c74 +\end{proof}<|endoftext|> +\section{Point is Isolated iff belongs to Set less Derivative} +Tags: Isolated Points, Set Derivatives + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $H \subseteq S$. +Let $x \in S$. +Then: +:$x$ is an [[Definition:Isolated Point (Topology)|isolated point]] in $H$ +{{iff}}: +:$x \in H \setminus H'$ +where +:$H'$ denotes the [[Definition:Set Derivative|derivative]] of $H$. +\end{theorem} + +\begin{proof} +$x$ is an [[Definition:Isolated Point (Topology)|isolated point]] in $H$ +$\iff$ $x \in H$ and $x$ is not an [[Definition:Accumulation Point of Sequence|accumulation point]] of $H$ by [[Point is Isolated iff not Accumulation Point]] +$\iff$ $x \in H$ and $x \notin H'$ by definition of [[Definition:Set Derivative|derivative]] +$\iff$ $x \in H \setminus H'$ by definition of [[Definition:Set Difference|set difference]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Dense-in-itself iff Subset of Derivative} +Tags: Denseness, Set Derivatives + +\begin{theorem} +Let $T$ be a [[Definition:Topological Space|topological space]]. +Let $A \subseteq T$. +Then: +:$A$ is [[Definition:Dense-in-itself|dense-in-itself]] +{{iff}}: +:$A \subseteq A'$ +where +:$A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$. +\end{theorem} + +\begin{proof} +$A$ is [[Definition:Dense-in-itself|dense-in-itself]] +$\iff$ every $x \in A$ is not [[Definition:Isolated Point (Topology)|isolated point]] in $A$ by definition of [[Definition:Dense-in-itself|dense-in-itself]] +$\iff$ every $x \in A$ is [[Definition:Accumulation Point of Set|accumulation point]] of $A$ by [[Point is Isolated iff not Accumulation Point]] +$\iff$ every $x \in A$ belongs to $A'$ by definition of [[Definition:Set Derivative|derivative]] +$\iff$ $A \subseteq A'$ by definition of [[Definition:Subset|subset]]. +{{qed}} +[[Category:Denseness]] +[[Category:Set Derivatives]] +q22ronc5mp9vnewyqchvq7sa5szs7g8 +\end{proof}<|endoftext|> +\section{Closure of Dense-in-itself is Dense-in-itself in T1 Space} +Tags: Denseness, Set Closures, T1 Spaces + +\begin{theorem} +Let $T$ be a [[Definition:T1 Space|$T_1$]] [[Definition:Topological Space|topological space]]. +Let $A \subseteq T$. +Let $A$ be [[Definition:Dense-in-itself|dense-in-itself]]. +Then the [[Definition:Closure (Topology)|closure]] $A^-$ of $A$ is also [[Definition:Dense-in-itself|dense-in-itself]]. +\end{theorem} + +\begin{proof} +Let $A$ be [[Definition:Dense-in-itself|dense-in-itself]]. +Then by [[Dense-in-itself iff Subset of Derivative]]: +:$(1): \quad A \subseteq A'$ +where $A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$. 
+By [[Derivative of Derivative is Subset of Derivative in T1 Space|Derivative of Derivative is Subset of Derivative in $T_1$ Space]]: +:$(2): \quad A'' \subseteq A'$ +By [[Dense-in-itself iff Subset of Derivative]] it is sufficient to proof that: +:$A^- \subseteq \left({A^-}\right)'$ +Thus: +{{begin-eqn}} +{{eqn | l = \left({A^-}\right)' + | r = \left({A \cup A'}\right)' + | c = [[Closure Equals Union with Derivative]] +}} +{{eqn | r = A' \cup A'' + | c = [[Derivative of Union is Union of Derivatives]] +}} +{{eqn | r = A' + | c = $(2)$ and [[Union with Superset is Superset]] +}} +{{eqn | r = A \cup A' + | c = $(1)$ and [[Union with Superset is Superset]] +}} +{{eqn | r = A^- + | c = [[Closure Equals Union with Derivative]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Union of Set of Dense-in-itself Sets is Dense-in-itself} +Tags: Denseness, Set Derivatives + +\begin{theorem} +Let $T$ be a [[Definition:Topological Space|topological space]]. +Let $\mathcal F \subseteq \mathcal P \left({T}\right)$ such that +:every element of $\mathcal F$ is [[Definition:Dense-in-itself|dense-in-itself]]. +Then the [[Definition:Union of Set of Sets|union]] $\bigcup \mathcal F$ is also [[Definition:Dense-in-itself|dense-in-itself]]. +\end{theorem} + +\begin{proof} +By [[Dense-in-itself iff Subset of Derivative]]: +:$\forall A \in \mathcal F: A \subseteq A'$ +where $A'$ denotes the [[Definition:Set Derivative|derivative]] of $A$. +Then by [[Set Union Preserves Subsets]]: +:$\displaystyle \bigcup \mathcal F \subseteq \bigcup_{A \in \mathcal F} A'$ +By [[Union of Derivatives is Subset of Derivative of Union]]: +:$\displaystyle \bigcup_{A \in \mathcal F} A' \subseteq \left({\bigcup \mathcal F}\right)'$ +Then by [[Subset Relation is Transitive]]: +:$\displaystyle \bigcup \mathcal F \subseteq \left({\bigcup \mathcal F}\right)'$ +The result follows by [[Dense-in-itself iff Subset of Derivative]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Equivalence of Definitions of Weight of Topological Space} +Tags: Topology + +\begin{theorem} +Let $T$ be a [[Definition:Topological Space|topological space]]. +Let $\mathbb B$ be the [[Definition:Set|set]] of all [[Definition:Analytic Basis|bases]] of $T$. +The following definitions of the [[Definition:Weight of Topological Space|weight of $T$]] are [[Definition:Logical Equivalence|equivalent]]: +\end{theorem} + +\begin{proof} +By [[Cardinal Class is Subset of Ordinal Class]], the [[Definition:Set|set]]: +:$M = \set {\card \BB: \BB \in \mathbb B}$ +is a [[Definition:Subset|subset]] of the [[Definition:Ordinal Class|class of ordinals]]. +By [[Ordinal Class is Strongly Well-Ordered by Subset]]: +:$M$ is [[Definition:Well-Ordering|well ordered]] by the [[Definition:Subset|$\subseteq$ relation]]. +By [[Ordinal Class is Strongly Well-Ordered by Subset]] there exists a [[Definition:Smallest Element|smallest element]] $m_0 \in M$: +:$\forall m \in M: m_0 \subseteq m$ +Hence by [[Smallest Element is Minimal]] there exists a [[Definition:Analytic Basis|basis]] $\mathcal B_0$ of $T$ which has minimal [[Definition:Cardinality|cardinality]]: +:$m_0 = \map {w_2} T$. 
+Let: +:$\displaystyle \map {w_1} T = \bigcap_{\BB \mathop \in \mathbb B} \card \BB$ +By [[Intersection is Subset/General Result|Intersection is Subset]]: +:$\displaystyle \map {w_1} T = \bigcap M \subseteq m_0$ +But by [[Intersection is Largest Subset/General Result|Intersection is Largest Subset]]: +:$\displaystyle \mathfrak m_0 \subseteq \bigcap M$ +By definition of [[Definition:Set Equality|set equality]]: +:$\map {w_1} T = \map {w_2} T$ +and hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Space is First-Countable iff Character not greater than Aleph 0} +Tags: First-Countable Spaces + +\begin{theorem} +Let $T$ be a [[Definition:Topological Space|topological space]]. +$T$ is [[Definition:First-Countable Space|first-countable]] {{iff}}: +:$\chi \left({T}\right) \leq \aleph_0$ +where $\chi \left({T}\right)$ denotes the [[Definition:Character of Topological Space|character]] of $T$. +\end{theorem} + +\begin{proof} +=== Sufficient Condition === +Let $T$ be [[Definition:First-Countable Space|first-countable]]. +By definition of [[Definition:First-Countable Space|first-countable]]: +:$\forall x \in T: \exists \mathcal B \in \mathbb B \left({x}\right): \left\vert{\mathcal B}\right\vert \leq \aleph_0$ +where $\mathbb B \left({x}\right)$ denotes the set of all [[Definition:Local Basis|local bases]] at $x$. +Then by definition of [[Definition:Character of Point in Topological Space|character of a point]]: +:$\forall x \in T: \chi \left({x, T}\right) \leq \aleph_0$ +Hence by definition of [[Definition:Character of Topological Space|character of topogical space]]: +:$\chi \left({T}\right) \leq \aleph_0$ +{{qed|lemma}} +=== Necessary Condition === +Let $\chi \left({T}\right) \leq \aleph_0$. +By definition of [[Definition:Character of Topological Space|character of topogical space]]: +:$\forall x \in T: \chi \left({x, T}\right) \leq \aleph_0$ +Then by definition of [[Definition:Character of Point in Topological Space|character of a point]]: +:$\forall x \in T: \exists \mathcal B \in \mathbb B \left({x}\right): \left\vert{\mathcal B}\right\vert \leq \aleph_0$ +Thus by definition of [[Definition:First-Countable Space|first-countable]]: +:$T$ is [[Definition:First-Countable Space|first-countable]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Difference of Two Powers/Examples/Difference of Two Cubes} +Tags: Third Powers, Difference of Two Cubes, Examples of Use of Difference of Two Powers + +\begin{theorem} +:$x^3 - y^3 = \paren {x - y} \paren {x^2 + x y + y^2}$ +\end{theorem} + +\begin{proof} +From [[Difference of Two Powers]]: +:$\displaystyle a^n - b^n = \paren {a - b} \sum_{j \mathop = 0}^{n - 1} a^{n - j - 1} b^j$ +The result follows directly by setting $n = 3$. +{{qed}} +\end{proof}<|endoftext|> +\section{Existence of Subfamily of Cardinality not greater than Weight of Space and Unions Equal} +Tags: Topology + +\begin{theorem} +Let $T$ be a [[Definition:Topological Space|topological space]]. +Let $\mathcal F$ be a [[Definition:Set of Sets|set]] of [[Definition:Open Set (Topology)|open sets]] of $T$. +There exists a [[Definition:Subset|subset]] $\mathcal G \subseteq \mathcal F$ such that: +:$\displaystyle \bigcup \mathcal G = \bigcup \mathcal F$ +and: +:$\left\vert{\mathcal G}\right\vert \leq w \left({T}\right)$ +where: +:$w \left({T}\right)$ denotes the [[Definition:Weight of Topological Space|weight]] of $T$ +:$\left\vert{\mathcal G}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $\mathcal G$. 
+\end{theorem} + +\begin{proof} +By definition of [[Definition:Weight of Topological Space|weight]] of $T$ there exists a [[Definition:Analytic Basis|basis]] $\mathcal B$ of $T$ such that: +:$(1): \quad \left\vert{\mathcal B}\right\vert = w \left({T}\right)$ +Let: +:$\mathcal B_1 = \left\{{W \in \mathcal B: \exists U \in \mathcal F: W \subseteq U}\right\}$ +By definition of [[Definition:Subset|subset]]: +:$\mathcal B_1 \subseteq \mathcal B$ +Then by [[Subset implies Cardinal Inequality]]: +:$(2): \quad \left\vert{\mathcal B_1}\right\vert \leq \left\vert{\mathcal B}\right\vert$ +By definition of set $\mathcal B_1$: +:$\forall W \in \mathcal B_1: \exists U \in \mathcal F: W \subseteq U$ +Then by the [[Axiom:Axiom of Choice|Axiom of Choice]] there exists a [[Definition:Mapping|mapping]] $f$ from $\mathcal B_1$ into $\mathcal F$ such that +:$(3): \quad \forall U \in \mathcal B_1: U \subseteq f \left({U}\right)$ +Let: +:$\mathcal G = \operatorname{Im} \left({f}\right)$ +where $\operatorname{Im} \left({f}\right)$ denotes the [[Definition:Image of Mapping|image]] of $f$. +[[Definition:By Hypothesis|By hypothesis]]: +:$\mathcal G \subseteq \mathcal F$ +Then by [[Union of Subset of Family is Subset of Union of Family]]: +:$\displaystyle \bigcup \mathcal G \subseteq \bigcup \mathcal F$ +By definition of [[Definition:Set Equality|set equality]], to prove $\displaystyle \bigcup \mathcal G = \bigcup \mathcal F$ it is sufficient to show: +:$\displaystyle \bigcup \mathcal F \subseteq \bigcup \mathcal G$ +Let $\displaystyle x \in \bigcup \mathcal F$. +By definition of [[Definition:Union of Set of Sets|union]] there exists a [[Definition:Set|set]] $A$ such that: +:$A \in \mathcal F$ and $x \in A$ +Because $A$ is [[Definition:Open Set (Topology)|open]], then by definition of [[Definition:Analytic Basis|basis]] there exists $U \in \mathcal B$ such that: +:$x \in U \subseteq A$ +By definition of the set $\mathcal B_1$: +:$U \in \mathcal B_1$ +Because $\mathcal G = \operatorname{Im} \left({f}\right)$: +:$f \left({U}\right) \in \mathcal G$ +By $(3)$ and [[Set is Subset of Union]]: +:$\displaystyle U \subseteq f \left({U}\right) \subseteq \bigcup \mathcal G$ +Thus by definition of [[Definition:Subset|subset]]: +:$\displaystyle x \in \bigcup \mathcal G$ +This ends the proof of inclusion. +By [[Cardinality of Image of Mapping not greater than Cardinality of Domain]]: +:$\left\vert{\operatorname{Im} \left({f}\right)}\right\vert \leq \left\vert{\mathcal B_1}\right\vert$ +Thus by $(1)$ and $(2)$: +:$\left\vert{\mathcal G}\right\vert \leq w \left({T}\right)$ +{{qed}} +\end{proof}<|endoftext|> +\section{Set of Singletons is Smallest Basis of Discrete Space} +Tags: Discrete Topology + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Discrete Space|discrete]] [[Definition:Topological Space|topological space]]. +Let $\mathcal B = \left\{{\left\{{x}\right\} : x \in S}\right\}$. +Then $\mathcal B$ is the smallest [[Definition:Analytic Basis|basis]] of $T$. +That is: +:$\mathcal B$ is a [[Definition:Analytic Basis|basis]] of $T$ +and: +:for every [[Definition:Analytic Basis|basis]] $\mathcal C$ of $T$, $\mathcal B \subseteq \mathcal C$. +\end{theorem} + +\begin{proof} +By [[Basis for Discrete Topology]] $\mathcal B$ is a [[Definition:Analytic Basis|basis]] of $T$. +It remains to be shown that $\mathcal B$ is the smallest [[Definition:Analytic Basis|basis]] of $T$. +Let $\mathcal C$ be a [[Definition:Analytic Basis|basis]] of $T$. +Let $A \in \mathcal B$. 
+By definition of the set $\mathcal B$:
+:$\exists x \in S: A = \left\{{x}\right\}$
+By definition of [[Definition:Analytic Basis|basis]]:
+:$\exists B \in \mathcal C: x \in B \subseteq A$
+Then by [[Singleton of Element is Subset]]:
+:$\left\{{x}\right\} \subseteq B$
+Hence $B = A$ by definition of [[Definition:Set Equality|set equality]].
+Thus $A \in \mathcal C$.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Conditional and Inverse are not Equivalent}
+Tags: Implication
+
+\begin{theorem}
+A [[Definition:Conditional|conditional statement]]:
+:$p \implies q$
+is not [[Definition:Logical Equivalence|logically equivalent]] to its [[Definition:Inverse Statement|inverse]]:
+:$\lnot p \implies \lnot q$
+\end{theorem}
+
+\begin{proof}
+We apply the [[Method of Truth Tables]] to the proposition:
+:$\left({p \implies q}\right) \iff \left({\lnot p \implies \lnot q}\right)$
+$\begin{array}{|ccc|c|ccccc|} \hline
+(p & \implies & q) & \iff & (\lnot & p & \implies & \lnot & q) \\
+\hline
+F & T & F & T & T & F & T & T & F \\
+F & T & T & F & T & F & F & F & T \\
+T & F & F & F & F & T & T & T & F \\
+T & T & T & T & F & T & T & F & T \\
+\hline
+\end{array}$
+As can be seen by inspection, the [[Definition:Truth Value|truth values]] under the [[Definition:Main Connective (Propositional Logic)|main connectives]] do not match for all [[Definition:Boolean Interpretation|boolean interpretations]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Weight of Discrete Topology equals Cardinality of Space}
+Tags: Discrete Topology
+
+\begin{theorem}
+Let $T = \left({S, \tau}\right)$ be a [[Definition:Discrete Space|discrete]] [[Definition:Topological Space|topological space]].
+Then:
+:$w \left({T}\right) = \left\vert{S}\right\vert$
+where
+:$w \left({T}\right)$ denotes the [[Definition:Weight of Topological Space|weight]] of $T$
+:$\left\vert{S}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $S$.
+\end{theorem}
+
+\begin{proof}
+By [[Basis for Discrete Topology]] the set $\mathcal B = \left\{{\left\{{x}\right\}: x \in S}\right\}$ is a [[Definition:Analytic Basis|basis]] of $T$.
+By [[Set of Singletons is Smallest Basis of Discrete Space]] $\mathcal B$ is the smallest basis of $T$:
+:for every basis $\mathcal C$ of $T$, $\mathcal B \subseteq \mathcal C$.
+Then by [[Subset implies Cardinal Inequality]]:
+:for every basis $\mathcal C$ of $T$, $\left\vert{\mathcal B}\right\vert \leq \left\vert{\mathcal C}\right\vert$.
+Hence $\left\vert{\mathcal B}\right\vert$ is the minimal cardinality of a basis of $T$:
+:$w \left({T}\right) = \left\vert{\mathcal B}\right\vert$ by definition of [[Definition:Weight of Topological Space|weight]].
+Thus by [[Cardinality of Set of Singletons]]:
+:$w \left({T}\right) = \left\vert{S}\right\vert$.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Cardinality of Set of Singletons}
+Tags: Cardinals
+
+\begin{theorem}
+Let $S$ be a [[Definition:Set|set]].
+Let $T = \left\{ {\left\{{x}\right\}: x \in S}\right\}$ be the [[Definition:Set|set]] of all [[Definition:Singleton|singletons]] of [[Definition:Element|elements]] of $S$.
+Then:
+:$\left\vert T \right\vert = \left\vert S \right\vert$
+where $\left\vert S \right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $S$.
+\end{theorem}
+
+\begin{proof}
+Define a mapping $f: S \to T$:
+:$\forall x \in S: f \left({x}\right) = \left\{ {x}\right\}$
+By [[Singleton Equality]]:
+:$\forall x, y \in S: f \left({x}\right) = f \left({y}\right) \implies x = y$
+Then, by definition, $f$ is an [[Definition:Injection|injection]].
+By the definition of set $T$: +:$\forall y \in T: \exists x \in S: y = f \left({x}\right)$ +Then, by definition, $f$ is a [[Definition:Surjection|surjection]]. +Hence, by definition, $f: S \to T$ is a [[Definition:Bijection|bijection]]. +Thus, by definition, $S$ and $T$ are [[Definition:Set Equivalence|equivalent]]: +:$S \sim T$ +Thus by definition of [[Definition:Cardinality|cardinality]]: +:$\left\vert T \right\vert = \left\vert S \right\vert$ +{{qed}} +\end{proof}<|endoftext|> +\section{Basis has Subset Basis of Cardinality equal to Weight of Space} +Tags: Topology + +\begin{theorem} +Let $T = \struct {X, \tau}$ be a [[Definition:Topological Space|topological space]]. +Let $\BB$ be a [[Definition:Analytic Basis|basis]] of $T$. +Then there exists a [[Definition:Analytic Basis|basis]] $\BB_0$ of $T$ such that +:$\BB_0 \subseteq \BB$ and $\card {\BB_0} = \map w T$ +where: +:$\card {\BB_0}$ denotes the [[Definition:Cardinality|cardinality]] of $\BB_0$ +:$\map w T$ denotes the [[Definition:Weight of Topological Space|weight]] of $T$. +\end{theorem} + +\begin{proof} +There are two cases: +:[[Definition:Infinite Set|infinite]] [[Definition:Weight of Topological Space|weight]] +:[[Definition:Finite Set|finite]] [[Definition:Weight of Topological Space|weight]]. +=== Case when Weight is Infinite === +Let $T$ has [[Definition:Infinite Set|infinite]] [[Definition:Weight of Topological Space|weight]]. +By definition of [[Definition:Weight of Topological Space|weight]], there exists a basis $\BB_1$ of $T$ such that: +:$(1): \quad \card {\BB_1} = \map w T$ +We will prove: +:$(2): \quad \displaystyle \forall U \in \BB_1: \exists \AA \subseteq \BB: U = \bigcup \AA \land \card \AA \le \map w T$ +Let $U \in \BB_1$. +Let $S = \set {W \in \BB: W \subseteq U}$. +By definition of [[Definition:Subset|subset]]: +:$S \subseteq \BB$ +By definition of [[Definition:Analytic Basis|basis]]: +$\displaystyle \bigcup S = U$ +By definition of set $S$, $S$ is set of open subset of $T$. +Then by [[Existence of Subfamily of Cardinality not greater than Weight of Space and Unions Equal]] there exists a subset $\AA \subseteq S$ such that: +:$\displaystyle \bigcup \AA = \bigcup S$ and $\card \AA \le \map w T$ +Thus by [[Subset Relation is Transitive]]: +:$\AA \subseteq \BB$. +Thus: +:$\displaystyle U = \bigcup \AA \land \card \AA \le \map w T$ +This ends proof of $(2)$. +By $(2)$ and [[Axiom:Axiom of Choice|Axiom of Choice]] there exists a mapping $f: \BB_1 \to \powerset \BB$ such that: +:$(3): \quad \displaystyle \forall U \in \BB_1: U = \bigcup \map f U \land \card {\map f U} \le \map w T$ +By [[Union is Smallest Superset]], because $\forall U \in \BB_1: \map f U \subseteq \BB$: +:$\displaystyle \bigcup_{U \mathop \in \BB_1} \map f U \subseteq \BB$ +Set $\BB_0 := \displaystyle \bigcup_{U \mathop \in \BB_1} \map f U = \bigcup \Img f$ +Now we will show that $\BB_0$ is [[Definition:Analytic Basis|basis]] of $T$. +By definition of [[Definition:Analytic Basis|basis]]: +:$\BB \subseteq \tau$ +Thus by [[Subset Relation is Transitive]]: +:$\BB_0 \subseteq \tau$ +Let $A$ be an open subset of $X$. +Let $p$ be a point of $X$ such that $p \in A$. +Then by definition of [[Definition:Analytic Basis|basis]] there exists $U \in \BB_1$ such that: +:$p \in U \subseteq A$. +By $(3)$, $U = \displaystyle \bigcup \map f U$. 
+Then by definition of [[Definition:Union of Set of Sets|union]] there exists a set $D$ such that:
+:$p \in D \in \map f U$
+By [[Set is Subset of Union]]:
+:$D \subseteq U$
+By definition of [[Definition:Union of Set of Sets|union]]:
+:$D \in \BB_0$
+Thus by [[Subset Relation is Transitive]]:
+:$\exists D \in \BB_0: p \in D \subseteq A$
+By definition of [[Definition:Analytic Basis|basis]], this shows that $\BB_0$ is a [[Definition:Analytic Basis|basis]] of $T$.
+By $(1)$ and [[Cardinality of Image of Mapping not greater than Cardinality of Domain]]:
+:$\card {\Img f} \le \map w T$
+For every $U \in \BB_1$:
+:$\card {\map f U} \le \map w T$
+Then by [[Cardinality of Union not greater than Product]]:
+:$\card {\BB_0} \le \card {\map w T \times \map w T}$
+Thus by [[Cardinal Product Equal to Maximum]], because $\map w T$ is infinite:
+:$\card {\BB_0} \le \map w T$.
+Thus by definition of [[Definition:Weight of Topological Space|weight]]:
+:$\card {\BB_0} = \map w T$.
+{{qed|lemma}}
+=== Case when Weight is Finite ===
+Let $T$ have [[Definition:Finite Set|finite]] [[Definition:Weight of Topological Space|weight]].
+By [[Finite Weight Space has Basis equal Image of Mapping of Intersections]], there exist a basis $\BB_0$ of $T$ and a mapping $f: X \to \tau$ such that:
+:$\BB_0 = \Img f$
+and:
+:$\forall x \in X: \paren {x \in \map f x \land \forall U \in \tau: x \in U \implies \map f x \subseteq U}$
+Thus by [[Image of Mapping of Intersections is Smallest Basis]]:
+:$\BB_0 \subseteq \BB$
+Thus by [[Cardinality of Image of Mapping of Intersections is not greater than Weight of Space]]:
+:$\card {\BB_0} \le \map w T$
+Thus by definition of [[Definition:Weight of Topological Space|weight]]:
+:$\card {\BB_0} = \map w T$
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Cardinality of Union not greater than Product}
+Tags: Cardinals, Set Union, Cardinality
+
+\begin{theorem}
+Let $\FF$ be a [[Definition:Set of Sets|set of sets]].
+Let:
+:$\size \FF \le \mathbf m$
+where
+:$\size \FF$ denotes the [[Definition:Cardinality|cardinality]] of $\FF$
+:$\mathbf m$ is a [[Definition:Cardinal Number|cardinal number]] (possibly [[Definition:Infinite Set|infinite]]).
+Let:
+:$\forall A \in \FF: \size A \le \mathbf n$
+where
+:$\mathbf n$ is a [[Definition:Cardinal Number|cardinal number]] (possibly [[Definition:Infinite Set|infinite]]).
+Then:
+:$\ds \size {\bigcup \FF} \le \size {\mathbf m \times \mathbf n} = \mathbf m \mathbf n$
+\end{theorem}
+
+\begin{proof}
+$\FF = \O$ or $\FF = \set \O$ or $\O \ne \FF \ne \set \O$.
+In the case where $\FF = \O$ or $\FF = \set \O$:
+{{begin-eqn}}
+{{eqn | l = \size {\bigcup \FF}
+ | r = \size \O
+ | c = [[Union of Empty Set]], [[Union of Singleton]]
+}}
+{{eqn | o = \le
+ | r = \size {\mathbf m \times \mathbf n}
+ | c = [[Subset implies Cardinal Inequality]] because [[Empty Set is Subset of All Sets]]
+}}
+{{end-eqn}}
+In the case where $\O \ne \FF \ne \set \O$:
+:by [[Surjection iff Cardinal Inequality]] there exists a surjection $f: \mathbf m \to \FF$ as $\size {\mathbf m} = \mathbf m$ by [[Cardinal of Cardinal Equal to Cardinal]].
+$\FF$ contains a non-empty set $A_0$, so:
+:$\size {A_0} > \mathbf 0$.
+By assumption:
+:$\size {A_0} \le \mathbf n$.
+Then:
+:$\mathbf 0 < \mathbf n$.
+Hence:
+:$\size {\set 0} = \mathbf 1 \le \mathbf n$.
+Define a family $\family {B_A}_{A \mathop \in \FF}$: +:$B_A = \begin{cases} A & A \ne \O \\ \set 0 & A = \O \end {cases}$ +By [[Surjection iff Cardinal Inequality]] define a family +$\family {g_A}_{A \mathop \in \FF}$ of [[Definition:Surjection|surjections]]: +:$\forall A \in \FF: g_A: \mathbf n \to B_A$ is a [[Definition:Surjection|surjection]]. +Define a mapping $h:\mathbf m \times \mathbf n \to \ds \bigcup_{A \mathop \in \FF} B_A$: +:$\forall \alpha \in \mathbf m: \forall \beta \in \mathbf n: \map h {\alpha, \beta} = \map {g_{\map f \alpha} } \beta$ +We will show by definition that $h$ is a [[Definition:Surjection|surjection]]. +Let $x \in \ds \bigcup_{A \mathop \in \FF} B_A$. +Then by definition of [[Definition:Union of Family|union]]: +:$\exists A \in \FF: x \in B_A$. +By definition of [[Definition:Surjection|surjection]]: +:$\exists \alpha \in \mathbf m: \map f \alpha = A$. +By definition of [[Definition:Surjection|surjection]]: +:$\exists \beta \in \mathbf n: \map {g_A} \beta = x$ +So: +:$\map h {\alpha, \beta} = \map {g_{\map f \alpha} } \beta = x$ +This ends the proof of [[Definition:Surjection|surjection]]. +Hence by [[Surjection iff Cardinal Inequality]]: +:$\ds \size {\bigcup_{A \mathop \in \FF} B_A} \le \size {\mathbf m \times \mathbf n}$ +By definition of [[Definition:Subset|subset]]: +:$\forall A \in \FF: A \subseteq B_A$. +Then by [[Set Union Preserves Subsets]]: +:$\ds \bigcup \FF \subseteq \bigcup_{A \mathop \in \FF} B_A$. +Hence by [[Subset implies Cardinal Inequality]]: +:$\ds \size {\bigcup \FF} \le \ds \size {\bigcup_{A \mathop \in \FF} B_A}$ +Thus the result: +:$\size {\bigcup \FF} \le \size {\mathbf m \times \mathbf n}$ +Thus by definition of [[Definition:Product of Cardinals|product of cardinals]]: +:$\size {\mathbf m \times \mathbf n} = \mathbf m \mathbf n$ +{{qed}} +\end{proof}<|endoftext|> +\section{Image of Mapping of Intersections is Smallest Basis} +Tags: Topology + +\begin{theorem} +Let $T = \left({X, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Let $f:X \to \tau$ be a mapping such that +:$\forall x \in X: \left({x \in f \left({x}\right) \land \forall U \in \tau: x \in U \implies f \left({x}\right) \subseteq U}\right)$. +Then the [[Definition:Image of Mapping|image]] $\operatorname{Im} \left({f}\right)$ is subset of every [[Definition:Analytic Basis|basis]] of $T$. +\end{theorem} + +\begin{proof} +Let $\mathcal B$ be a [[Definition:Analytic Basis|basis]]. +Let $V \in \operatorname{Im} \left({f}\right)$. +Then by definition of [[Definition:Image of Mapping|image]] there exists a [[Definition:Element|point]] $b \in X$ such that +: $V = f \left({b}\right)$. +Then $V$ is [[Definition:Open Set (Topology)|open]] because $\operatorname{Im} \left({f}\right) \subseteq \tau$. +By assumption of mapping $f$: +: $b \in V$. +Then by definition of [[Definition:Analytic Basis|basis]] there exists a subset $U \in \mathcal B$ such that +: $b \in U \subseteq V$. +By definition of [[Definition:Analytic Basis|basis]]: +: $\mathcal B \subseteq \tau$. +Then by definition of [[Definition:Subset|subset]]: +: $U \in \tau$. +Then by assumption of mapping $f$: +:$f \left({b}\right) \subseteq U$. +Thus by definition of [[Definition:Set Equality|set equality]]: +: $V = U \in \mathcal B$. +{{qed}} +\end{proof}<|endoftext|> +\section{Cardinality of Image of Mapping of Intersections is not greater than Weight of Space} +Tags: Topology, Cardinals + +\begin{theorem} +Let $T = \struct {X, \tau}$ be a [[Definition:Topological Space|topological space]]. 
+Let $f: X \to \tau$ be a [[Definition:Mapping|mapping]] such that: +:$\forall x \in X: \paren {x \in \map f x \land \forall U \in \tau: x \in U \implies \map f x \subseteq U}$ +Then the [[Definition:Cardinality|cardinality]] of the [[Definition:Image of Mapping|image]] of $f$ is no greater than the [[Definition:Weight of Topological Space|weight]] of $T$: +$\card {\Img f} \le \map w T$. +\end{theorem} + +\begin{proof} +By definition of [[Definition:Weight of Topological Space|weight]] there exists a [[Definition:Analytic Basis|basis]] $\BB$ of $T$ such that. +:$\card \BB = \map w T$ +By [[Image of Mapping of Intersections is Smallest Basis]]: +:$\Img f \subseteq \BB$ +Thus by [[Subset implies Cardinal Inequality]]: +:$\card {\Img f} \le \card \BB = \map w T$ +{{qed}} +\end{proof}<|endoftext|> +\section{Finite Weight Space has Basis equal to Image of Mapping of Intersections} +Tags: Topology + +\begin{theorem} +Let $T = \struct {X, \tau}$ be a [[Definition:Topological Space|topological space]] with [[Definition:Finite Set|finite]] [[Definition:Weight of Topological Space|weight]]. +Then there exist a [[Definition:Analytic Basis|basis]] $\BB$ of $T$ and a mapping $f:X \to \tau$ such that: +:$\BB = \Img f$ and +:$\forall x \in X: \paren {x \in \map f x \land \forall U \in \tau: x \in U \implies \map f x \subseteq U}$ +where $\Img f$ denotes the [[Definition:Image of Mapping|image]] of $f$. +\end{theorem} + +\begin{proof} +By definition of [[Definition:Weight of Topological Space|weight]] there exists a [[Definition:Analytic Basis|basis]] $\BB$ such that: +:$\card \BB = \map w T$ +where: +:$\map w T$ denotes the [[Definition:Weight of Topological Space|weight]] of $T$ +:$\card \BB$ denotes the [[Definition:Cardinality|cardinality]] of $\BB$. +By assumption that weight is finite: +:$\card \BB$ is finite +Then by [[Cardinality of Set is Finite iff Set is Finite]]: +:$\BB$ is finite +Define a mapping $f: X \to \powerset X$: +:$(1): \quad \forall x \in X: \map f x = \bigcap \set {U \in \BB: x \in U}$ +By definition of [[Definition:Subset|subset]]: +:$\forall x \in X: \set {U \in \BB: x \in U} \subseteq \BB$ +By [[Subset of Finite Set is Finite]]: +:$\forall x \in X: \set {U \in \BB: x \in U}$ is finite +Then by [[General Intersection Property of Topological Space]]: +:$\forall x \in X: \bigcap \set {U \in \BB: x \in U} \in \tau$ +So: +:$f: X \to \tau$ +We will prove that: +:$(2): \quad \forall x \in X: \paren {x \in \map f x \land \forall U \in \tau: x \in U \implies \map f x \subseteq U}$ +Let $x \in X$. +By $(1)$: +:$\map f x = \bigcap \set {U \in \BB: x \in U}$ +Thus by definition of [[Definition:Set Intersection|intersection]]: +:$x \in \map f x$ +Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $T$. +Let $x \in U$. +By definition of [[Definition:Analytic Basis|basis]]: +:$\exists V \in \BB: x \in V \subseteq U$ +Then: +:$V \in \set {U \in \BB: x \in U}$ +Hence by [[Intersection is Subset]]: +:$\map f x \subseteq V$ +Thus by [[Subset Relation is Transitive]]: +:$\map f x \subseteq U$ +This ends the proof of $(2)$. +We will prove that $\Img f$ is a [[Definition:Analytic Basis|basis]] of $T$. +By $f: X \to \tau$ and definition of [[Definition:Image of Mapping|image]]: +:$\Img f \subseteq \tau$ +Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $T$. +Let $x$ be a [[Definition:Element|point]] $x \in X$ such that: +:$x \in U$ +By $(2)$: +:$\map f x \in \Img f \land x \in \map f x \subseteq U$ +By definition of [[Definition:Analytic Basis|basis]] this ends the proof of basis. 
+Thus the result.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Rubik's Cube has 54 Facets}
+Tags: Rubik's Cube
+
+\begin{theorem}
+Let $S$ be the [[Definition:Set|set]] of [[Definition:Facet of Rubik's Cube|facets]] of [[Definition:Rubik's Cube|Rubik's cube]].
+Then the [[Definition:Cardinality of Finite Set|cardinality]] of $S$ is given by:
+:$\card S = 54$
+That is:
+:A [[Definition:Rubik's Cube|Rubik's cube]] has $54$ [[Definition:Facet of Rubik's Cube|facets]].
+\end{theorem}
+
+\begin{proof}
+A [[Definition:Cube (Geometry)|cube]], by definition, has $6$ [[Definition:Face of Polyhedron|faces]].
+Each [[Definition:Face of Polyhedron|face]] is subdivided into $9$ [[Definition:Facet of Rubik's Cube|facets]].
+Hence there are $6 \times 9 = 54$ [[Definition:Facet of Rubik's Cube|facets]] in total.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Equivalence of Definitions of Symmetric Difference/(3) iff (5)}
+Tags: Equivalence of Definitions of Symmetric Difference
+
+\begin{theorem}
+Let $S$ and $T$ be [[Definition:Set|sets]].
+{{TFAENocat|def = Symmetric Difference|view = symmetric difference $S * T$ between $S$ and $T$}}
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = x \in S * T
+ | o = \iff
+ | r = x \in S \oplus x \in T
+ | c = [[Definition:Symmetric Difference/Definition 5|Symmetric Difference: Definition 5]]
+}}
+{{eqn | r = \left({\neg \left({x \in S}\right) \land \left({x \in T}\right)}\right) \lor \left({\left({x \in S}\right) \land \neg \left({x \in T}\right)}\right)
+ | o = \iff
+ | c = [[Non-Equivalence as Disjunction of Conjunctions]]
+}}
+{{eqn | r = \left({x \in \overline S \land x \in T}\right) \lor \left({x \in S \land x \in \overline T}\right)
+ | o = \iff
+ | c = {{Defof|Set Complement}}
+}}
+{{eqn | r = \left({x \in \overline S \cap T}\right) \lor \left({x \in S \cap \overline T}\right)
+ | o = \iff
+ | c = {{Defof|Set Intersection}}
+}}
+{{eqn | r = x \in \left({\overline S \cap T}\right) \cup \left({S \cap \overline T}\right)
+ | o = \iff
+ | c = {{Defof|Set Union}}
+}}
+{{eqn | r = x \in \left({S \cap \overline T}\right) \cup \left({\overline S \cap T}\right)
+ | o = \iff
+ | c = [[Union is Commutative]]
+}}
+{{end-eqn}}
+The result follows by definition of [[Definition:Set Equality|set equality]].
+\end{proof}<|endoftext|>
+\section{Equivalence of Definitions of Symmetric Difference/(2) iff (5)}
+Tags: Equivalence of Definitions of Symmetric Difference
+
+\begin{theorem}
+Let $S$ and $T$ be [[Definition:Set|sets]].
+{{TFAENocat|def = Symmetric Difference|view = symmetric difference $S * T$ between $S$ and $T$}}
+\end{theorem}
+
+\begin{proof}
+{{begin-eqn}}
+{{eqn | l = x \in S * T
+ | o = \iff
+ | r = x \in S \oplus x \in T
+ | c = [[Definition:Symmetric Difference/Definition 5|Symmetric Difference: Definition 5]]
+}}
+{{eqn | r = \left({x \in S \lor x \in T} \right) \land \neg \left({x \in S \land x \in T}\right)
+ | o = \iff
+ | c = Definition of [[Definition:Exclusive Or|Exclusive Or]]
+}}
+{{eqn | r = \left({x \in S \cup T}\right) \land \left({x \notin S \cap T}\right)
+ | o = \iff
+ | c = Definition of [[Definition:Set Intersection|Set Intersection]] and [[Definition:Set Union|Set Union]]
+}}
+{{eqn | r = x \in \left({S \cup T}\right) \setminus \left({S \cap T}\right)
+ | o = \iff
+ | c = Definition of [[Definition:Set Difference|Set Difference]]
+}}
+{{end-eqn}}
+The result follows by definition of [[Definition:Set Equality|set equality]].
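+To illustrate the equivalence with a concrete example, let $S = \set {1, 2, 3}$ and $T = \set {2, 3, 4}$.
+Then:
+:$\paren {S \cup T} \setminus \paren {S \cap T} = \set {1, 2, 3, 4} \setminus \set {2, 3} = \set {1, 4}$
+and indeed $1$ and $4$ are precisely the elements lying in exactly one of $S$ and $T$, as the [[Definition:Exclusive Or|exclusive or]] characterization requires.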
+\end{proof}<|endoftext|> +\section{Partition of Facets of Rubik's Cube} +Tags: Rubik's Cube + +\begin{theorem} +Let $S$ denote the [[Definition:Set|set]] of the [[Definition:Facet of Rubik's Cube|facets]] of [[Definition:Rubik's Cube|Rukik's cube]]. +Then $S$ can be [[Definition:Set Partition|partitioned]] as follows: +:$S = \set {S_C \mid S_E \mid S_Z}$ +where: +:$S_C$ denotes the [[Definition:Set|set]] of [[Definition:Corner Facet of Rubik's Cube|corner facets]] +:$S_E$ denotes the [[Definition:Set|set]] of [[Definition:Edge Facet of Rubik's Cube|edge facets]] +:$S_Z$ denotes the [[Definition:Set|set]] of [[Definition:Center Facet of Rubik's Cube|center facets]]. +\end{theorem} + +\begin{proof} +From the definition of the [[Definition:Facet of Rubik's Cube|facets]], each [[Definition:Face of Rubik's Cube|face]] is divided into $9$ [[Definition:Facet of Rubik's Cube|facets]]. +:[[File:RubiksCubeFacets.png|600px]] +A [[Definition:Facet of Rubik's Cube|facet]] is either: +:on the corner of a [[Definition:Face of Rubik's Cube|face]], for example $flu$, $fru$ +:on the edge of a [[Definition:Face of Rubik's Cube|face]], for example $fu$, $fr$ +:in the center of a [[Definition:Face of Rubik's Cube|face]], for example $F$. +:$(1):\quad$ Each [[Definition:Facet of Rubik's Cube|facet]] can be either in $S_C$ or $S_E$ or $S_Z$ and can not be in more than one. +:$(2):\quad$ Each [[Definition:Facet of Rubik's Cube|facet]] can be either in $S_C$ or $S_E$ or $S_Z$ and there are no other possibilities. +:$(3):\quad$ None of $S_C$, $S_E$ and $S_Z$ is [[Definition:Empty Set|empty]]. +Thus the criteria for $S = \set {S_C \mid S_E \mid S_Z}$ to be a [[Definition:Set Partition|partition]] are fulfilled. +{{qed}} +\end{proof}<|endoftext|> +\section{Even and Odd Integers form Partition of Integers} +Tags: Set Partitions, Odd Integers, Even Integers, Integers, 2 + +\begin{theorem} +The [[Definition:Odd Integer|odd integers]] and [[Definition:Even Integer|even integers]] form a [[Definition:Set Partition|partition]] of the [[Definition:Integer|integers]]. +\end{theorem} + +\begin{proof} +Let $n \in \Z$ be an [[Definition:Integer|integer]]. +Let $\Bbb O$ be the set of [[Definition:Odd Integer|odd integers]] and $\Bbb E$ be the set of [[Definition:Even Integer|even integers]]. +By the [[Division Theorem]]: +:$\forall n \in \Z: \exists! q, r \in \Z: n = 2 q + r, 0 \le r < 2$ +from which it follows that either: +:$n = 2 q \in \Bbb E$ +or: +:$n = 2 q + 1 \in \Bbb O$ +Thus: +:$(1): \quad$ each [[Definition:Element|element]] of $\Z$ is in no more than one of $\Bbb E$ and $\Bbb O$ +:$(2): \quad$ each [[Definition:Element|element]] of $\Z$ is in at least one of $\Bbb E$ and $\Bbb O$ +and: +:$(3): \quad$ setting $q = 0$ it is seen that $0 \in \Bbb E$ and $1 \in \Bbb O$ and so neither $\Bbb E$ or $\Bbb O$ is [[Definition:Empty Set|empty]]. +Thus $\set {\Bbb E \mid \Bbb O}$ is a [[Definition:Set Partition|partition]] of $\Z$ by definition. +{{qed}} +\end{proof}<|endoftext|> +\section{Analog between Logic and Set Theory} +Tags: Set Theory, Logic + +\begin{theorem} +The concepts of [[Definition:Set Theory|set theory]] have directly corresponding concepts in [[Definition:Logic|logic]]: +:{| border = "1" +|- +! style="padding: 2px 10px" | Set Theory +! 
style="padding: 2px 10px" | Logic +|- +| align="left" style="padding: 2px 10px"| [[Definition:Set|Set]]: $S, T$ +| align="left" style="padding: 2px 10px"| [[Definition:Statement|Statement]]: $p, q$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Set Union|Union]]: $S \cup T$ +| align="left" style="padding: 2px 10px"| [[Definition:Disjunction|Disjunction]]: $p \lor q$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Set Intersection|Intersection]]: $S \cap T$ +| align="left" style="padding: 2px 10px"| [[Definition:Conjunction|Conjunction]]: $p \land q$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Subset|Subset]]: $S \subseteq T$ +| align="left" style="padding: 2px 10px"| [[Definition:Conditional|Conditional]]: $p \implies q$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Symmetric Difference|Symmetric Difference]]: $S * T$ +| align="left" style="padding: 2px 10px"| [[Definition:Exclusive Or|Exclusive Or]]: $p \oplus q$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Set Complement|Complement]]: $\relcomp {} S$ +| align="left" style="padding: 2px 10px"| [[Definition:Logical Not|Logical Not]]: $\lnot p$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Set Equality|Set Equality]]: $S = T$ +| align="left" style="padding: 2px 10px"| [[Definition:Biconditional|Biconditional]]: $p \iff q$ +|- +| align="left" style="padding: 2px 10px"| [[Definition:Venn Diagram|Venn Diagram]] +| align="left" style="padding: 2px 10px"| [[Definition:Truth Table|Truth Table]] +|} +\end{theorem} + +\begin{proof} +Let $P$ and $Q$ be [[Definition:Propositional Function|propositional functions]]. +Let $S$ and $T$ be [[Definition:Subset|subsets]] of a [[Definition:Universal Set|universe]] $\Bbb U$ such that: +:$S = \set {x \in \Bbb U: \map P x}$ +:$T = \set {x \in \Bbb U: \map Q x}$ +By the following definitions: +{{begin-axiom}} +{{axiom | n = 1 + | lc= [[Definition:Set Intersection|Intersection]]: + | ml= S \cap T + | mo= := + | mr= \set {x \in \Bbb U: \map P x \land \map Q x} +}} +{{axiom | n = 2 + | lc= [[Definition:Set Union|Union]]: + | ml= S \cup T + | mo= := + | mr= \set {x \in \Bbb U: \map P x \lor \map Q x} +}} +{{axiom | n = 3 + | lc= [[Definition:Subset|Subset]]: + | ml= S \subseteq T + | mo= := + | mr= \forall x \in \Bbb U: \map P x \implies \map Q x +}} +{{axiom | n = 4 + | lc= [[Definition:Symmetric Difference|Symmetric Difference]]: + | ml= S * T + | mo= = + | mr= \set {x \in \Bbb U: \map P x \oplus \map Q x} +}} +{{axiom | n = 5 + | lc= [[Definition:Set Complement|Complement]]: + | ml= \relcomp {} S + | mo= := + | mr= \set {x \in \Bbb U: \lnot \map P x} +}} +{{axiom | n = 6 + | lc= [[Definition:Set Equality|Set Equality]]: + | ml= S = T + | mo= := + | mr= \forall x \in \Bbb U: \map P x \iff \map Q x +}} +{{end-axiom}} +{{qed}} +\end{proof}<|endoftext|> +\section{Euler Phi Function of 1} +Tags: Examples of Euler Phi Function + +\begin{theorem} +:$\map \phi 1 = 1$ +\end{theorem} + +\begin{proof} +The only [[Definition:Strictly Positive Integer|(strictly) positive integer]] less than or equal to $1$ is $1$ itself. +By [[Integer is Coprime to 1]], $1$ is [[Definition:Coprime Integers|coprime]] to itself. +Hence, by definition, there is exactly $1$ [[Definition:Integer|integer]] less than or equal to $1$ which is [[Definition:Coprime Integers|coprime]] with $1$. +Hence the result. 
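As an informal check, not part of the proof, $\map \phi 1$ can be computed by the naive counting characterisation used above; the function name and the sampled range below are mine.
<syntaxhighlight lang="python">
# Informal check: phi(n) counts 1 <= k <= n with gcd(k, n) = 1, so phi(1) counts only k = 1.
from math import gcd

def phi(n):
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)

assert phi(1) == 1
print([phi(n) for n in range(1, 11)])  # [1, 1, 2, 2, 4, 2, 6, 4, 6, 4]
</syntaxhighlight>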
+{{qed}} +\end{proof}<|endoftext|> +\section{Cardinality of Set is Finite iff Set is Finite} +Tags: Cardinals + +\begin{theorem} +Let $A$ be a [[Definition:Set|set]]. +$\left\vert{A}\right\vert$ is [[Definition:Finite Set|finite]] +{{iff}} +$A$ is finite +where $\left\vert{A}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $A$. +\end{theorem} + +\begin{proof} +Definition of [[Definition:Cardinal|cardinal]]: +:$(1): \quad \left\vert A \right\vert \sim A$. +:$\left\vert{A}\right\vert$ is [[Definition:Finite Set|finite]] +{{iff}} +:$\exists n \in \N: \left\vert A \right\vert \sim \N_n$ by definition of [[Definition:Finite Set|finite set]] +{{iff}} +: $\exists n \in \N: A \sim \N_n$ by $(1)$ and [[Set Equivalence is Equivalence Relation]] +{{iff}} +: $A$ is finite by definition of [[Definition:Finite Set|finite set]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Multiplication using Parabola} +Tags: Parabolas, Multiplication + +\begin{theorem} +:[[File:Multiplication-using-Parabola.png|500px|right|thumb]] +Let the [[Definition:Parabola|parabola]] $P$ defined as $y = x^2$ be plotted on the [[Definition:Cartesian Plane|Cartesian plane]]. +Let $A = \tuple {x_a, y_a}$ and $B = \tuple {x_b, y_b}$ be [[Definition:Point|points]] on the curve $\map f x$ so that $x_a < x_b$. +Then the [[Definition:Line Segment|line segment]] joining $A$ and $B$ will cross the [[Definition:Y-Axis|$y$-axis]] at $-x_a x_b$. +Thus $P$ can be used as a [[Definition:Nomogram|nomogram]] to calculate the [[Definition:Real Multiplication|product]] of two [[Definition:Real Number|numbers]] $x_a$ and $x_b$, as follows: +:$(1) \quad$ Find the points $-x_a$ and $x_b$ on the [[Definition:X-Axis|$x$-axis]]. +:$(2) \quad$ Find the points $A$ and $B$ where the lines $x = -x_a$ and $x = x_b$ [[Definition:Intersection (Geometry)|cut]] $P$. +:$(3) \quad$ Lay a [[Definition:Straightedge|straightedge]] on the [[Definition:Straight Line|straight line]] joining $A$ and $B$ and locate its [[Definition:Intercept|$y$-intercept]] $c$. +Then $x_a x_b$ can be read off from the [[Definition:Y-Axis|$y$-axis]] as the position of $c$. +\end{theorem} + +\begin{proof} +Let $\map f x = x^2$. +Then: +:$\map f {x_a} = x_a^2$ +and: +:$\map f {x_b} = x_b^2$ +Then the [[Definition:Slope|slope]] $m$ of the [[Definition:Line Segment|line segment]] joining $A$ and $B$ will be: +{{begin-eqn}} +{{eqn | l = m + | r = \frac {x_b^2 - x_a^2} {x_b - x_a} + | c = [[Equation of Straight Line in Plane/Point-Slope Form|Equation of Straight Line in Plane: Point-Slope Form]] +}} +{{eqn | r = \frac {\paren {x_b - x_a} \paren {x_b + x_a} } {x_b - x_a} + | c = [[Difference of Two Squares]] +}} +{{eqn | r = x_b + x_a + | c = cancelling, $x_a \neq x_b$ +}} +{{end-eqn}} +From [[Equation of Straight Line in Plane/Slope-Intercept Form|Equation of Straight Line in Plane: Slope-Intercept Form]]: +:$y = \paren {x_b + x_a} x + c$ +where $c$ denotes the [[Definition:Intercept|$y$-intercept]]. +Substituting the [[Definition:Coordinates|coordinates]] of point $A = \tuple {x_a, x_a^2}$ for $\tuple {x, y}$: +{{begin-eqn}} +{{eqn | l = x_a^2 + | r = \paren {x_b + x_a} x_a + c +}} +{{eqn | ll= \leadsto + | l = c + | r = x_a^2 - \paren {x_a + x_b} x_a +}} +{{eqn | r = x_a^2 - x_a^2 - x_b x_a +}} +{{eqn | r = -x_b x_a +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Topology Generated by Closed Sets} +Tags: Topology + +\begin{theorem} +Let $X$ be a [[Definition:Set|set]]. +Let $\mathcal F$ be a [[Definition:Set of Sets|set]] of [[Definition:Subset|subsets]] of $X$. 
+Suppose that +:$\varnothing \in \mathcal F$ and +:for every subsets $A$ and $B$ of $X$ if $A, B \in \mathcal F$, then $A \cup B \in \mathcal F$ and +:for every subset $\mathcal G \subseteq \mathcal F$, $\bigcap \mathcal G \in \mathcal F$ and +:$\tau = \left\{{\complement_X \left(A\right): A \in \mathcal F}\right\}$. +Then: +:$T = \left( {X, \tau} \right)$ is [[Definition:Topological Space|topological space]] and +:for every subset $A$ of $X$, $A$ is [[Definition:Closed Set (Topology)|closed]] in $T$ {{iff}} $A \in \mathcal F$. +\end{theorem}<|endoftext|> +\section{Equivalence of Definitions of Countably Infinite Set} +Tags: Countable Sets + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]]. +{{TFAE|def = Countably Infinite Set}} +\end{theorem} + +\begin{proof} +From [[Integers are Countably Infinite]] there is a [[Definition:Bijection|bijection]] between $\Z$, the [[Definition:Integer|set of integers]], and $\N$, the [[Definition:Natural Numbers|set of natural numbers]]. +Let $h: \N \to \Z$ be such a [[Definition:Bijection|bijection]]. +Let $f: S \to \N$ be a [[Definition:Bijection|bijection]]. +From [[Composite of Bijections is Bijection]]: +:$h \circ f: S \to \Z$ is a [[Definition:Bijection|bijection]]. +Similarly, let $g: S \to \Z$ be a [[Definition:Bijection|bijection]]. +By [[Inverse of Bijection is Bijection]], $h^{-1}: \Z \to \N$ is a [[Definition:Bijection|bijection]]. +Again from [[Composite of Bijections is Bijection]]: +:$h^{-1} \circ g: S \to \N$ is a [[Definition:Bijection|bijection]]. +Hence the result. +{{qed}} +[[Category:Countable Sets]] +bya5obx5auhsemkb9y5lnv8ebjer6c5 +\end{proof}<|endoftext|> +\section{Set of Odd Integers is Countably Infinite} +Tags: Countable Sets, Odd Integers + +\begin{theorem} +Let $\Bbb O$ be the [[Definition:Set|set]] of [[Definition:Odd Integer|odd integers]]. +Then $\Bbb O$ is [[Definition:Countably Infinite Set|countably infinite]]. +\end{theorem} + +\begin{proof} +Let $f: \Bbb O \to \Z$ be the [[Definition:Mapping|mapping]] defined as: +:$\forall x \in \Bbb O: \map f x = \dfrac {x + 1} 2$ +$f$ is [[Definition:Well-Defined Mapping|well-defined]] as $x + 1$ is [[Definition:Even Integer|even]] and so $\dfrac {x + 1} 2 \in \Z$. +Let $x, y \in \Bbb O$ such that $\map f x = \map f y$. +Then: +{{begin-eqn}} +{{eqn | l = \map f x + | r = \map f y + | c = +}} +{{eqn | ll= \leadsto + | l = \dfrac {x + 1} 2 + | r = \dfrac {y + 1} 2 + | c = Definition of $f$ +}} +{{eqn | ll= \leadsto + | l = x + 1 + | r = y + 1 + | c = +}} +{{eqn | ll= \leadsto + | l = x + | r = y + | c = +}} +{{end-eqn}} +Thus $f$ is [[Definition:Injection|injective]] by definition. +Consider the [[Definition:Inverse of Mapping|inverse]] $f^{-1}$. +By inspection: +:$\forall x \in \Z: \map {f^{-1} } x = 2 x - 1$ +$f^{-1}$ is [[Definition:Well-Defined Mapping|well-defined]], and $2 x - 1$ is [[Definition:odd Integer|odd]]. +Thus $f^{-1}$ is a [[Definition:Mapping|mapping]] from $\Z$ to $\Bbb O$. +Then: +{{begin-eqn}} +{{eqn | l = \map {f^{-1} } x + | r = \map {f^{-1} } y + | c = +}} +{{eqn | ll= \leadsto + | l = 2 x - 1 + | r = 2 y - 1 + | c = Definition of $f^{-1}$ +}} +{{eqn | ll= \leadsto + | l = 2 x + | r = 2 y + | c = +}} +{{eqn | ll= \leadsto + | l = x + | r = y + | c = +}} +{{end-eqn}} +Thus $f^{-1}$ is [[Definition:Injection|injective]] by definition. +It follows by the [[Cantor-Bernstein-Schröder Theorem]] that there exists a [[Definition:Bijection|bijection]] between $\Z$ and $\Bbb O$. 
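The mapping $f$ and its proposed [[Definition:Inverse of Mapping|inverse]] can be checked mechanically on a sample range. The following Python sketch is an informal illustration only; the sampled bounds are arbitrary.
<syntaxhighlight lang="python">
# Informal check of the pairing between the odd integers and Z used above.
def f(x):
    # odd integer -> integer, as defined in the proof
    assert x % 2 != 0
    return (x + 1) // 2

def f_inv(x):
    # integer -> odd integer, the proposed inverse
    return 2 * x - 1

for n in range(-50, 51):
    assert f(f_inv(n)) == n        # f o f_inv is the identity on the sampled integers
for odd in range(-49, 50, 2):
    assert f_inv(f(odd)) == odd    # f_inv o f is the identity on the sampled odd integers
print("f and f_inv are mutually inverse on the sampled range.")
</syntaxhighlight>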
+{{qed}} +\end{proof}<|endoftext|> +\section{Unique Readability for Polish Notation} +Tags: Formal Systems + +\begin{theorem} +Let $\AA$ be an [[Definition:Alphabet|alphabet]]. +Then [[Definition:Polish Notation|Polish notation]] for $\AA$ has the [[Definition:Unique Readability Property|unique readability property]]. +\end{theorem} + +\begin{proof} +Let $\phi$ be a [[Definition:WFF|WFF]] of [[Definition:Polish Notation|Polish notation]] for $\AA$. +Apply the [[Principle of Mathematical Induction]] on the [[Definition:Length of Sequence|length]] of $\phi$ to prove: +:$(1): \quad$ No [[Definition:Initial Part|initial part]] of $\phi$ is a [[Definition:WFF|WFF]], except $\phi$ itself; +:$(2): \quad$ If the first [[Definition:Symbol|symbol]] of $\phi$ has [[Definition:Arity|arity]] $n$, then there exist [[Definition:Unique|unique]] [[Definition:WFF|WFFs]] $\phi_1, \ldots, \phi_n$ such that $\phi = s \phi_1 \cdots \phi_n$. +Let $\phi'$ be a [[Definition:WFF|WFF]] that is an [[Definition:Initial Part|initial part]] of $\phi$. +Because all [[Definition:WFF|WFFs]] have at least [[Definition:Length of Sequence|length]] $1$, it follows that: +:$\phi' = s \phi'_1 \cdots \phi'_n$ +Now it must be that either $\phi_1$ is an [[Definition:Initial Part|initial part]] of $\phi'_1$ or vice versa. +By the [[Definition:Induction Hypothesis|induction hypothesis]] on $(1)$, it must be that $\phi_1 = \phi'_1$ because $\phi_1$ and $\phi'_1$ are both strictly shorter than $\phi$. +Now if, given $1 < j \le n$: +:$\phi_i = \phi'_i$ for all $i < j$ +it follows that $\phi_j$ and $\phi'_j$ start at the same position in $\phi$. +Hence again, inductively, it follows that $\phi_j = \phi'_j$. +Thus $\phi_i = \phi'_i$ for all $1 \le i \le n$; that is: +:$\phi = \phi'$ +The result follows by the [[Principle of Mathematical Induction]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Identity Matrix is Permutation Matrix} +Tags: Unit Matrices, Permutation Matrices + +\begin{theorem} +An [[Definition:Identity Matrix|identity matrix]] is an example of a [[Definition:Permutation Matrix|permutation matrix]]. +\end{theorem} + +\begin{proof} +An [[Definition:Identity Matrix|identity matrix]], by definition, has instances of $1$ on the [[Definition:Main Diagonal|main diagonal]] and $0$ elsewhere. +Each [[Definition:Diagonal Element|diagonal element]] is by definition on one [[Definition:Row of Matrix|row]] and one [[Definition:Column of Matrix|column]] of the [[Definition:Square Matrix|matrix]]. +Also by definition, each [[Definition:Diagonal Element|diagonal element]] is on a different [[Definition:Row of Matrix|row]] and +[[Definition:Column of Matrix|column]] from each other [[Definition:Diagonal Element|diagonal element]]. +The result follows by definition of [[Definition:Permutation Matrix|permutation matrix]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Full Rook Matrix is Invertible} +Tags: Rook Matrices + +\begin{theorem} +A [[Definition:Full Rook Matrix|full rook matrix]] is [[Definition:Invertible Matrix|invertible]]. +\end{theorem} + +\begin{proof} +Let $\mathbf A$ be a [[Definition:Full Rook Matrix|full rook matrix]]. +By definition, $\mathbf A$ is an instance of a [[Definition:Permutation Matrix|permutation matrix]]. +By [[Determinant of Permutation Matrix]], it follows that $\det \mathbf A = \pm 1$. +By [[Matrix is Invertible iff Determinant has Multiplicative Inverse]]: +:$\mathbf A$ is [[Definition:Invertible Matrix|invertible]]. 
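As an informal illustration, not part of the proof, the following Python sketch builds the full rook matrix of one arbitrarily chosen permutation, confirms that its determinant is $\pm 1$, and checks that its transpose is an explicit inverse; the permutation and the helper names are mine.
<syntaxhighlight lang="python">
# Informal illustration: a full rook (permutation) matrix is invertible.
perm = [2, 0, 1]                       # the permutation 0 -> 2, 1 -> 0, 2 -> 1
n = len(perm)
A = [[1 if j == perm[i] else 0 for j in range(n)] for i in range(n)]

def det(M):
    # Laplace expansion along the first row; adequate for tiny matrices.
    if len(M) == 1:
        return M[0][0]
    return sum((-1) ** j * M[0][j] * det([row[:j] + row[j + 1:] for row in M[1:]])
               for j in range(len(M)))

A_T = [list(col) for col in zip(*A)]   # transpose of A
I = [[1 if i == j else 0 for j in range(n)] for i in range(n)]
AAT = [[sum(A[i][k] * A_T[k][j] for k in range(n)) for j in range(n)] for i in range(n)]

assert det(A) in (1, -1)               # the determinant is a unit
assert AAT == I                        # A A^T = I, so A^T is an inverse of A
print("det(A) =", det(A))
</syntaxhighlight>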
+{{qed}} +\end{proof}<|endoftext|> +\section{Product of Rook Matrices is Rook Matrix} +Tags: Rook Matrices + +\begin{theorem} +Let $\mathbf A$ and $\mathbf B$ be [[Definition:Rook Matrix|rook matrices]]. +Their [[Definition:Matrix Product (Conventional)|product]] $\mathbf {A B}$ is also a [[Definition:Rook Matrix|rook matrix]]. +\end{theorem} + +\begin{proof} +An [[Definition:Element of Matrix|element]] $a b_{ij}$ of $\mathbf {A B}$ is formed by [[Definition:Real Multiplication|multiplying]] each [[Definition:Element of Matrix|element]] of [[Definition:Row of Matrix|row]] $i$ of $\mathbf A$ by its corresponding [[Definition:Element of Matrix|element]] of [[Definition:Column of Matrix|column]] $j$ of $\mathbf B$. +No more than $1$ [[Definition:Element of Matrix|element]] of [[Definition:Row of Matrix|row]] $i$ equals $1$, and the rest equal $0$. +No more than $1$ [[Definition:Column of Matrix|column]] $k$ of $\mathbf B$ contains $1$ in its $i$th [[Definition:Element of Matrix|element]], and the rest contain $0$. +So of all the [[Definition:Element of Matrix|elements]] of [[Definition:Row of Matrix|row]] $i$ of $\mathbf {A B}$, only $a b_{ik}$ is $1$, and the rest are $0$. +By the same argument, each [[Definition:Row of Matrix|row]] of $\mathbf {A B}$ contains no more than one $1$, and all the rest of the [[Definition:Element of Matrix|elements]] are $0$. +In $\mathbf B$, each [[Definition:Column of Matrix|column]] $j$ has no more than one [[Definition:Element of Matrix|element]] equal to $1$, and all are in a different [[Definition:Row of Matrix|row]] $k$. +Thus each [[Definition:Row of Matrix|row]] contains its $1$, if it has one, in a different [[Definition:Column of Matrix|column]] from all the other [[Definition:Row of Matrix|rows]]. +So there is no more than one $1$ in each [[Definition:Column of Matrix|column]] of $\mathbf {A B}$. +Hence the result by definition of [[Definition:Rook Matrix|rook matrix]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Topology Defined by Basis} +Tags: Topology + +\begin{theorem} +Let $S$ be a [[Definition:Set|set]]. +Let $\mathcal B$ be a [[Definition:Set of Sets|set]] of [[Definition:Subset|subsets]] of $S$. +Suppose that +:$(B1): \quad \forall A_1, A_2 \in \mathcal B: \forall x \in A_1 \cap A_2: \exists A \in \mathcal B: x \in A \subseteq A_1 \cap A_2$ +:$(B2): \quad \forall x \in X: \exists A \in \mathcal B: x \in A$ +::$\tau = \left\{{\bigcup \mathcal G: \mathcal G \subseteq \mathcal B}\right\}$ +Then: +:$T = \left({S, \tau}\right)$ is a [[Definition:Topological Space|topological space]] +:$\mathcal B$ is a [[Definition:Analytic Basis|basis]] of $T$. +\end{theorem} + +\begin{proof} +We have to prove [[Definition:Open Set Axioms|Open Set Axioms]] $(O1)-(O3)$: +:$(O1): \quad$ The [[Definition:Union of Set of Sets|union]] of an arbitrary [[Definition:Subset|subset]] of $\tau$ is an [[Definition:Element|element]] of $\tau$. +Let $\mathcal F \subseteq \tau$. +Define by definition of $\tau$ a family $\left({\mathcal G_A}\right)_{A \in \mathcal F}$ such that +:$\forall A \in \mathcal F: A = \bigcup \mathcal G_A \land \mathcal G_A \subseteq \mathcal B$. 
+By [[General Distributivity of Set Union]]: +:$\displaystyle \bigcup \bigcup_{A \in \mathcal F} \mathcal G_A = \bigcup_{A \in \mathcal F} \bigcup \mathcal G_A = \bigcup \mathcal F$ +By [[Union of Subsets is Subset/Family of Sets]]: +:$\displaystyle \bigcup_{A \in \mathcal F} \mathcal G_A \subseteq \mathcal B$ +Thus by definition of $\tau$ +:$\bigcup \mathcal F \in \tau$ +:$(O2): \quad$ The [[Definition:Set Intersection|intersection]] of any two [[Definition:Element|elements]] of $\tau$ is an element of $\tau$. +Let $A$ and $B$ be elements of $\tau$. +By definition of $\tau$ there exist subsets $\mathcal G_A$ and $\mathcal G_B$ of $\mathcal B$ such that: +:$A = \bigcup \mathcal G_A$ and $B = \bigcup \mathcal G_B$ and $\mathcal G_A, \mathcal G_B \subseteq \mathcal B$ +Set $\mathcal G_C = \left\{{C \in \mathcal B: C \subseteq A \cap B}\right\}$ +By [[Union of Subsets is Subset]]: +:$\bigcup \mathcal G_C \subseteq A \cap B$ +We will prove the inclusion: $A \cap B \subseteq \bigcup \mathcal G_C$ +Let $x \in A \cap B$. +Then by definition of [[Definition:Set Intersection|intersection]]: +: $v \in A$ and $x \in B$ +Hence by definition of [[Definition:Set Union|union]]: +:$\exists D \in \mathcal G_A: x \in D$ +Analogically: +:$\exists E \in \mathcal G_B: x \in E$ +By definition of [[Definition:Subset|subset]] $D, E \in \mathcal B$ and by definition of [[Definition:Set Intersection|intersection]] $x \in D \cap E$. +Then by $(B1)$: +:$\exists U \in \mathcal B: x \in U \subseteq D \cap E$ +By [[Set is Subset of Union/Set of Sets]]: +:$D \subseteq A$ and $E \subseteq B$ +Then by [[Set Intersection Preserves Subsets]]: +:$D \cap E \subseteq A \cap B$ +Hence by [[Subset Relation is Transitive]]: +:$U \subseteq A \cap B$ +Then by definition of $\mathcal G_C$: +:$U \in \mathcal G_C$ +Thus by definition of [[Definition:Union of Set of Sets|union]]: +:$x \in \bigcup \mathcal G_C$ +This ends the proof of inclusion. +Then by definition of [[Definition:Set Equality|set equality]]: +:$A \cap B = \bigcup \mathcal G_C$ +By definition of [[Definition:Subset|subset]]: +:$\mathcal G_C \subseteq \mathcal B$ +Thus by definition of $\tau$: +:$A \cap B \in \tau$ +:$(O3): \quad S$ is an [[Definition:Element|element]] of $\tau$. +By $(B2)$ and definition of [[Definition:Union of Set of Sets|union]]: +:$\bigcup \mathcal B = X$ +Because $\mathcal B \subseteq \mathcal B$ by definition of $\tau$: +:$S \in \tau$ +It remains to prove that $\mathcal B$ is a [[Definition:Analytic Basis|basis]] of $T$. +Let $U$ be an [[Definition:Open Set (Topology)|open set]] of $S$. +Let $x$ be a [[Definition:Element|point]] of $S$ such that $x \in U$ +By definition of $\tau$ there exists $\mathcal G \subseteq \mathcal B$ such that: +:$ U = \bigcup \mathcal G$ +By definition of [[Definition:Union of Set of Sets|union]]: +:$\exists A \in \mathcal G: x \in A$ +By definition of [[Definition:Subset|subset]]: +$A \in \mathcal B$ +Thus by [[Set is Subset of Union]]: +:$x \in A \subseteq U$ +Thus the result by definition of [[Definition:Analytic Basis|basis]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Equivalence of Definitions of Non-Invertible Matrix} +Tags: Matrix Theory + +\begin{theorem} +Let $\struct {R, +, \circ}$ be a [[Definition:Ring with Unity|ring with unity]]. +Let $n \in \Z_{>0}$ be a [[Definition:Strictly Positive Integer|(strictly) positive integer]]. +Let $\mathbf A$ be an [[Definition:Element|element]] of the [[Definition:Ring of Square Matrices|ring of square matrices]] $\struct {\map {\mathcal M_R} n, +, \times}$. 
+The following definitions for $\mathbf A$ to be [[Definition:Non-Invertible Matrix|non-invertible]] are [[Definition:Logical Equivalence|equivalent]]: +\end{theorem} + +\begin{proof} +Follows directly from [[Matrix is Invertible iff Determinant has Multiplicative Inverse]]. +{{qed}} +[[Category:Matrix Theory]] +\end{proof}<|endoftext|> +\section{Matrix is Non-Invertible iff Product with Non-Zero Vector is Zero} +Tags: Matrix Theory + +\begin{theorem} +Let $\mathbf A$ be a [[Definition:Square Matrix|square matrix]] of [[Definition:Order of Square Matrix|order $n$]]. +Then $\mathbf A$ is [[Definition:Non-Invertible Matrix|non-invertible]] {{iff}} there exists a [[Definition:Vector (Linear Algebra)|vector]] $\mathbf v$ of order $n$ such that: +:$\mathbf v \ne \mathbf 0$ +:$\mathbf A \mathbf v = \mathbf 0$ +where $\mathbf 0$ is the [[Definition:Zero Vector|zero vector]]. +\end{theorem} + +\begin{proof} +{{proof wanted|The results to assemble this proof from probably already exist somewhere, as should this proof itself in some format or other in the Linear Algebra category.}} +\end{proof}<|endoftext|> +\section{Equivalence of Definitions of Integer Congruence} +Tags: Modulo Arithmetic + +\begin{theorem} +Let $m \in \Z_{> 0}$. +{{TFAE|def = Congruence (Number Theory)/Integers|view = congruence modulo $m$}} +\end{theorem} + +\begin{proof} +Let $x_1, x_2, z \in \Z$. +Let $x_1 \equiv x_2 \pmod z$ as defined by the [[Definition:Congruence (Number Theory)/Integers/Remainder after Division|equal remainder after division]]: +:$\RR_z = \set {\tuple {x, y} \in \Z \times \Z: \exists k \in \Z: x = y + k z}$ +Let $\tuple {x_1, x_2} \in \RR_z$. +Then by definition: +:$\exists k \in \Z: x_1 = x_2 + k z$ +So, by definition of the [[Definition:Modulo Operation|modulo operation]]: +{{begin-eqn}} +{{eqn | l = x_1 \mod z + | r = \paren {x_2 + k z} - z \floor {\frac {x_2 + k z} z} + | c = +}} +{{eqn | r = \paren {x_2 + k z} - z \floor {\frac {x_2} z + k} + | c = +}} +{{eqn | r = \paren {x_2 + k z} - z \floor {\frac {x_2} z} - k z + | c = +}} +{{eqn | r = x_2 - z \floor {\frac {x_2} z} + | c = +}} +{{eqn | r = x_2 \mod z + | c = +}} +{{end-eqn}} +So: +:$x_1 \equiv x_2 \pmod z$ +in the sense of [[Definition:Congruence (Number Theory)/Integers/Modulo Operation|definition by modulo operation]]. +{{qed|lemma}} +Now let $x_1 \equiv x_2 \pmod z$ in the sense of [[Definition:Congruence (Number Theory)/Integers/Modulo Operation|definition by modulo operation]]. +That is: +:$x_1 \equiv x_2 \pmod z \iff x_1 \mod z = x_2 \mod z$ +Let $z = 0$. +Then by definition, $x_1 \mod 0 = x_1$ and $x_2 \mod 0 = x_2$. +So as $x_1 \mod 0 = x_2 \mod 0$ we have that $x_1 = x_2$. +So: +:$x_1 - x_2 = 0 = 0 \cdot z$ +and so $x_1 \equiv x_2 \pmod z$ in the sense of [[Definition:Congruence (Number Theory)/Integers/Integer Multiple|definition by integer multiple]]. +Now suppose $z \ne 0$. +Then from definition of the [[Definition:Modulo Operation|modulo operation]]: +:$x_1 \mod z = x_1 - z \floor {\dfrac {x_1} z}$ +:$x_2 \mod z = x_2 - z \floor {\dfrac {x_2} z}$ +Thus: +:$x_1 - z \floor {\dfrac {x_1} z} = x_2 - z \floor {\dfrac {x_2} z}$ +and so: +:$x_1 - x_2 = z \paren {\floor {\dfrac {x_1} z} - \floor {\dfrac {x_2} z} }$ +From the definition of the [[Definition:Floor Function|floor function]], we see that both $\floor {\dfrac {x_1} z}$ and $\floor {\dfrac {x_2} z}$ are [[Definition:Integer|integers]]. +Therefore, so is $\floor {\dfrac {x_1} z} - \floor {\dfrac {x_2} z}$ an [[Definition:Integer|integer]]. 
+So $\exists k \in \Z: x_1 - x_2 = k z$. +Thus $x_1 - x_2 = k z$ and: +:$x_1 \equiv x_2 \pmod z$ +in the sense of [[Definition:Congruence (Number Theory)/Integers/Integer Multiple|definition by integer multiple]]. +{{qed|lemma}} +Now let $x_1 \equiv x_2 \pmod z$ in the sense of [[Definition:Congruence (Number Theory)/Integers/Integer Multiple|definition by integer multiple]]. +That is, $\exists k \in \Z: x_1 - x_2 = k z$. +Then $x_1 = x_2 + k z$ and so $\tuple {x_1, x_2} \in \RR_z$ where: +:$\RR_z = \set {\tuple {x, y} \in \Z \times \Z: \exists k \in \Z: x = y + k z}$ +and so +:$x_1 \equiv x_2 \pmod z$ +in the sense of [[Definition:Congruence (Number Theory)|definition by equivalence relation]]. +{{qed|lemma}} +So all three definitions are equivalent: $(1) \implies (2) \implies (3) \implies (1)$. +{{qed}} +\end{proof}<|endoftext|> +\section{Occurrence in Polish Notation has Unique Scope} +Tags: Formal Systems + +\begin{theorem} +Let $\mathcal F$ be a [[Definition:Formal Language|formal language]] in [[Definition:Polish Notation|Polish notation]]. +Let $\mathbf A$ be a [[Definition:Well-Formed Formula|well-formed formula]] of $\mathcal F$. +Let $a$ be an [[Definition:Occurrence (Formal Systems)|occurrence]] in $\mathbf A$. +Then $a$ has a unique [[Definition:Scope of Occurrence|scope]]. +\end{theorem} + +\begin{proof} +From the [[Definition:Polish Notation/Formal Definition|formal definition of Polish notation]], it follows that $a$ must be introduced by the [[Definition:Rule of Formation|rule of formation]]: +:$a \mathbf A_1 \cdots \mathbf A_n$ +for some [[Definition:Well-Formed Formula|well-formed formulas]] $\mathbf A_1, \ldots, \mathbf A_n$. +By [[Unique Readability for Polish Notation]], the $\mathbf A_i$ are uniquely determined. +Then $\mathbf A' = a \mathbf A_1 \cdots \mathbf A_n$ is a [[Definition:Well-Formed Part|well-formed part]] of $\mathbf A$. +Moreover, any [[Definition:Well-Formed Part|well-formed part]] of $\mathbf A$ containing $a$ must contain $\mathbf A'$. +Hence $\mathbf A'$ is the [[Definition:Scope of Occurrence|scope]] of $a$. +{{qed}} +\end{proof}<|endoftext|> +\section{Krull's Theorem} +Tags: Ideal Theory, Maximal Ideals of Rings + +\begin{theorem} +Let $R$ be a [[Definition:Non-Null Ring|non-null]] [[Definition:Ring with Unity|ring with unity]]. +Then $R$ has a [[Definition:Maximal Ideal of Ring|maximal ideal]]. +\end{theorem} + +\begin{proof} +Let $\struct {P, \subseteq}$ be the [[Definition:Ordered Set|ordered set]] consisting of all [[Definition:Proper Ideal of Ring|proper ideals]] of $R$, ordered by [[Definition:Subset|inclusion]]. +The theorem is proved by applying [[Zorn's Lemma]] to $P$. +First, we check that the conditions for [[Zorn's Lemma]] are met: $P$ must be [[Definition:Non-Empty Set|non-empty]], and every [[Definition:Non-Empty Set|non-empty]] [[Definition:Chain (Set Theory)|chain]] in $P$ must have an [[Definition:Upper Bound of Set|upper bound]]. +=== $P$ is [[Definition:Non-Empty Set|non-empty]] === +Since $R$ is [[Definition:Non-Null Ring|non-null]], the [[Definition:Zero Ideal of Ring|zero ideal]] is a [[Definition:Proper Ideal of Ring|proper ideal]] of $R$, and thus an [[Definition:Element|element]] of $P$. +=== Every [[Definition:Non-Empty Set|non-empty]] [[Definition:Chain (Set Theory)|chain]] in $P$ has an [[Definition:Upper Bound of Set|upper bound]] in $P$ === +Let $\sequence {I_\alpha}_{\alpha \mathop \in A}$ be a [[Definition:Non-Empty Set|non-empty]] [[Definition:Chain (Set Theory)|chain]] of ideals in $P$. 
+Let $\displaystyle I = \bigcup_{\alpha \mathop \in A} I_\alpha$. +We will show that $I$ is an [[Definition:Upper Bound of Set|upper bound]] in $P$ for the [[Definition:Chain (Set Theory)|chain]] $\sequence {I_\alpha}_{\alpha \mathop \in A}$. +==== $I$ is a [[Definition:Proper Ideal of Ring|proper ideal]] of $R$ ==== +By [[Union of Chain of Proper Ideals is Proper Ideal]], $I$ is a [[Definition:Proper Ideal of Ring|proper ideal]] of $R$. +==== $I$ is an [[Definition:Upper Bound of Set|upper bound]] for the [[Definition:Chain (Set Theory)|chain]] $\sequence {I_\alpha}$ ==== +Since $I$ is a [[Definition:Proper Ideal of Ring|proper ideal]] of $R$, it is an [[Definition:Element|element]] of our [[Definition:Ordered Set|ordered set]] $P$. +$I$ is the [[Definition:Set Union|union]] of the $I_\alpha$, so $I_\alpha \subseteq I$ for all $\alpha \in A$. +This means that $I$ is an [[Definition:Upper Bound of Set|upper bound]] in $P$ for the [[Definition:Chain (Set Theory)|chain]] $\sequence {I_\alpha}_{\alpha \mathop \in A}$. +=== Applying [[Zorn's Lemma]] === +We have shown that the conditions for [[Zorn's Lemma]] are met: +:$(1): \quad P$ is [[Definition:Non-Empty Set|non-empty]] +:$(2): \quad$ every [[Definition:Non-Empty Set|non-empty]] [[Definition:Chain (Set Theory)|chain]] in $P$ has an [[Definition:Upper Bound of Set|upper bound]]. +Applying [[Zorn's Lemma]] to $\struct {P, \subseteq}$ gives us a [[Definition:Maximal Element|maximal element]] $M$. +This $M$ is a [[Definition:Proper Ideal of Ring|proper ideal]] of $R$ which is not contained in any other [[Definition:Proper Ideal of Ring|proper ideal]]. +So by definition, $M$ is a [[Definition:Maximal Ideal of Ring|maximal ideal]] of $R$. +{{qed}} +{{AoC|Zorn's Lemma}} +{{Namedfor|Wolfgang Krull|cat = Krull}} +[[Category:Ideal Theory]] +[[Category:Maximal Ideals of Rings]] +\end{proof}<|endoftext|> +\section{Exclusive Or as Conjunction of Disjunctions} +Tags: Exclusive Or, Disjunction, Conjunction, Exclusive Or as Conjunction of Disjunctions + +\begin{theorem} +: $p \oplus q \dashv \vdash \left({p \lor q}\right) \land \left({\neg p \lor \neg q}\right)$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = p \oplus q + | o = \dashv \vdash + | r = \paren {p \lor q} \land \neg \paren {p \land q} + | c = {{Defof|Exclusive Or}} +}} +{{eqn | o = \dashv \vdash + | r = \paren {p \lor q} \land \paren {\neg p \lor \neg q} + | c = [[De Morgan's Laws (Logic)/Disjunction of Negations|De Morgan's Laws: Disjunction of Negations]] +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +We apply the [[Method of Truth Tables]]. +As can be seen by inspection, the [[Definition:Truth Value|truth values]] under the [[Definition:Main Connective (Propositional Logic)|main connectives]] match for all [[Definition:Boolean Interpretation|boolean interpretations]]. 
+$\begin{array}{|ccc||ccccccccc|} \hline +p & \oplus & q & (p & \lor & q) & \land & (\neg & p & \lor & \neg & q) \\ +\hline +F & F & F & F & F & F & F & T & F & T & T & F \\ +F & T & T & F & T & T & T & T & F & T & F & T \\ +T & T & F & T & T & F & T & F & T & T & T & F \\ +T & F & T & T & T & T & F & F & T & F & F & T \\ +\hline +\end{array}$ +{{qed}} +\end{proof}<|endoftext|> +\section{NAND as Disjunction of Negations} +Tags: Logical NAND, Disjunction, NAND as Disjunction of Negations + +\begin{theorem} +: $p \uparrow q \dashv \vdash \neg p \lor \neg q$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = p \uparrow q + | o = \dashv \vdash + | r = \neg \left({p \land q}\right) + | c = Definition of [[Definition:Logical NAND|Logical NAND]] +}} +{{eqn | o = \dashv \vdash + | r = \neg p \lor \neg q + | c = [[De Morgan's Laws (Logic)/Disjunction of Negations|De Morgan's Laws: Disjunction of Negations]] +}} +{{end-eqn}} +{{qed}} +\end{proof} + +\begin{proof} +We apply the [[Method of Truth Tables]]. +As can be seen by inspection, the [[Definition:Truth Value|truth values]] under the [[Definition:Main Connective (Propositional Logic)|main connectives]] match for all [[Definition:Boolean Interpretation|boolean interpretations]]. +$\begin{array}{|ccc||ccccc|} \hline +p & \uparrow & q & \neg & p & \lor & \neg & q \\ +\hline +F & T & F & T & F & T & T & F \\ +F & T & T & T & F & T & F & T \\ +T & T & F & F & T & T & T & F \\ +T & F & T & F & T & F & F & T \\ +\hline +\end{array}$ +{{qed}} +\end{proof}<|endoftext|> +\section{Rule of Idempotence/Disjunction/Formulation 2/Forward Implication} +Tags: Rule of Idempotence + +\begin{theorem} +: $\vdash p \implies \left({p \lor p}\right)$ +\end{theorem} + +\begin{proof} +{{BeginTableau|p \implies \left({p \lor p}\right)}} +{{Assumption|1|p}} +{{Addition|2|1|p \lor p|1|1}} +{{Implication|3||p \implies \left({p \lor p}\right)|1|2}} +{{EndTableau}} +{{qed}} +[[Category:Rule of Idempotence]] +du69f7dq0oytomeg2rutl0ws3af7fwe +\end{proof}<|endoftext|> +\section{Rule of Idempotence/Disjunction/Formulation 2/Reverse Implication} +Tags: Rule of Idempotence + +\begin{theorem} +: $\vdash \left({p \lor p}\right) \implies p$ +\end{theorem} + +\begin{proof} +{{BeginTableau|\left({p \lor p}\right) \implies p}} +{{Premise|1|p \lor p}} +{{Assumption|2|p}} +{{ProofByCases|3|1|p|1|2|2|2|2}} +{{Implication|4||\left({p \lor p}\right) \implies p|1|3}} +{{EndTableau}} +{{qed}} +\end{proof}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 1/Form 1} +Tags: Rule of Addition + +\begin{theorem} +:$p \vdash p \lor q$ +\end{theorem}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 1} +Tags: Rule of Addition + +\begin{theorem} +:$(1): \quad p \vdash p \lor q$ +:$(2): \quad q \vdash p \lor q$ +\end{theorem}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 1/Form 2} +Tags: Rule of Addition + +\begin{theorem} +:$q \vdash p \lor q$ +\end{theorem}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 1/Form 2/Proof 2} +Tags: Truth Table Proofs, Rule of Addition + +\begin{theorem} +:$q \vdash p \lor q$ +\end{theorem} + +\begin{proof} +We apply the [[Method of Truth Tables]]. +$\begin{array}{|c||ccc|} \hline +q & p & \lor & q \\ +\hline +F & F & F & F \\ +T & F & T & T \\ +F & T & T & F \\ +T & T & T & T \\ +\hline +\end{array}$ +As can be seen, when $q$ is [[Definition:True|true]] so is $p \lor q$. 
+{{qed}} +\end{proof}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 2/Form 1} +Tags: Rule of Addition + +\begin{theorem} +:$\vdash p \implies \left({p \lor q}\right)$ +\end{theorem}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 2/Form 2} +Tags: Rule of Addition + +\begin{theorem} +:$\vdash q \implies \left({p \lor q}\right)$ +\end{theorem}<|endoftext|> +\section{Rule of Addition/Sequent Form/Formulation 2} +Tags: Rule of Addition + +\begin{theorem} +:$(1): \quad \vdash p \implies \paren {p \lor q}$ +:$(2): \quad \vdash q \implies \paren {p \lor q}$ +\end{theorem}<|endoftext|> +\section{Hilbert Proof System Instance 2 is Consistent} +Tags: Formal Systems + +\begin{theorem} +[[Definition:Hilbert Proof System/Instance 2|Instance 2]] of the [[Definition:Hilbert Proof System|Hilbert proof systems]] $\mathscr H_2$ is [[Definition:Consistent Proof System|consistent]]. +\end{theorem} + +\begin{proof} +Consider [[Definition:Constructed Semantics/Instance 1|Instance 1]] of a [[Definition:Constructed Semantics|constructed semantics]], denoted $\mathscr C_1$. +Note that $\neg p$ is not a [[Definition:Tautology (Formal Semantics)|tautology]] for $\mathscr C_1$. +We will establish that every $\mathscr H_2$-[[Definition:Theorem (Formal Systems)|theorem]] is a $\mathscr C_1$-[[Definition:Tautology (Formal Semantics)|tautology]]. +That is, that $\mathscr H_2$ is [[Definition:Sound Proof System|sound]] for $\mathscr C_1$. +Starting with the [[Definition:Axiom (Formal Systems)|axioms]]: +{{begin-axiom}} +{{axiom|n = A1 + |lc = [[Rule of Idempotence/Disjunction/Formulation 2/Reverse Implication|Rule of Idempotence]] + |m = (p \lor p) \implies p + |rc = [[Definition:Constructed Semantics/Instance 1/Rule of Idempotence|Proof of Tautology]] +}} +{{axiom|n = A2 + |lc = [[Rule of Addition/Sequent Form/Formulation 2/Form 2|Rule of Addition]] + |m = q \implies (p \lor q) + |rc = [[Definition:Constructed Semantics/Instance 1/Rule of Addition|Proof of Tautology]] +}} +{{axiom|n = A3 + |lc = [[Rule of Commutation/Disjunction/Formulation 2/Forward Implication|Rule of Commutation]] + |m = (p \lor q) \implies (q \lor p) + |rc = [[Definition:Constructed Semantics/Instance 1/Rule of Commutation|Proof of Tautology]] +}} +{{axiom|n = A4 + |lc = [[Factor Principles/Disjunction on Left/Formulation 2|Factor Principle]] + |m = (q \implies r) \implies \left({ (p \lor q) \implies (p \lor r)}\right) + |rc = [[Definition:Constructed Semantics/Instance 1/Factor Principle|Proof of Tautology]] +}} +{{end-axiom}} +Next it needs to be shown that the [[Definition:Hilbert Proof System/Instance 2|rules of inference of $\mathscr H_2$]] preserve $\mathscr C_1$-[[Definition:Tautology (Formal Semantics)|tautologies]]. +=== Rule $RST \, 1$: Rule of Uniform Substitution === +By definition, any [[Definition:WFF of Propositional Logic|WFF]] is assigned a value $1$ or $2$. +Thus, in applying [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 1$]], we are introducing $1$ or $2$ in the position of a [[Definition:Propositional Variable|propositional variable]]. +But all possibilities of assignments of $1$s and $2$s to such [[Definition:Propositional Variable|propositional variables]] were shown not to affect the resulting value $2$ of the axioms. +Hence [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 1$]] preserves $\mathscr C_1$-[[Definition:Tautology (Formal Semantics)|tautologies]]. 
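Before moving on to the remaining rules, note that the claim that each of the axioms $A1$ to $A4$ takes the value $2$ under every assignment can be spot-checked mechanically. The Python sketch below is an informal aside: it assumes the usual reading of [[Definition:Constructed Semantics/Instance 1|Instance 1]] as having the two values $1$ and $2$, with $\neg$ interchanging them and $\lor$ returning the larger value, and with $\land$ and $\implies$ introduced via Rule $RST \, 2$; the encoding and the names are mine, not the source's.
<syntaxhighlight lang="python">
# Informal sketch: verify A1-A4 take the designated value 2 under an assumed
# reading of Constructed Semantics Instance 1 (values {1, 2}, neg swaps them, disj = max).
from itertools import product

def neg(a):     return 3 - a
def disj(a, b): return max(a, b)
def conj(a, b): return neg(disj(neg(a), neg(b)))   # Rule RST 2, definition (1); used by Rule RST 4
def impl(a, b): return disj(neg(a), b)             # Rule RST 2, definition (2)

axioms = [
    lambda p, q, r: impl(disj(p, p), p),                             # A1
    lambda p, q, r: impl(q, disj(p, q)),                             # A2
    lambda p, q, r: impl(disj(p, q), disj(q, p)),                    # A3
    lambda p, q, r: impl(impl(q, r), impl(disj(p, q), disj(p, r))),  # A4
]

for axiom in axioms:
    assert all(axiom(p, q, r) == 2 for p, q, r in product((1, 2), repeat=3))
print("All four axioms evaluate to 2 under every assignment.")
</syntaxhighlight>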
+=== Rule $RST \, 2$: Rule of Substitution by Definition === +Because the definition of $\mathscr C_1$ was given in terms of [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 2$]], it cannot affect any of its results. +=== Rule $RST \, 3$: Rule of Detachment === +Suppose $\mathbf A$ and $\mathbf A \implies \mathbf B$ both take value $2$. +Then using [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 2$]], definition $(2)$, we get: +:$\neg \mathbf A \lor \mathbf B$ +taking value $2$ by assumption. +But $\neg \mathbf A$ takes value $1$ by definition of $\neg$. +So from the definition of $\lor$ it must be that $\mathbf B$ takes value $2$. +Hence [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 3$]] also produces only [[Definition:WFF of Propositional Logic|WFFs]] of value $2$. +=== Rule $RST \, 4$: Rule of Adjunction === +Suppose $\mathbf A$ and $\mathbf B$ take value $2$. +Then: +{{begin-eqn}} +{{eqn|l = \mathbf A \land \mathbf B + |r = 2 \land 2 +}} +{{eqn|r = \neg ( \neg 2 \lor \neg 2 ) + |c = [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 2 \, (1)$]] +}} +{{eqn|r = \neg ( 1 \lor 1 ) +}} +{{eqn|r = \neg 1 +}} +{{eqn|r = 2 +}} +{{end-eqn}} +proving that [[Definition:Hilbert Proof System/Instance 2|Rule $RST \, 4$]] also produces only $2$s from $2$s. +Hence $\mathscr H_2$ is [[Definition:Sound Proof System|sound]] for $\mathscr C_1$. +In particular: +:$\not\vdash_{\mathscr H_2} \neg p$ +Hence $\mathscr H_2$ is [[Definition:Consistent Proof System|consistent]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Set of Local Minimum is Countable} +Tags: Real Analysis, Countable Sets + +\begin{theorem} +Let $X$ be a [[Definition:Subset|subset]] of $\R$. +The set: +:$\leftset {x \in X: x}$ is [[Definition:Local Minimum in Set of Reals|local minimum in]] $\rightset X$ +is [[Definition:Countable Set|countable]]. +\end{theorem} + +\begin{proof} +Define: +:$Y := \leftset {x \in X: x}$ is [[Definition:Local Minimum in Set of Reals|local minimum in]] $\rightset X$ +By definition of $Y$ and definition of [[Definition:Local Minimum in Set of Reals|local minimum in set]]: +:$\forall x \in Y: \exists y \in \R: y < x \land \openint y x \cap X = \O$ +By the [[Axiom:Axiom of Choice|Axiom of Choice]], define a [[Definition:Mapping|mapping]] $f: Y \to \powerset \R$ as: +:$\forall x \in Y: \exists y \in \R: \map f x = \openint y x \land y < x \land \map f x \cap X = \O$ +We will prove that $f$ is an [[Definition:Injection|injection]] by definition: +Let $x_1, x_2 \in Y$ such that +:$\map f {x_1} = \map f {x_2}$ +By definition of $f$: +:$\exists y_1 \in \R: \map f {x_1} = \openint {y_1} {x_1} \land y_1 < x_1 \land \map f {x_1} \cap X = \O$ +and: +:$\exists y_2 \in \R: \map f {x_2} = \openint {y_2} {x_2} \land y_2 < x_2 \land \map f {x_2} \cap X = \O$ +Then: +:$\openint {y_1} {x_1} = \openint {y_2} {x_2}$ +Thus $x_1 = x_2$. +This ends the proof of [[Definition:Injection|injection]]. +By [[Cardinality of Image of Injection]]: +:$(1): \quad \card Y = \card {\map {f^\to} Y} = \card {\Img f}$ +where +:$\card Y$ denotes the [[Definition:Cardinality|cardinality]] of $Y$, +:$\map {f^\to} Y$ denotes the [[Definition:Image of Subset under Mapping|image of $Y$ under $f$]], +:$\Img f$ denotes the [[Definition:Image of Mapping|image]] of $f$. +We will prove that $\Img f$ is [[Definition:Pairwise Disjoint|pairwise disjoint]] by definition. +Let $A, B \in \Img f$ such that +:$A \ne B$. 
+Then by definition of [[Definition:Image of Mapping|image]]: +:$\exists x_1 \in Y: \map f {x_1} = A$ +and +:$\exists x_2 \in Y: \map f {x_2} = B$. +By difference of $A$ and $B$: +:$x_1 \ne x_2$ +By definition of $f$: +:$\exists y_1 \in \R: \map f {x_1} = \openint {y_1} {x_1} \land y_1 < x_1 \land \map f {x_1} \cap X = \O$ +and: +:$\exists y_2 \in \R: \map f {x_2} = \openint {y_2} {x_2} \land y_2 < x_2 \land \map f {x_2} \cap X = \O$ +Aiming at contradiction suppose that +:$A \cap B \ne \O$. +$x_1 < x_2$ or $x_1 > x_2$. +In case when $x_1 < x_2$, $x_1 \in \map f {x_2}$ what contradicts with $\map f {x_2} \cap X = \O$. +In case when $x_1 > x_2$, analogically. +This ends the proof that $\Img f$ is [[Definition:Pairwise Disjoint|pairwise disjoint]]. +By [[Set of Pairwise Disjoint Intervals is Countable]]: +:$\Img f$ is [[Definition:Countable Set|countable]]. +Thus by $(1)$ and [[Set is Countable if Cardinality equals Cardinality of Countable Set]] the result: +:$Y$ is [[Definition:Countable Set|countable]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Set of Pairwise Disjoint Intervals is Countable} +Tags: Countable Sets + +\begin{theorem} +Let $X$ be a [[Definition:Subset|subset]] of $\mathcal P \left({\R}\right)$ such that: +:$(1): \quad X$ is [[Definition:Pairwise Disjoint|pairwise disjoint]]: +::::$\forall A,B \in X: A \ne B \implies A \cap B = \varnothing$. +:$(2): \quad$ every [[Definition:Element|element]] of $X$ [[Definition:Superset|contains]] an [[Definition:Open Real Interval|open interval]]: +::::$\forall A \in X: \exists x, y \in \R: x < y \land \left({x \,.\,.\, y}\right) \subseteq A$. +Then $X$ is [[Definition:Countable Set|countable]]. +\end{theorem} + +\begin{proof} +By [[Between two Real Numbers exists Rational Number]]: +:$\forall A \in X: \exists x, y \in \R, q \in \Q: x < y \land q \in \left({x \,.\,.\, y}\right) \subseteq A$ +By the [[Axiom:Axiom of Choice|Axiom of Choice]] define a [[Definition:Mapping|mapping]] $f: X \to \Q$: +:$\forall A \in X: f \left({A}\right) \in A$ +First it needs to be shown that $f$ is an [[Definition:Injection|injection]] by definition. +Let $A, B \in X$ such that: +:$f \left({A}\right) = f \left({B}\right)$ +By definition of $f$: +:$f \left({A}\right) \in A$ and $f \left({B}\right) \in B$ +By definition of [[Definition:Set Intersection|intersection]]: +:$f \left({A}\right) \in A \cap B$ +Then by definition of [[Definition:Empty Set|empty set]]: +:$A \cap B \ne \varnothing$ +Thus by definition of [[Definition:Pairwise Disjoint|pairwise disjoint]]: +:$A = B$ +Hence $f$ is an [[Definition:Injection|injection]]. +By [[Set is Subset of Itself]], $X$ is a [[Definition:Subset|subset]] of $X$. +Thus by [[Cardinality of Image of Injection]]: +:$\left\vert{X}\right\vert = \left\vert{f^\to \left({X}\right)}\right\vert$ +By definition of [[Definition:Image of Subset under Mapping|image]]: +:$f^\to \left({X}\right) \subseteq \Q$ +By [[Rational Numbers are Countably Infinite]]: +:$\Q$ is [[Definition:Countable Set|countable]]. +Hence by [[Subset of Countable Set is Countable]]: +:$f^\to \left({X}\right)$ is [[Definition:Countable Set|countable]]. +Thus by [[Set is Countable if Cardinality equals Cardinality of Countable Set]] the result: +:$X$ is [[Definition:Countable Set|countable]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Set is Countable if Cardinality equals Cardinality of Countable Set} +Tags: Countable Sets + +\begin{theorem} +Let $X, Y$ be [[Definition:Set|sets]]. 
+Let: +: $\left\vert{X}\right\vert = \left\vert{Y}\right\vert$ +where $\left\vert{X}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $X$. +If $X$ is [[Definition:Countable Set|countable]] then $Y$ is [[Definition:Countable Set|countable]]. +\end{theorem} + +\begin{proof} +Assume that $X$ is [[Definition:Countable Set|countable]]. +By definition of [[Definition:Countable Set|countable set]] there exists an [[Definition:Injection|injection]]: +:$f: X \to \N$ +By definition of [[Definition:Cardinality|cardinality]] the sets $Y$ and $X$ are [[Definition:Set Equivalence|equivalent]]: +:$Y \sim X$ +Then by definition of [[Definition:Set Equivalence|set equivalence]] there exists a [[Definition:Bijection|bijection]]: +:$g: Y \to X$ +By definition of [[Definition:Bijection|bijection]]: +:$g$ is an [[Definition:Injection|injection]]. +Hence by [[Composite of Injections is Injection]]: +:$f \circ g: Y \to \N$ is an [[Definition:Injection|injection]]. +Thus by definition: +: $Y$ is [[Definition:Countable Set|countable]]. +{{qed}} +[[Category:Countable Sets]] +\end{proof}<|endoftext|> +\section{Double Negation/Double Negation Introduction/Sequent Form} +Tags: Double Negation + +\begin{theorem} +{{:Double Negation/Double Negation Introduction/Sequent Form/Formulation 1}} +\end{theorem}<|endoftext|> +\section{Double Negation/Double Negation Elimination/Proof Rule} +Tags: Proof Rules, Double Negation + +\begin{theorem} +:If we can conclude $\neg \neg \phi$, then we may infer $\phi$. +\end{theorem}<|endoftext|> +\section{Biconditional Elimination/Also known as} +Tags: Biconditional Elimination + +\begin{theorem} +Some sources refer to the [[Biconditional Elimination]] as the rule of '''Biconditional-Conditional'''. +\end{theorem}<|endoftext|> +\section{Biconditional Elimination/Proof Rule} +Tags: Proof Rules, Biconditional Elimination + +\begin{theorem} +:$(1): \quad$ If we can conclude $\phi \iff \psi$, then we may infer $\phi \implies \psi$. +:$(2): \quad$ If we can conclude $\phi \iff \psi$, then we may infer $\psi \implies \phi$. +\end{theorem}<|endoftext|> +\section{Principle of Non-Contradiction/Explanation} +Tags: Principle of Non-Contradiction + +\begin{theorem} +The '''[[Principle of Non-Contradiction]]''' can be expressed in [[Definition:Natural Language|natural language]] as follows: +:A [[Definition:Statement|statement]] cannot be both [[Definition:True|true]] and not [[Definition:True|true]] at the same time. +This means: if we have managed to deduce that a [[Definition:Statement|statement]] is both [[Definition:True|true]] and [[Definition:False|false]], then the sequence of deductions shows that the [[Definition:Pool of Assumptions|pool of assumptions]] upon which the [[Definition:Sequent|sequent]] rests contains [[Definition:Assumption|assumptions]] which are mutually [[Definition:Contradiction|contradictory]]. +Thus it provides a means of eliminating a [[Definition:Logical Not|logical not]] from a [[Definition:Sequent|sequent]]. +\end{theorem}<|endoftext|> +\section{Principle of Non-Contradiction/Proof Rule} +Tags: Proof Rules, Principle of Non-Contradiction + +\begin{theorem} +:If we can conclude both $\phi$ and $\neg \phi$, we may infer a [[Definition:Contradiction|contradiction]]. 
+\end{theorem}<|endoftext|> +\section{Principle of Non-Contradiction/Sequent Form} +Tags: Principle of Non-Contradiction + +\begin{theorem} +{{:Principle of Non-Contradiction/Sequent Form/Formulation 1}} +\end{theorem}<|endoftext|> +\section{De Morgan's Laws (Logic)/Conjunction/Definition} +Tags: De Morgan's Laws (Logic) + +\begin{theorem} +:$p \land q := \neg \left({\neg p \lor \neg q}\right)$ +\end{theorem}<|endoftext|> +\section{Rule of Material Implication/Definition} +Tags: Disjunction, Implication, Negation + +\begin{theorem} +:$p \implies q := \neg p \lor q$ +\end{theorem}<|endoftext|> +\section{Factorization of Natural Numbers within 4 n + 1 not Unique} +Tags: Number Theory + +\begin{theorem} +Let: +:$S = \set {4 n + 1: n \in \N} = \set {1, 5, 9, 13, 17, \ldots}$ +be the [[Definition:Set|set]] of [[Definition:Natural Number|natural numbers]] of the form $4 n + 1$. +Then not all [[Definition:Element|elements]] of $S$ have a [[Definition:Complete Factorization|complete factorization]] by other [[Definition:Element|elements]] of $S$ which is [[Definition:Unique|unique]]. +\end{theorem} + +\begin{proof} +[[Proof by Counterexample]]: +Consider the [[Definition:Natural Number|number]]: +:$m = 693 = 3^2 \times 7 \times 11$ +Thus: +:$m = 9 \times 77 = 21 \times 33$ +We have that: +{{begin-eqn}} +{{eqn | l = 9 + | r = 4 \times 2 + 1 + | rr= \in S +}} +{{eqn | l = 77 + | r = 4 \times 19 + 1 + | rr= \in S +}} +{{eqn | l = 21 + | r = 4 \times 5 + 1 + | rr= \in S +}} +{{eqn | l = 33 + | r = 4 \times 8 + 1 + | rr= \in S +}} +{{end-eqn}} +The [[Definition:Divisor of Integer|divisors]] of these numbers are as follows: +{{begin-eqn}} +{{eqn | l = 9 + | r = 3^2 + | c = where $3 \notin S$ +}} +{{eqn | l = 77 + | r = 7 \times 11 + | c = where $7 \notin S$ and $11 \notin S$ +}} +{{eqn | l = 21 + | r = 3 \times 7 + | c = where $3 \notin S$ and $7 \notin S$ +}} +{{eqn | l = 33 + | r = 3 \times 11 + | c = where $3 \notin S$ and $11 \notin S$ +}} +{{end-eqn}} +Thus $693$ has two different [[Definition:Complete Factorization|complete factorizations]] into [[Definition:Element|elements]] of $S$. +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Solutions of Pythagorean Equation/Primitive} +Tags: Solutions of Pythagorean Equation + +\begin{theorem} +The [[Definition:Set|set]] of all [[Definition:Primitive Pythagorean Triple|primitive Pythagorean triples]] is generated by: +:$\tuple {2 m n, m^2 - n^2, m^2 + n^2}$ +where: +:$m, n \in \Z_{>0}$ are [[Definition:Strictly Positive Integer|(strictly) positive integers]] +:$m \perp n$, that is, $m$ and $n$ are [[Definition:Coprime Integers|coprime]] +:$m$ and $n$ are of [[Definition:Parity of Integer|opposite parity]] +:$m > n$ +\end{theorem}<|endoftext|> +\section{Solutions of Pythagorean Equation/General} +Tags: Solutions of Pythagorean Equation + +\begin{theorem} +Let $x, y, z$ be a solution to the [[Definition:Pythagorean Equation|Pythagorean equation]]. +Then $x = k x', y = k y', z = k z'$, where: +:$\tuple {x', y', z'}$ is a [[Definition:Primitive Pythagorean Triple|primitive Pythagorean triple]] +:$k \in \Z: k \ge 1$ +\end{theorem} + +\begin{proof} +Let $\tuple {x, y, z}$ be non-[[Definition:Primitive Pythagorean Triple|primitive solution]] to the [[Definition:Pythagorean Equation|Pythagorean equation]]. +Let: +:$\exists k \in \Z: k \ge 2, k \divides x, k \divides y$ +such that $x \perp y$. +Then we can express $x$ and $y$ as $x = k x', y = k y'$. +Thus: +:$z^2 = k^2 x'^2 + k^2 y'^2 = k^2 z'^2$ +for some $z' \in \Z$. 
+Let: +:$\exists k \in \Z: k \ge 2, k \divides x, k \divides z$ +such that $x \perp z$ +Then we can express $x$ and $z$ as $x = k x', z = k z'$. +Thus: +:$y^2 = k^2 z'^2 - k^2 x'^2 = k^2 y'^2$ +for some $y' \in \Z$. +Similarly for any [[Definition:Common Divisor of Integers|common divisor]] of $y$ and $z$. +Thus any [[Definition:Common Divisor of Integers|common divisor]] of any pair of $x, y, z$ has to be a [[Definition:Common Divisor of Integers|common divisor of Integers]] of the other. +Hence any non-[[Definition:Primitive Pythagorean Triple|primitive solution]] to the [[Definition:Pythagorean Equation|Pythagorean equation]] is a constant multiple of some [[Definition:Primitive Pythagorean Triple|primitive solution]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Goldbach Conjecture implies Goldbach's Marginal Conjecture} +Tags: Prime Numbers, Goldbach Conjecture + +\begin{theorem} +Suppose the [[Goldbach Conjecture]] holds: +:Every [[Definition:Even Integer|even integer]] greater than $2$ is the sum of two [[Definition:Prime Number|primes]]. +Then [[Goldbach's Marginal Conjecture]] follows: +:Every [[Definition:Integer|integer]] greater than $5$ can be written as the sum of three [[Definition:Prime Number|primes]]. +\end{theorem} + +\begin{proof} +Suppose the [[Goldbach Conjecture]] holds. +Let $n \in \Z$ such that $n > 5$. +Let $n$ be an [[Definition:Odd Integer|odd integer]]. +Then $n - 3$ is an [[Definition:Even Integer|even integer]] greater than $2$. +By the [[Goldbach Conjecture]]: +:$n - 3 = p_1 + p_2$ +where $p_1$ and $p_2$ are both [[Definition:Prime Number|primes]]. +Then: +:$n = p_1 + p_2 + 3$ +As $3$ is [[Definition:Prime Number|prime]], the result follows. +Let $n$ be an [[Definition:Even Integer|even integer]]. +Then $n - 2$ is an [[Definition:Even Integer|even integer]] greater than $3$ and so greater than $2$. +By the [[Goldbach Conjecture]]: +:$n - 2 = p_1 + p_2$ +where $p_1$ and $p_2$ are both [[Definition:Prime Number|primes]]. +Then: +:$n = p_1 + p_2 + 2$ +As $2$ is [[Definition:Prime Number|prime]], the result follows. +{{qed}} +[[Category:Prime Numbers]] +[[Category:Goldbach Conjecture]] +ru92lcffn8n43wrt1rqdv6uzsj1evo7 +\end{proof}<|endoftext|> +\section{Infinite Number of Chen Primes} +Tags: Prime Numbers + +\begin{theorem} +There exists an [[Definition:Infinite Set|infinite number]] of [[Definition:Chen Prime|Chen primes]]. +That is, there exists an [[Definition:Infinite Set|infinite number]] of [[Definition:Doubleton|pairs]]: +:$\set {p, p + 2}$ +where: +:$p$ is a [[Definition:Prime Number|prime]] +:$p + 2$ is either a [[Definition:Prime Number|prime]] or a [[Definition:Semiprime Number|semiprime]]. +\end{theorem}<|endoftext|> +\section{Congruent Integers are of same Quadratic Character} +Tags: Quadratic Residues + +\begin{theorem} +Let $p$ be an [[Definition:Odd Prime|odd prime]]. +Let $a \in \Z$ be an [[Definition:Integer|integer]] such that $a \not \equiv 0 \pmod p$. +Let $a \equiv b \pmod p$. +Then $a$ and $b$ have the same [[Definition:Quadratic Character|quadratic character]]. +\end{theorem} + +\begin{proof} +Let $a \equiv b \pmod p$. +Then by [[Congruence of Powers]]: +:$a^2 \equiv b^2 \pmod p$ +Hence: +:$x^2 \equiv a \pmod p$ has a solution {{iff}} $x^2 \equiv b \pmod p$. +Hence the result. 
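As an informal numeric illustration, not part of the proof: take $p = 7$; then $16 \equiv 2$ and $10 \equiv 3 \pmod 7$, and in each congruent pair both members have the same [[Definition:Quadratic Character|quadratic character]]. The Python sketch below checks this by brute force; the examples are arbitrary ones of my choosing.
<syntaxhighlight lang="python">
# Informal illustration: congruent integers modulo an odd prime share their quadratic character.
def is_quadratic_residue(a, p):
    # a is assumed not congruent to 0 mod p; test whether x^2 = a (mod p) has a solution
    return any(pow(x, 2, p) == a % p for x in range(1, p))

p = 7
for a, b in [(2, 16), (3, 10)]:        # 16 = 2 and 10 = 3 modulo 7
    assert (a - b) % p == 0
    assert is_quadratic_residue(a, p) == is_quadratic_residue(b, p)
print("Sampled congruent pairs modulo 7 share their quadratic character.")
</syntaxhighlight>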
+{{qed}} +[[Category:Quadratic Residues]] +\end{proof}<|endoftext|> +\section{Weight of Sorgenfrey Line is Continuum} +Tags: Sorgenfrey Line + +\begin{theorem} +Let $T = \struct {\R, \tau}$ be the [[Definition:Sorgenfrey Line|Sorgenfrey line]]. +Then $\map w T = \mathfrak c$ +where +:$\map w T$ denotes the [[Definition:Weight of Topological Space|weight]] of $T$ +:$\mathfrak c$ denotes [[Definition:Cardinality of Continuum|continuum]], the [[Definition:Cardinality|cardinality]] of the [[Definition:Real Number|real numbers]]. +\end{theorem} + +\begin{proof} +By definition of [[Definition:Sorgenfrey Line|Sorgenfrey line]], the [[Definition:Set|set]]: +:$\BB = \set {\hointr x y: x, y \in \R \land x < y}$ +is a [[Definition:Analytic Basis|basis]] of $T$. +By definition of [[Definition:Weight of Topological Space|weight]]: +:$\map w T \le \card \BB$ +where $\card \BB$ denotes the [[Definition:Cardinality|cardinality]] of $\BB$. +By [[Cardinality of Basis of Sorgenfrey Line not greater than Continuum]]: +:$\card \BB \le \mathfrak c$ +Thus +:$\map w T \le \mathfrak c$ +It remains to show that: +:$\mathfrak c \le \map w T$ +Aiming for a contradiction, suppose that +:$\mathfrak c \not \le \map w T$ +Then: +:$\map w T < \mathfrak c$ +By definition of [[Definition:Weight of Topological Space|weight]], there exists a [[Definition:Analytic Basis|basis]] $\BB_0$ of $T$: +:$\map w T = \card {\BB_0}$ +Then by [[Set of Subset of Reals with Cardinality less than Continuum has not Interval in Union Closure]]: +:$\exists x, y \in \R: x < y \land \hointr x y \notin \set {\bigcup A: A \subseteq \BB_0} = \tau$ +By definition of $\BB$: +:$\hointr x y \in \BB \subseteq \tau$ +By definition of [[Definition:Subset|subset]], this contradicts: +:$\hointr x y \notin \tau$ +{{qed}} +\end{proof}<|endoftext|> +\section{Construction of Regular Heptadecagon} +Tags: Regular Polygons, 17 + +\begin{theorem} +It is possible to construct a [[Definition:Regular Heptadecagon|regular heptadecagon]] (that is, a [[Definition:Regular Polygon|regular polygon]] with $17$ [[Definition:Side of Polygon|sides]]) using a [[Definition:Compass and Straightedge Construction|compass and straightedge construction]]. +\end{theorem} + +\begin{proof} +It remains to be demonstrated that the [[Definition:Line Segment|line segment]] $NM$ is the [[Definition:Side of Polygon|side]] of a [[Definition:Regular Heptadecagon|regular heptadecagon]] [[Definition:Polygon Inscribed in Circle|inscribed]] in [[Definition:Circle|circle]] $ACB$. +This will be done by demonstrating that $\angle LOM$ is equal to $\dfrac {2 \pi} {17}$ [[Definition:Radian|radians]], that is, $\dfrac 1 {17}$ of the full [[Definition:Circle|circle]] $ACB$. +For convenience, let the [[Definition:Radius of Circle|radius]] $OA$ be equal to $4 a$. +By [[Pythagoras's Theorem]], $AD = a \sqrt {17}$. +By definition of [[Definition:Tangent of Angle|tangent]], $OE = a \tan \left({\dfrac {\angle ODA} 4}\right)$. +By construction, $\angle EDF = \dfrac \pi 4$ [[Definition:Radian|radians]]. 
+Thus: +{{begin-eqn}} +{{eqn | l = \frac {\tan \angle ODE + \tan \angle ODF} {1 - \tan \angle ODE \tan \angle ODF} + | r = \tan \angle EDF + | c = [[Tangent of Sum]] +}} +{{eqn | r = 1 + | c = [[Tangent of 45 Degrees|Tangent of $\dfrac \pi 4$]] +}} +{{end-eqn}} +{{ProofWanted|finish off -- the algebra gets complicated from here on in.}} +\end{proof}<|endoftext|> +\section{Set of Subset of Reals with Cardinality less than Continuum has not Interval in Union Closure} +Tags: Infinite Sets + +\begin{theorem} +Let $\mathcal B$ be a [[Definition:Set of Sets|set]] of [[Definition:Subset|subsets]] of $\R$, the [[Definition:Set|set]] of all [[Definition:Real Number|real numbers]]. +Let: +:$\left\vert{\mathcal B}\right\vert < \mathfrak c$ +where +:$\left\vert{\mathcal B}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $\mathcal B$ +:$\mathfrak c = \left\vert{\R}\right\vert$ denotes [[Definition:Cardinality of Continuum|continuum]]. +Then: +:$\exists x, y \in \R: x < y \land \left[{x \,.\,.\, y}\right) \notin \left\{{\bigcup \mathcal G: \mathcal G \subseteq \mathcal B}\right\}$ +\end{theorem} + +\begin{proof} +Define: +:$\mathcal F = \left\{{\bigcup \mathcal G: \mathcal G \subseteq \mathcal B}\right\}$ +Define: +:$ Z = \left\{{x \in \R: \exists U \in \mathcal F: x}\right.$ is local minimum in $\left.U\right\}$ +By [[Set of Subsets of Reals with Cardinality less than Continuum Cardinality of Local Minimums of Union Closure less than Continuum]]: +:$\left\vert{Z}\right\vert < \mathfrak c$ +Then by [[Cardinalities form Inequality implies Difference is Nonempty]]: +:$\R \setminus Z \ne \varnothing$ +Hence by definition of [[Definition:Empty Set|empty set]]: +:$\exists z: z \in \R \setminus Z$ +By definition of [[Definition:Set Difference|difference]]: +:$z \in \R \land z \notin Z$ +Thus $z < z+1$. +We will show that $z$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $\left[{z \,.\,.\, z+1}\right)$. +Thus: +:$z \in \left[{z \,.\,.\, z+1}\right)$ +Hence: +: $z-1 < z$ +Thus: +: $\left({z-1 \,.\,.\, z}\right) \cap \left[{z \,.\,.\, z+1}\right) = \varnothing$ +Then by definition $z$ is a [[Definition:Local Minimum in Set of Reals|local minimum]] in $\left[{z \,.\,.\, z+1}\right)$. +Because $z \notin Z$: +:$\left[{z \,.\,.\, z+1}\right) \notin \mathcal F$ +{{qed}} +\end{proof}<|endoftext|> +\section{Cardinality of Basis of Sorgenfrey Line not greater than Continuum} +Tags: Sorgenfrey Line + +\begin{theorem} +Let $T = \struct {\R, \tau}$ be the [[Definition:Sorgenfrey Line|Sorgenfrey line]]. +Let +:$\BB = \set {\hointr x y: x, y \in \R \land x < y}$ +be the [[Definition:Analytic Basis|basis]] of $T$. +Then $\card \BB \le \mathfrak c$ +where +:$\card \BB$ denotes the [[Definition:Cardinality|cardinality]] of $\BB$ +:$\mathfrak c = \card \R$ denotes the [[Definition:Cardinality of Continuum|continuum]]. +\end{theorem} + +\begin{proof} +Define a [[Definition:Mapping|mapping]] $f: \BB \to \R \times \R$: +:$\forall I \in \BB: \map f I = \tuple {\min I, \sup I}$ +That is: +:$\map f {\hointr x y} = \tuple {x, y} \forall x, y \in \R: x < y$ +We will show that $f$ is an [[Definition:Injection|injection]] by definition. 
+Let $I_1, I_2 \in \BB$ such that: +:$\map f {I_1} = \map f {I_2}$ +{{begin-eqn}} +{{eqn | l = I_1 + | r = \hointr {\min I_1} {\sup I_1} + | c = {{Defof|Half-Open Real Interval}} +}} +{{eqn | r = \hointr {\min I_2} {\sup I_2} + | c = by $\map f {I_1} = \map f {I_2}$ +}} +{{eqn | r = I_2 + | c = {{Defof|Half-Open Real Interval}} +}} +{{end-eqn}} +So: +:$I_1 = I_2$ +Thus $f$ is an [[Definition:Injection|injection]]. +By [[Injection implies Cardinal Inequality]]: +:$\card \BB \le \card {\R \times \R}$ +By [[Cardinal Product Equal to Maximum]]: +:$\card {\R \times \R} = \map \max {\mathfrak c, \mathfrak c}$ +Thus: +:$\card \BB \le \mathfrak c$ +{{qed}} +\end{proof}<|endoftext|> +\section{Construction of Regular Prime p-Gon Exists iff p is Fermat Prime} +Tags: Regular Polygons, Fermat Primes + +\begin{theorem} +Let $p$ be a [[Definition:Prime Number|prime number]]. +Then there exists a [[Definition:Compass and Straightedge Construction|compass and straightedge construction]] for a [[Definition:Regular Polygon|regular $p$-gon]] {{iff}} $p$ is a [[Definition:Fermat Prime|Fermat prime]]. +\end{theorem}<|endoftext|> +\section{Cardinalities form Inequality implies Difference is Nonempty} +Tags: Cardinals + +\begin{theorem} +Let $X, Y$ be [[Definition:Set|sets]]. +Let +:$\left\vert{X}\right\vert < \left\vert{Y}\right\vert$ +where $\left\vert{X}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $X$. +Then: +:$Y \setminus X \ne \varnothing$ +\end{theorem} + +\begin{proof} +Aiming for a contradiction suppose that +: $Y \setminus X = \varnothing$ +Then by [[Set Difference with Superset is Empty Set]]: +:$Y \subseteq X$ +Hence by [[Subset implies Cardinal Inequality]]: +:$\left\vert{Y}\right\vert \leq \left\vert{X}\right\vert$ +This contradicts: +:$\left\vert{X}\right\vert < \left\vert{Y}\right\vert$ +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Set of Subsets of Reals with Cardinality less than Continuum Cardinality of Local Minimums of Union Closure less than Continuum} +Tags: Infinite Sets + +\begin{theorem} +Let $\BB$ be a [[Definition:Set of Sets|set]] of [[Definition:Subset|subsets]] of $\R$. +Let: +:$\size \BB < \mathfrak c$ +where +:$\size \BB$ denotes the [[Definition:Cardinality|cardinality]] of $\BB$ +:$\mathfrak c = \size \R$ denotes [[Definition:Cardinality of Continuum|continuum]]. +Let +:$X = \leftset {x \in \R: \exists U \in \set {\bigcup \GG: \GG \subseteq \BB}: x}$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $\rightset U$ +Then: +:$\size X < \mathfrak c$ +\end{theorem} + +\begin{proof} +We will prove that: +:$(1): \quad \size \BB \aleph_0 < \mathfrak c$ +where $\aleph_0 = \size \N$ by [[Aleph Zero equals Cardinality of Naturals]]. 
+In the case when $\size \BB = \mathbf 0$ we have by [[Zero of Cardinal Product is Zero]]: +:$\size \BB \aleph_0 = \mathbf 0 < \mathfrak c$ +In the case when $\mathbf 0 < \size \BB < \aleph_0$: +{{begin-eqn}} +{{eqn | l = \size \BB \aleph_0 + | r = \aleph_0 \size \BB + | c = [[Product of Cardinals is Commutative]] +}} +{{eqn | r = \size {\N \times \BB} + | c = {{Defof|Product of Cardinals}} +}} +{{eqn | r = \map \max {\size \N, \size \BB} + | c = [[Cardinal Product Equal to Maximum]] +}} +{{eqn | r = \aleph_0 + | c = because $\size \BB < \aleph_0$ +}} +{{eqn | o = < + | r = \mathfrak c + | c = [[Aleph Zero is less than Continuum]] +}} +{{end-eqn}} +In the case when $\size \BB \ge \aleph_0$ we have: +{{begin-eqn}} +{{eqn | l = \size \BB \aleph_0 + | r = \size {\BB \times \N} + | c = definition of [[Definition:Product of Cardinals]] +}} +{{eqn | r = \map \max {\size \BB, \size \N} + | c = [[Cardinal Product Equal to Maximum]] +}} +{{eqn | r = \size \BB + | c = because $\size \BB \ge \aleph_0$ +}} +{{eqn | o = < + | r = \mathfrak c + | c = assumption +}} +{{end-eqn}} +Define: +:$Y = \leftset {x \in \R: \exists U \in \BB: x}$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $\rightset U$ +We will show that $X \subseteq Y$ by definition of [[Definition:Subset|subset]]. +Let $x \in X$. +By definition of $X$: +:$\exists U \in \leftset {\bigcup \GG: \GG \subseteq \BB}: x$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $\rightset U$ +:$\exists \GG \subseteq \BB: U = \bigcup \GG$ +By definition of [[Definition:Local Minimum in Set of Reals|local minimum]]: +:$x \in U$ +By definition of [[Definition:Union of Set of Sets|union]]: +:$\exists V \in \GG: x \in V$ +By definition of [[Definition:Subset|subset]] +:$V \in \BB$ +By definition of [[Definition:Local Minimum in Set of Reals|local minimum]] +:$\exists y \in \R: y < x \land \openint y x \cap U = \O$ +By [[Set is Subset of Union]]: +:$V \subseteq U$ +Then: +:$\exists y \in \R: y < x \land \openint y x \cap V = \O$ +By definition: +:$x$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $V$ +Thus by definition of $Y$ +:$x \in Y$ +So +:$(2): \quad X \subseteq Y$ +Define $\family {Z_A}_{A \mathop \in \BB}$ as: +:$Z_A = \leftset {x \in \R: x}$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $\rightset A$ +We will prove that: +:$(3): \quad Y \subseteq \ds \bigcup_{A \mathop \in \BB} Z_A$ +Let $x \in Y$. +By definition of $Y$: +:$\exists U \in \BB: x$ is [[Definition:Local Minimum in Set of Reals|local minimum]] in $U$ +By definition of $Z_U$: +:$x \in Z_U$ +Thus by definition of [[Definition:Union of Family|union]]: +:$x \in \ds \bigcup_{A \mathop \in \BB} Z_A$ +This ends the proof of inclusion. 
+{{qed|lemma}}
+By [[Set of Local Minimum is Countable]]:
+:$\forall A \in \BB: Z_A$ is [[Definition:Countable Set|countable]]
+By [[Countable iff Cardinality not greater than Aleph Zero]]:
+:$\forall A \in \BB: \size {Z_A} \le \aleph_0$
+By [[Cardinality of Union not greater than Product]]:
+:$(4): \quad \ds \size {\bigcup_{A \mathop \in \BB} Z_A} \le \size \BB \aleph_0$
+Thus:
+{{begin-eqn}}
+{{eqn | l = \size X
+ | o = \le
+ | r = \size Y
+ | c = $(2)$ and [[Subset implies Cardinal Inequality]]
+}}
+{{eqn | o = \le
+ | r = \size {\bigcup_{A \mathop \in \BB} Z_A}
+ | c = $(3)$ and [[Subset implies Cardinal Inequality]]
+}}
+{{eqn | o = \le
+ | r = \size \BB \aleph_0
+ | c = $(4)$
+}}
+{{eqn | o = <
+ | r = \mathfrak c
+ | c = $(1)$
+}}
+{{end-eqn}}
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Slope of Secant}
+Tags: Analytic Geometry
+
+\begin{theorem}
+Let $f: \R \to \R$ be a [[Definition:Real Function|real function]].
+Let the [[Definition:Graph of Mapping|graph]] of $f$ be depicted on a [[Definition:Cartesian Plane|Cartesian plane]].
+:[[File:SecantToCurve.png|400px]]
+Let $AB$ be a [[Definition:Secant of Curve|secant]] of $f$ where:
+:$A = \tuple {x, \map f x}$
+:$B = \tuple {x + h, \map f {x + h} }$
+Then the [[Definition:Slope of Straight Line|slope]] of $AB$ is given by:
+:$\dfrac {\map f {x + h} - \map f x} h$
+\end{theorem}
+
+\begin{proof}
+The [[Definition:Slope of Straight Line|slope]] of $AB$ is defined as the change in $y$ divided by the change in $x$.
+Between $A$ and $B$:
+:the change in $x$ is $\paren {x + h} - x = h$
+:the change in $y$ is $\map f {x + h} - \map f x$.
+Hence the result.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Derivative of Curve at Point}
+Tags: Analytic Geometry, Differential Calculus
+
+\begin{theorem}
+Let $f: \R \to \R$ be a [[Definition:Real Function|real function]].
+Let the [[Definition:Graph of Mapping|graph]] $G$ of $f$ be depicted on a [[Definition:Cartesian Plane|Cartesian plane]].
+Then the [[Definition:Derivative of Real Function at Point|derivative]] of $f$ at $x = \xi$ is equal to the [[Definition:Slope of Straight Line|slope]] of the [[Definition:Tangent to Curve|tangent]] to $G$ at $x = \xi$.
+\end{theorem}
+
+\begin{proof}
+Let $f: \R \to \R$ be a [[Definition:Real Function|real function]].
+:[[File:DerivativeOfCurve.png|400px]]
+Let the [[Definition:Graph of Mapping|graph]] $G$ of $f$ be depicted on a [[Definition:Cartesian Plane|Cartesian plane]].
+Let $A = \tuple {\xi, \map f \xi}$ be a [[Definition:Point|point]] on $G$.
+Consider the [[Definition:Secant of Curve|secant]] $AB$ to $G$ where $B = \tuple {\xi + h, \map f {\xi + h} }$.
+From [[Slope of Secant]], the [[Definition:Slope of Straight Line|slope]] of $AB$ is given by:
+:$\dfrac {\map f {\xi + h} - \map f \xi} h$
+By taking $h$ smaller and smaller, the [[Definition:Secant of Curve|secant]] approaches more and more closely the [[Definition:Tangent to Curve|tangent]] to $G$ at $A$.
+Hence the [[Definition:Derivative of Real Function at Point|derivative]] of $f$ at $x = \xi$, being the limit of this [[Definition:Slope of Straight Line|slope]] as $h$ approaches $0$, equals the [[Definition:Slope of Straight Line|slope]] of the [[Definition:Tangent to Curve|tangent]] to $G$ at $A$.
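+For example (an illustrative special case): with $\map f x = x^2$ and $\xi = 1$, the slope of the secant is $\dfrac {\paren {1 + h}^2 - 1} h = 2 + h$, which approaches $2$ as $h$ approaches $0$, in agreement with [[Derivative of Square Function]] at $x = 1$.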
+{{qed}} +\end{proof}<|endoftext|> +\section{Derivative of Square Function} +Tags: Derivatives + +\begin{theorem} +Let $f: \R \to \R$ be the [[Definition:Square (Algebra)|square function]]: +:$\forall x \in \R: \map f x = x^2$ +Then the [[Definition:Derivative|derivative]] of $f$ is given by: +:$\map {f'} x = 2 x$ +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \map {f'} x + | r = \lim_{h \mathop \to 0} \frac {\map f {x + h} - \map f x} h + | c = {{Defof|Derivative of Real Function at Point}} +}} +{{eqn | r = \lim_{h \mathop \to 0} \frac {\paren {x + h}^2 - x^2} h + | c = +}} +{{eqn | r = \lim_{h \mathop \to 0} \frac {x^2 + 2 x h + h^2 - x^2} h + | c = +}} +{{eqn | r = \lim_{h \mathop \to 0} \frac {2 x h + h^2} h + | c = +}} +{{eqn | r = \lim_{h \mathop \to 0} 2 x + h + | c = +}} +{{eqn | r = 2 x + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Countable iff Cardinality not greater than Aleph Zero} +Tags: Countable Sets, Aleph Mapping + +\begin{theorem} +Let $X$ be [[Definition:Set|set]]. +$X$ is [[Definition:Countable Set|countable]] {{iff}}: +$\left\vert{X}\right\vert \leq \aleph_0$ +where: +:$\left\vert{X}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $X$ +:$\aleph_0 = \left\vert{\N}\right\vert$ by [[Aleph Zero equals Cardinality of Naturals]]. +\end{theorem} + +\begin{proof} +:$X$ is [[Definition:Countable Set|countable]] +{{iff}}: +:there exists an [[Definition:Injection|injection]] $f: X \to \N$ by definition of [[Definition:Countable Set|countable set]] +{{iff}}: +:$\left\vert{X}\right\vert \leq \left\vert{\N}\right\vert$ by [[Injection iff Cardinal Inequality]] +{{iff}}: +:$\left\vert{X}\right\vert \leq \aleph_0$ +{{qed}} +\end{proof}<|endoftext|> +\section{Aleph Zero equals Cardinality of Naturals} +Tags: Aleph Mapping + +\begin{theorem} +$\aleph_0 = \left\vert{\N}\right\vert$ +where +:$\aleph$ denotes the [[Definition:Aleph Mapping|aleph mapping]], +:$\left\vert{\N}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $\N$. +\end{theorem} + +\begin{proof} +{{begin-eqn}} +{{eqn | l = \aleph_0 + | r = \left\vert{\aleph_0}\right\vert + | c = [[Cardinal of Cardinal Equal to Cardinal]] +}} +{{eqn | r = \left\vert{\omega}\right\vert + | c = definition of [[Definition:Aleph Mapping|aleph mapping]] +}} +{{eqn | r = \left\vert{\N}\right\vert + | c = [[Definition:Natural Numbers/Construction|construction of natural numbers]] +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Distance Moved by Body from Rest under Constant Acceleration} +Tags: Mechanics + +\begin{theorem} +Let a [[Definition:Body|body]] $B$ be [[Definition:Stationary|stationary]]. +Let $B$ be subject to a [[Definition:Constant|constant]] [[Definition:Acceleration|acceleration]]. +Then the [[Definition:Distance (Linear Measure)|distance]] travelled by $B$ is [[Definition:Proportion|proportional]] to the [[Definition:Square (Algebra)|square]] of the [[Definition:Length of Time|length of time]] $B$ is under the [[Definition:Acceleration|acceleration]]. 
+\end{theorem}
+
+\begin{proof}
+From [[Body under Constant Acceleration/Distance after Time|Body under Constant Acceleration: Distance after Time]]:
+:$\mathbf s = \mathbf u t + \dfrac {\mathbf a t^2} 2$
+where:
+:$\mathbf s$ is the [[Definition:Displacement|displacement]] of $B$ at [[Definition:Time|time]] $t$ from its initial position at [[Definition:Time|time]] $t = 0$
+:$\mathbf u$ is the [[Definition:Velocity|velocity]] at [[Definition:Time|time]] $t = 0$
+:$\mathbf a$ is the [[Definition:Constant|constant]] [[Definition:Acceleration|acceleration]]
+In this scenario, $\mathbf u = \mathbf 0$.
+Thus:
+:$\mathbf s = \dfrac {\mathbf a} 2 t^2$
+and so by taking the [[Definition:Magnitude|magnitudes]] of the [[Definition:Vector Quantity|vector quantities]]:
+:$s = \dfrac a 2 t^2$
+Hence the result, by definition of [[Definition:Proportion|proportional]].
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Body under Constant Acceleration/Velocity after Time}
+Tags: Mechanics
+
+\begin{theorem}
+: $\mathbf v = \mathbf u + \mathbf a t$
+\end{theorem}
+
+\begin{proof}
+By definition of [[Definition:Acceleration|acceleration]]:
+: $\dfrac {\d \mathbf v} {\d t} = \mathbf a$
+By [[Solution to Linear First Order Ordinary Differential Equation]]:
+: $\mathbf v = \mathbf c + \mathbf a t$
+where $\mathbf c$ is a [[Definition:Constant|constant]] [[Definition:Vector|vector]].
+We are given the [[Definition:Initial Condition|initial condition]]:
+:$\big.{\mathbf v}\big\rvert_{\, t \mathop = 0} = \mathbf u$
+from which it follows immediately that:
+: $\mathbf v = \mathbf u + \mathbf a t$
+{{qed}}
+[[Category:Mechanics]]
+\end{proof}<|endoftext|>
+\section{Body under Constant Acceleration/Distance after Time}
+Tags: Mechanics
+
+\begin{theorem}
+:$\mathbf s = \mathbf u t + \dfrac {\mathbf a t^2} 2$
+\end{theorem}
+
+\begin{proof}
+From [[Body under Constant Acceleration/Velocity after Time|Body under Constant Acceleration: Velocity after Time]]:
+:$\mathbf v = \mathbf u + \mathbf a t$
+By definition of [[Definition:Velocity|velocity]], this can be expressed as:
+:$\dfrac {\d \mathbf s} {\d t} = \mathbf u + \mathbf a t$
+where both $\mathbf u$ and $\mathbf a$ are [[Definition:Constant|constant]].
+By [[Solution to Linear First Order Ordinary Differential Equation]]:
+:$\mathbf s = \mathbf c + \mathbf u t + \dfrac {\mathbf a t^2} 2$
+where $\mathbf c$ is a [[Definition:Constant|constant]] [[Definition:Vector|vector]].
+We are (implicitly) given the [[Definition:Initial Condition|initial condition]]: +:$\bigvalueat {\mathbf s} {t \mathop = 0} = \mathbf 0$ +from which it follows immediately that: +:$\mathbf s = \mathbf u t + \dfrac {\mathbf a t^2} 2$ +{{qed}} +\end{proof}<|endoftext|> +\section{Body under Constant Acceleration/Velocity after Distance} +Tags: Mechanics + +\begin{theorem} +:$\mathbf v \cdot \mathbf v = \mathbf u \cdot \mathbf u + 2 \mathbf a \cdot \mathbf s$ +\end{theorem} + +\begin{proof} +From [[Body under Constant Acceleration/Velocity after Time|Body under Constant Acceleration: Velocity after Time]] +:$\mathbf v = \mathbf u + \mathbf a t$ +Then: +{{begin-eqn}} +{{eqn | l = \mathbf v \cdot \mathbf v + | r = \paren {\mathbf u + \mathbf a t} \cdot \paren {\mathbf u + \mathbf a t} + | c = +}} +{{eqn | r = \mathbf u \cdot \mathbf u + \mathbf u \cdot \mathbf a t + \mathbf a t \cdot \mathbf u + \paren {\mathbf a t} \cdot \paren {\mathbf a t} + | c = [[Dot Product Distributes over Addition]] +}} +{{eqn | r = \mathbf u \cdot \mathbf u + 2 \mathbf u \cdot \mathbf a t + \paren {\mathbf a t} \cdot \paren {\mathbf a t} + | c = [[Dot Product Operator is Commutative]] +}} +{{eqn | r = \mathbf u \cdot \mathbf u + 2 \mathbf u \cdot \mathbf a t + \mathbf a \cdot \mathbf a t^2 + | c = [[Dot Product Associates with Scalar Multiplication]] +}} +{{eqn | n = 1 + | r = \mathbf u \cdot \mathbf u + \mathbf a \cdot \paren {2 \mathbf u t + \mathbf a t^2} + | c = [[Dot Product Distributes over Addition]] +}} +{{end-eqn}} +From [[Body under Constant Acceleration/Distance after Time|Body under Constant Acceleration: Distance after Time]]: +:$\mathbf s = \mathbf u t + \dfrac {\mathbf a t^2} 2$ +Substituting for $\mathbf s$ in $(1)$ gives: +:$\mathbf v \cdot \mathbf v = \mathbf u \cdot \mathbf u + 2 \mathbf a \cdot \mathbf s$ +and the proof is complete. +{{qed}} +[[Category:Mechanics]] +eslteq9h7qxgpbtjmwg2xkktl3nhdzd +\end{proof}<|endoftext|> +\section{Body in Free Fall moves in Parabolic Path} +Tags: Mechanics + +\begin{theorem} +A [[Definition:Body|body]] in [[Definition:Free Fall|free fall]] above the surface of the [[Definition:Earth|Earth]] follows a path approximating to a [[Definition:Parabola|parabola]]. +\end{theorem}<|endoftext|> +\section{Aleph Zero is less than Continuum} +Tags: Infinite Sets, Aleph Mapping + +\begin{theorem} +$\aleph_0 < \mathfrak c$ +where +:$\aleph$ denotes the [[Definition:Aleph Mapping|aleph mapping]], +:$\mathfrak c$ denotes [[Definition:Cardinality of Continuum|continuum]], the [[Definition:Cardinality|cardinality]] of [[Definition:Real Number|real numbers]]. +\end{theorem} + +\begin{proof} +By [[Continuum equals Cardinality of Power Set of Naturals]]: +:$\mathfrak c = \left\vert{\mathcal P \left({\N}\right)}\right\vert$ +where: +:$\mathcal P \left({\N}\right)$ denotes the [[Definition:Power Set|power set]] of $\N$ +:$\left\vert{\mathcal P \left({\N}\right)}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $\mathcal P \left({\N}\right)$. +By [[Cardinality of Set less than Cardinality of Power Set]]: +:$\left\vert{\N}\right\vert < \left\vert{\mathcal P \left({\N}\right)}\right\vert$ +Thus by [[Aleph Zero equals Cardinality of Naturals]]: +:$\aleph_0 < \mathfrak c$ +{{qed}} +\end{proof}<|endoftext|> +\section{Cardinality of Set less than Cardinality of Power Set} +Tags: Cardinals, Power Set, Cardinality + +\begin{theorem} +Let $X$ be a [[Definition:Set|set]]. 
+Then: +:$\left\vert{X}\right\vert < \left\vert{\mathcal P \left({X}\right)}\right\vert$ +where +:$\left\vert{X}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $X$, +:$\mathcal P \left({X}\right)$ denotes the [[Definition:Power Set|power set]] of $X$. +\end{theorem} + +\begin{proof} +By [[No Bijection from Set to its Power Set]]: +: there exist no [[Definition:Bijection|bijections]] $X \to \mathcal P \left({X}\right)$ +Then by definition of [[Definition:Set Equivalence|set equivalence]]: +:$X \not\sim \mathcal P \left({X}\right)$ +Hence by definition of [[Definition:Cardinality|cardinality]]: +:$(1): \quad \left\vert{X}\right\vert \ne \left\vert{\mathcal P \left({X}\right)}\right\vert$ +By [[Cardinality of Set of Singletons]]: +:$(2): \quad \left\vert{\left\{{\left\{{x}\right\}: x \in X}\right\}}\right\vert = \left\vert{X}\right\vert$ +By definition of [[Definition:Subset|subset]]: +:$\forall x \in X: \left\{{x}\right\} \subseteq X$ +Then by definition of [[Definition:Power Set|power set]]: +:$\forall x \in X: \left\{{x}\right\} \in \mathcal P \left({X}\right)$ +Hence by definition of [[Definition:Subset|subset]]: +:$\left\{{\left\{{x}\right\}: x \in X}\right\} \subseteq \mathcal P \left({X}\right)$ +Then by [[Subset implies Cardinal Inequality]] and $(2)$: +:$\left\vert{X}\right\vert \leq \left\vert{\mathcal P \left({X}\right)}\right\vert$ +Thus by $(1)$: +:$\left\vert{X}\right\vert < \left\vert{\mathcal P \left({X}\right)}\right\vert$ +{{qed}} +\end{proof}<|endoftext|> +\section{Volume of Solid of Revolution} +Tags: Integral Calculus, Solids of Revolution + +\begin{theorem} +Let $f: \R \to \R$ be a [[Definition:Real Function|real function]] which is [[Definition:Integrable|integrable]] on the [[Definition:Closed Real Interval|interval]] $\closedint a b$. +Let the [[Definition:Point|points]] be defined: +:$A = \tuple {a, \map f a}$ +:$B = \tuple {b, \map f b}$ +:$C = \tuple {b, 0}$ +:$D = \tuple {a, 0}$ +Let the [[Definition:Plane Figure|figure]] $ABCD$ be defined as being bounded by the [[Definition:Straight Line|straight lines]] $y = 0$, $x = a$, $x = b$ and the [[Definition:Curve|curve]] defined by $\set {\map f x: a \le x \le b}$. +Let the [[Definition:Solid of Revolution|solid of revolution]] $S$ be generated by rotating $ABCD$ around the [[Definition:X-Axis|$x$-axis]] (that is, $y = 0$). +Then the [[Definition:Volume|volume]] $V$ of $S$ is given by: +:$\displaystyle V = \pi \int_a^b \paren {\map f x}^2 \rd x$ +\end{theorem} + +\begin{proof} +:[[File:VolumeOfSolidOfRevolution.png|500px]] +Consider a [[Definition:Rectangle|rectangle]] bounded by the lines: +:$y = 0$ +:$x = \xi$ +:$x = \xi + \delta x$ +:$y = \map f x$ +Consider the [[Definition:Cylinder|cylinder]] generated by revolving it about the [[Definition:X-Axis|$x$-axis]]. +By [[Volume of Cylinder]], the [[Definition:Volume|volume]] of this [[Definition:Cylinder|cylinder]] is: +:$V_\xi = \pi \paren {\map f x}^2 \delta x$ +{{finish|Needs finishing off, needs a rigorous treatment.}} +\end{proof}<|endoftext|> +\section{Continuum equals Cardinality of Power Set of Naturals} +Tags: Infinite Sets, Cardinals, Power Set + +\begin{theorem} +$\mathfrak c = \card {\powerset \N}$ +where +:$\powerset \N$ denotes the [[Definition:Power Set|power set]] of $\N$ +:$\card {\powerset \N}$ denotes the [[Definition:Cardinality|cardinality]] of $\powerset \N$ +:$\mathfrak c = \card \R$ denotes the [[Definition:Cardinality of Continuum|continuum]]. 
+\end{theorem} + +\begin{proof} +By [[Reals are Isomorphic to Dedekind Cuts]] there exists [[Definition:Bijection|bijection]]: +:$f: \R \to \mathscr D$ +where: +:$\mathscr D$ denotes the [[Definition:Set of Sets|set]] of all [[Definition:Dedekind Cut|Dedekind cuts]] of $\struct {\Q, \le}$. +Dedekind's cuts are [[Definition:Subset|subsets]] of $\Q$. +Therefore by definition of [[Definition:Power Set|power set]]: +:$\mathscr D \subseteq \powerset \Q$ +By [[Subset implies Cardinal Inequality]]: +:$\card {\mathscr D} \le \card {\powerset \Q}$ +By [[Rational Numbers are Countably Infinite]]: +:$\Q$ is [[Definition:Countably Infinite|countably infinite]]. +Then by definition of [[Definition:Countably Infinite Set|countably infinite]] there exists a [[Definition:Bijection|bijection]]: +:$g: \Q \to \N$ +By definition of [[Definition:Set Equivalence|set equivalence]]: +:$\Q \sim \N$ +Hence by definition of [[Definition:Cardinality|cardinality]]: +:$\card \Q = \card \N$ +Then by [[Cardinality of Power Set is Invariant]]: +:$\card {\powerset \Q} = \card {\powerset \N}$ +By definition of [[Definition:Set Equivalence|set equivalence]]: +:$\R \sim \mathscr D$ +Hence by definition of [[Definition:Cardinality|cardinality]]: +:$\card \R = \card {\mathscr D}$ +Thus: +:$\mathfrak c \le \card {\powerset \N}$ +Define a [[Definition:Mapping|mapping]] $h: \map {\operatorname {Fin} } \N \times \powerset \N \to \R^+$: +:$\forall F \in \map {\operatorname {Fin} } \N, A \in \powerset \N: \map h {F, A} = \displaystyle \sum_{i \mathop \in F} 2^i + \sum_{i \mathop \in A} \paren {\frac 1 2}^i$ +where $\map {\operatorname {Fin} } \N$ denotes the [[Definition:Set of Sets|set]] of all [[Definition:Finite Set|finite]] [[Definition:Subset|subsets]] of $\N$. +A pair $\tuple {F, A}$ corresponds to binary denotation of a real number $\map h {F, A}$. +It means that $h$ is a [[Definition:Surjection|surjection]]. 
+By [[Surjection iff Cardinal Inequality]]: +:$\card {\map {\operatorname {Fin} } \N \times \powerset \N} \le \card {\R^+}$ +By definition of [[Definition:Subset|subset]]: +:$\map {\operatorname {Fin} } \N \subseteq \powerset \N$ +Then by [[Subset implies Cardinal Inequality]]: +:$\card {\map {\operatorname {Fin} } \N} \le \card {\powerset \N}$ +{{begin-eqn}} +{{eqn | l = \card {\map {\operatorname {Fin} } \N \times \powerset \N} + | r = \max \set {\card {\map {\operatorname {Fin} } \N}, \card {\powerset \N} } + | c = [[Cardinal Product Equal to Maximum]] +}} +{{eqn | r = \card {\powerset \N} +}} +{{end-eqn}} +Because $\R^+ \subseteq \R$, we have by [[Subset implies Cardinal Inequality]]: +:$\card {\R^+} \le \card \R$ +Thus: +:$\card {\powerset \N} \le \mathfrak c$ +Hence the result: +:$\mathfrak c = \card {\powerset \N}$ +{{qed}} +\end{proof}<|endoftext|> +\section{Acceleration is Second Derivative of Displacement with respect to Time} +Tags: Mechanics + +\begin{theorem} +The '''acceleration''' $\mathbf a$ of a [[Definition:Body|body]] $M$ is the [[Definition:Second Derivative|second derivative]] of the [[Definition:Displacement|displacement]] $\mathbf s$ of $M$ from a given [[Definition:Point of Reference|point of reference]] [[Definition:Differentiation With Respect To|with respect to]] [[Definition:Time|time]] $t$: +:$\mathbf a = \dfrac {\d^2 \mathbf s} {\d t^2}$ +\end{theorem} + +\begin{proof} +By definition, the [[Definition:Acceleration|acceleration]] of a [[Definition:Body|body]] $M$ is defined as the [[Definition:Derivative|first derivative]] of the [[Definition:Velocity|velocity]] $\mathbf v$ of $M$ [[Definition:Relative Velocity|relative to]] a given [[Definition:Point of Reference|point of reference]] [[Definition:Differentiation With Respect To|with respect to]] [[Definition:Time|time]]: +:$\mathbf a = \dfrac {\d \mathbf v} {\d t}$ +Also by definition, the [[Definition:Velocity|velocity]] of $M$ is defined as the [[Definition:Derivative|first derivative]] of the [[Definition:Displacement|displacement]] of $M$ from a given [[Definition:Point of Reference|point of reference]] [[Definition:Differentiation With Respect To|with respect to]] [[Definition:Time|time]]: +:$\mathbf v = \dfrac {\d \mathbf s} {\d t}$ +That is: +:$\mathbf a = \map {\dfrac \d {\d t} } {\dfrac {\d \mathbf s} {\d t} }$ +Hence the result by definition of the [[Definition:Second Derivative|second derivative]]. +{{qed}} +\end{proof}<|endoftext|> +\section{Equation of Catenary/Formulation 2} +Tags: Catenary + +\begin{theorem} +The '''[[Definition:Catenary|catenary]]''' is described by the equation: +:$y = \dfrac a 2 \paren {e^{x / a} + e^{-x / a} } = a \cosh \dfrac x a$ +where $a$ is a [[Definition:Constant|constant]]. +The lowest point of the [[Definition:Chain (Physics)|chain]] is at $\tuple {0, a}$. +\end{theorem}<|endoftext|> +\section{Cardinality of Power Set is Invariant} +Tags: Cardinals, Power Set + +\begin{theorem} +Let $X, Y$ be [[Definition:Set|sets]]. +Let $\card X = \card Y$ +where $\card X$ denotes the [[Definition:Cardinality|cardinality]] of $X$. +Then: +:$\card {\powerset X} = \card {\powerset Y}$ +where $\powerset X$ denotes the [[Definition:Power Set|power set]] of $X$. +\end{theorem} + +\begin{proof} +By definition of [[Definition:Cardinality|cardinality]]: +:$X \sim Y$ +where $\sim$ denotes the [[Definition:Set Equivalence|set equivalence]]. 
+Then by definition of [[Definition:Set Equivalence|set equivalence]]: +: there exists a [[Definition:Bijection|bijection]] $f: X \to Y$ +By definition of [[Definition:Bijection|bijection]] +:$f$ is an [[Definition:Injection|injection]] and a [[Definition:Surjection|surjection]]. +By [[Direct Image Mapping of Injection is Injection]]: +:the [[Definition:Direct Image Mapping of Mapping|direct image mapping]] $\map {f^\to}: \powerset X \to \powerset Y$ is an [[Definition:Injection|injection]]. +By [[Direct Image Mapping of Surjection is Surjection]]: +:$f^\to$ is a [[Definition:Surjection|surjection]]. +Then by definition of [[Definition:Bijection|bijection]]: +:$f^\to: \powerset X \to \powerset Y$ is a [[Definition:Bijection|bijection]]. +Hence by definition of [[Definition:Set Equivalence|set equivalence]]: +:$\powerset X \sim \powerset Y$ +Thus the result by definition of [[Definition:Cardinality|cardinality]]: +:$\card {\powerset X} = \card {\powerset Y}$ +{{qed}} +\end{proof}<|endoftext|> +\section{Reals are Isomorphic to Dedekind Cuts} +Tags: Real Numbers, Dedekind Cuts + +\begin{theorem} +Let $\mathscr D$ be [[Definition:Set of Sets|set]] of all [[Definition:Dedekind Cut|Dedekind cuts]] of the [[Definition:Totally Ordered Set|totally ordered set]] $\struct {\Q, \le}$. +Define a [[Definition:Mapping|mapping]] $f: \R \to \mathscr D$ as: +:$\forall x \in \R: \map f x = \set {y \in \Q: y < x}$ +Then $f$ is a [[Definition:Bijection|bijection]]. +\end{theorem} + +\begin{proof} +First, we will prove that: +:$\forall x \in \R: \map f x \in \mathscr D$ +Let $x \in \R$. +It is to be proved that $\map f x$ is a [[Definition:Proper Subset|proper subset]] of $\Q$ such that: +:$(1): \quad \forall z \in \map f x: \forall y \in \Q: y < z \implies y \in \map f x$ +:$(2): \quad \forall z \in \map f x: \exists y \in \map f x: z < y$ +We have that: +:$x \notin \map f x$ +Therefore by definition $\map f x$ is a [[Definition:Proper Subset|proper subset]] of $\Q$. +Ad. $(1)$: Let $z \in \map f x, y \in \Q$ such that: +:$y < z$ +By definition of $\map f x$: +:$z < x$ +Then: +:$y < x$ +Thus by definition of $\map f x$: +:$y \in \map f x$ +Ad. $(2)$: Let $z \in \map f x$. +By definition of $\map f x$: +:$z < x$ +By [[Between two Real Numbers exists Rational Number]]: +:$\exists r \in \Q: z < r < x$ +Then by definition of $\map f x$: +:$r \in \map f x$ +Thus: +:$\exists r \in \map f x: z < r$ +By definition of [[Definition:Bijection|bijection]] it suffices to prove that $f$ is an [[Definition:Injection|injection]] and a [[Definition:Surjection|surjection]]. +We will show by definition that $f: \R \to \mathscr D$ is an [[Definition:Injection|injection]]. +Let $x_1, x_2 \in \R$ such that +:$\map f {x_1} = \map f {x_2}$ +{{AimForCont}} $x_1 \ne x_2$. +{{WLOG}} suppose $x_1 < x_2$. +By [[Between two Real Numbers exists Rational Number]]: +:$\exists r \in \Q: x_1 < r < x_2$ +Then by definition of $\map f x$: +:$r \notin \map f {x_1}$ +and +:$r \in \map f {x_2}$ +This contradicts $\map f {x_1} = \map f {x_2}$. +We will prove by definition that $f: \R \to \mathscr D$ is a [[Definition:Surjection|surjection]]. +Let $L \in \mathscr D$. +By definition of [[Definition:Dedekind Cut|Dedekind cut]]: +:$L$ is a [[Definition:Proper Subset|proper subset]] of $\Q$. 
+By definition of [[Definition:Proper Subset|proper subset]]: +:$\exists r \in \Q: r \notin L$ +By definition of [[Definition:Dedekind Cut|Dedekind cut]]: +:$(3): \quad \forall x \in L: \forall y \in \Q: y < x \implies y \in L$ +Then +:$\forall x \in L: r \not < x \land r \ne x$ +Hence +:$\forall x \in L: r > x$ +Then $L$ is [[Definition:Bounded Above Subset of Real Numbers|bounded above]] by definition. +By definition of [[Definition:Supremum of Set|supremum]]: +:$\map \sup L \le r$ +Hence: +:$\map \sup L \in \R$ +By definition of [[Definition:Supremum of Set|supremum]]: +:$\map \sup L$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] of $L$. +Then by definition of [[Definition:Upper Bound of Subset of Real Numbers|upper bound]]: +:$\forall x \in L: x < \map \sup L$ +We will prove that: +:$\forall x \in \Q: x < \map \sup L \implies x \in L$ +Let $x \in \Q$ such that: +:$x < \map \sup L$ +{{AimForCont}} $x \notin L$. +By $(3)$: +:$\forall x \in L: r \ge x$ +By definition: +:$r$ is an [[Definition:Upper Bound of Subset of Real Numbers|upper bound]] of $L$. +By definition of [[Definition:Supremum of Set|supremum]]: +:$r \ge \map \sup L$ +This contradicts $x < \map \sup L$. +Thus: +:$L = \map f {\map \sup L}$ +{{qed}} +[[Category:Real Numbers]] +[[Category:Dedekind Cuts]] +rf1bi2w5yjvrm65r9a92qd7eo5dskgu +\end{proof}<|endoftext|> +\section{Slope of Orthogonal Curves} +Tags: Analytic Geometry + +\begin{theorem} +Let $C_1$ and $C_2$ be [[Definition:Curve|curves]] in a [[Definition:Cartesian Plane|cartesian plane]]. +Let $C_1$ and $C_2$ [[Definition:Intersection (Geometry)|intersect]] each other at $P$. +Let the [[Definition:Slope|slope]] of $C_1$ and $C_2$ at $P$ be $m_1$ and $m_2$. +Then $C_1$ and $C_2$ are [[Definition:Orthogonal (Analytic Geometry)|orthogonal]] {{iff}}: +:$m_1 = -\dfrac 1 {m_2}$ +\end{theorem} + +\begin{proof} +Let the [[Definition:Slope|slopes]] of $C_1$ and $C_2$ at $P$ be defined by the [[Definition:Vector|vectors]] $\mathbf v_1$ and $\mathbf v_2$ represented as [[Definition:Column Matrix|column matrices]]: +:$\mathbf v_1 = \begin{bmatrix} x_1 \\ y_1 \end{bmatrix} , \mathbf v_2 = \begin{bmatrix} x_2 \\ y_2 \end{bmatrix}$ +By [[Non-Zero Vectors Orthogonal iff Perpendicular]]: +:$\mathbf v_1 \cdot \mathbf v_2 = 0$ {{iff}} $C_1$ is [[Definition:Orthogonal (Analytic Geometry)|orthogonal]] to $C_2$ +where $\mathbf v_1 \cdot \mathbf v_2$ denotes the [[Definition:Dot Product|dot product]] of $C_1$ and $C_2$. 
+Thus: +{{begin-eqn}} +{{eqn | l = \mathbf v_1 \cdot \mathbf v_2 + | r = 0 + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = x_1 x_2 + y_1 y_2 + | r = 0 + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = \frac {y_1} {x_1} + \frac {x_2} {y_2} + | r = 0 + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = \frac {x_1} {y_1} + | r = -\frac 1 {\paren {\dfrac {y_2} {x_2} } } + | c = +}} +{{eqn | ll= \leadstoandfrom + | l = m_1 + | r = -\frac 1 {m_2} + | c = +}} +{{end-eqn}} +{{qed}} +[[Category:Analytic Geometry]] +nq4m6foku7g4yfj2punw324qg09tx4k +\end{proof}<|endoftext|> +\section{Orthogonal Trajectories/Concentric Circles} +Tags: Orthogonal Trajectories, Circles + +\begin{theorem} +Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]]: +:$(1): \quad x^2 + y^2 = c$ +Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation: +:$y = c x$ +\end{theorem} + +\begin{proof} +:[[File:ConcentricCirclesOrthogonalTrajectories.png|500px]] +We use the technique of [[Definition:Formation of Ordinary Differential Equation by Elimination|formation of ordinary differential equation by elimination]]. +[[Definition:Differentiation|Differentiating]] $(1)$ {{WRT|Differentiation}} $x$ gives: +:$2 x + 2 y \dfrac {\d y} {\d x} = 0$ +from which: +:$\dfrac {\d y} {\d x} = -\dfrac x y$ +Thus from [[Orthogonal Trajectories of One-Parameter Family of Curves]], the [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by: +:$\dfrac {\d y} {\d x} = \dfrac y x$ +Using the technique of [[Separation of Variables]]: +:$\displaystyle \int \frac {\d y} y = \int \frac {\d x} x$ +which by [[Primitive of Reciprocal]] gives: +:$\ln y = \ln x + \ln c$ +or: +:$y = c x$ +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Angle of Tangent to Radius in Polar Coordinates} +Tags: Analytic Geometry, Polar Coordinates + +\begin{theorem} +Let $C$ be a [[Definition:Curve|curve]] embedded in a [[Definition:Plane|plane]] defined by [[Definition:Polar Coordinates|polar coordinates]]. +Let $P$ be the [[Definition:Point|point]] at $\polar {r, \theta}$. +Then the [[Definition:Angle|angle]] $\psi$ made by the [[Definition:Tangent to Curve|tangent]] to $C$ at $P$ with the [[Definition:Radial Coordinate|radial coordinate]] is given by: +:$\tan \psi = r \dfrac {\d \theta} {\d r}$ +\end{theorem} + +\begin{proof} +:[[File:TangentToRadiusPolar.png|400px]] +{{ProofWanted}} +\end{proof}<|endoftext|> +\section{Orthogonal Trajectories/Circles Tangent to Y Axis} +Tags: Orthogonal Trajectories, Circles, Circles Tangent to Y Axis + +\begin{theorem} +Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]]: +:$(1): \quad x^2 + y^2 = 2 c x$ +which describes the [[Definition:Locus|loci]] of [[Definition:Circle|circles]] [[Definition:Tangent to Curve|tangent]] to the [[Definition:Y-Axis|$y$-axis]] at the [[Definition:Origin|origin]]. +Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation: +:$x^2 + y^2 = 2 c y$ +which describes the [[Definition:Locus|loci]] of [[Definition:Circle|circles]] [[Definition:Tangent to Curve|tangent]] to the [[Definition:X-Axis|$x$-axis]] at the [[Definition:Origin|origin]]. 
+:[[File:CirclesTangentAxisOrthogonalTrajectories.png|600px]]
+\end{theorem}<|endoftext|>
+\section{Orthogonal Trajectories/Rectangular Hyperbolas}
+Tags: Orthogonal Trajectories, Hyperbolas
+
+\begin{theorem}
+Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]] of [[Definition:Rectangular Hyperbola|rectangular hyperbolas]]:
+:$(1): \quad x y = c$
+Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation:
+:$x^2 - y^2 = c$
+:[[File:RectanguleHyperbolaeOrthogonalTrajectories.png|600px]]
+\end{theorem}
+
+\begin{proof}
+We use the technique of [[Definition:Formation of Ordinary Differential Equation by Elimination|formation of ordinary differential equation by elimination]].
+[[Definition:Differentiation|Differentiating]] $(1)$ {{WRT|Differentiation}} $x$ gives:
+:$x \dfrac {\d y} {\d x} + y = 0$
+{{begin-eqn}}
+{{eqn | l = x \frac {\d y} {\d x} + y
+ | r = 0
+ | c =
+}}
+{{eqn | ll= \leadsto
+ | l = \frac {\d y} {\d x}
+ | r = -\frac y x
+ | c =
+}}
+{{end-eqn}}
+Thus from [[Orthogonal Trajectories of One-Parameter Family of Curves]], the [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by:
+:$\dfrac {\d y} {\d x} = \dfrac x y$
+So:
+{{begin-eqn}}
+{{eqn | l = \frac {\d y} {\d x}
+ | r = \frac x y
+ | c =
+}}
+{{eqn | ll= \leadsto
+ | l = \int x \rd x
+ | r = \int y \rd y
+ | c = [[Separation of Variables]]
+}}
+{{eqn | ll= \leadsto
+ | l = x^2
+ | r = y^2 + c
+ | c =
+}}
+{{eqn | ll= \leadsto
+ | l = x^2 - y^2
+ | r = c
+ | c =
+}}
+{{end-eqn}}
+Hence the result.
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Orthogonal Trajectories/Parabolas Tangent to X Axis}
+Tags: Orthogonal Trajectories, Parabolas
+
+\begin{theorem}
+Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]] of [[Definition:Parabola|parabolas]] which are [[Definition:Tangent to Curve|tangent]] to the [[Definition:X-Axis|$x$-axis]] at the [[Definition:Origin|origin]]:
+:$(1): \quad y = c x^2$
+Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation:
+:$x^2 + 2 y^2 = c$
+:[[File:ParabolasTangentAxisOrthogonalTrajectories.png|600px]]
+\end{theorem}
+
+\begin{proof}
+We use the technique of [[Definition:Formation of Ordinary Differential Equation by Elimination|formation of ordinary differential equation by elimination]].
+[[Definition:Differentiation|Differentiating]] $(1)$ {{WRT}} $x$ gives:
+:$\dfrac {\d y} {\d x} = 2 c x$
+{{begin-eqn}}
+{{eqn | n = 2
+ | l = \frac {\d y} {\d x}
+ | r = 2 c x
+ | c =
+}}
+{{eqn | ll= \leadsto
+ | l = \frac {\d y} {\d x}
+ | r = \frac {2 y} x
+ | c = eliminating $c$ between $(1)$ and $(2)$
+}}
+{{end-eqn}}
+Thus from [[Orthogonal Trajectories of One-Parameter Family of Curves]], the [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by:
+:$\dfrac {\d y} {\d x} = -\dfrac x {2 y}$
+So:
+{{begin-eqn}}
+{{eqn | l = \frac {\d y} {\d x}
+ | r = -\dfrac x {2 y}
+ | c =
+}}
+{{eqn | ll= \leadsto
+ | l = \int x \rd x
+ | r = -2 \int y \rd y
+ | c = [[Separation of Variables]]
+}}
+{{eqn | ll= \leadsto
+ | l = x^2
+ | r = -2 y^2 + c
+ | c =
+}}
+{{eqn | ll= \leadsto
+ | l = x^2 + 2 y^2
+ | r = c
+ | c =
+}}
+{{end-eqn}}
+Hence the result.
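+As a check, consider any point $\tuple {x, y}$ with $x \ne 0$ and $y \ne 0$ lying both on a [[Definition:Parabola|parabola]] $y = c_1 x^2$ of the family $(1)$ and on a curve $x^2 + 2 y^2 = c_2$ of its family of [[Definition:Orthogonal Trajectories|orthogonal trajectories]]: the former has [[Definition:Slope|slope]] $2 c_1 x = \dfrac {2 y} x$ there, the latter has [[Definition:Slope|slope]] $-\dfrac x {2 y}$, and the product of these slopes is $-1$.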
+{{qed}} +\end{proof}<|endoftext|> +\section{Orthogonal Trajectories/Cardioids} +Tags: Orthogonal Trajectories, Cardioids + +\begin{theorem} +Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]] of [[Definition:Cardioid|cardioids]] given in [[Definition:Polar Coordinates|polar form]] as: +:$(1): \quad r = c \paren {1 + \cos \theta}$ +Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation: +:$r = c \paren {1 - \cos \theta}$ +:[[File:CardioidsOrthogonalTrajectories.png|600px]] +\end{theorem} + +\begin{proof} +We use the technique of [[Definition:Formation of Ordinary Differential Equation by Elimination|formation of ordinary differential equation by elimination]]. +[[Definition:Differentiation|Differentiating]] $(1)$ {{WRT}} $r$ gives: +{{begin-eqn}} +{{eqn | n = 2 + | l = \frac {\d r} {\d \theta} + | r = - c \sin \theta + | c = +}} +{{eqn | ll= \leadsto + | l = \frac {\d r} {\d \theta} + | r = -\frac {r \sin \theta} {1 + \cos \theta} + | c = eliminating $c$ between $(1)$ and $(2)$ +}} +{{eqn | ll= \leadsto + | l = r \frac {\d \theta} {\d r} + | r = -\frac {1 + \cos \theta} {\sin \theta} + | c = +}} +{{end-eqn}} +Thus from [[Orthogonal Trajectories of One-Parameter Family of Curves]], the [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by: +:$r \dfrac {\d \theta} {\d r} = \dfrac {\sin \theta} {1 + \cos \theta}$ +So: +{{begin-eqn}} +{{eqn | l = r \dfrac {\d \theta} {\d r} + | r = \frac {\sin \theta} {1 + \cos \theta} + | c = +}} +{{eqn | ll= \leadsto + | l = \int \frac {\d r} r + | r = \int \frac {1 + \cos \theta} {\sin \theta} \rd \theta + | c = [[Separation of Variables]] +}} +{{eqn | r = \int \paren {\csc \theta + \cot \theta} \rd \theta + | c = +}} +{{eqn | ll= \leadsto + | l = \ln r + | r = \ln \size {\csc \theta - \cot \theta} + \ln \size {\sin \theta} + c + | c = +}} +{{eqn | r = \ln \size {\paren {\csc \theta - \cot \theta} \sin \theta} + c + | c = +}} +{{eqn | r = \ln \size {1 - \cos \theta} + c + | c = +}} +{{eqn | ll= \leadsto + | l = r + | r = c \paren {1 - \cos \theta} + | c = +}} +{{end-eqn}} +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Orthogonal Trajectories/Exponential Functions} +Tags: Orthogonal Trajectories + +\begin{theorem} +Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]] of [[Definition:Graph of Mapping|graphs]] of the [[Definition:Real Exponential Function|exponential function]]: +:$(1): \quad y = c e^x$ +Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation: +:$y^2 = -2 x + c$ +:[[File:ExponentialsOrthogonalTrajectories.png|600px]] +\end{theorem} + +\begin{proof} +We use the technique of [[Definition:Formation of Ordinary Differential Equation by Elimination|formation of ordinary differential equation by elimination]]. 
+[[Definition:Differentiation|Differentiating]] $(1)$ {{WRT|Differentiation}} $x$ gives: +:$\dfrac {\d y} {\d x} = c e^x$ +{{begin-eqn}} +{{eqn | n = 2 + | l = \frac {\d y} {\d x} + | r = c e^x + | c = +}} +{{eqn | ll= \leadsto + | l = \frac {\d y} {\d x} + | r = y + | c = eliminating $c$ between $(1)$ and $(2)$ +}} +{{end-eqn}} +Thus from [[Orthogonal Trajectories of One-Parameter Family of Curves]], the [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by: +:$\dfrac {\d y} {\d x} = -\dfrac 1 y$ +So: +{{begin-eqn}} +{{eqn | l = \frac {\d y} {\d x} + | r = -\dfrac 1 y + | c = +}} +{{eqn | ll= \leadsto + | l = \int y \rd y + | r = -\int \rd x + | c = [[Separation of Variables]] +}} +{{eqn | ll= \leadsto + | l = y^2 + | r = -2 x + c + | c = +}} +{{end-eqn}} +Hence the result. +{{qed}} +\end{proof}<|endoftext|> +\section{Orthogonal Trajectories/Parabolas with Focus at Origin} +Tags: Orthogonal Trajectories + +\begin{theorem} +Consider the [[Definition:One-Parameter Family of Curves|one-parameter family of curves]] of [[Definition:Parabola|parabolas]] whose [[Definition:Focus of Parabola|focus]] is at the [[Definition:Origin|origin]] and whose [[Definition:Axis of Parabola|axis]] is the [[Definition:X-Axis|$x$-axis]]: +:$(1): \quad y^2 = 4 c \paren {x + c}$ +Its [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by the equation: +:$y^2 = 4 c \paren {x + c}$ +:[[File:ParabolasFocusOriginOrthogonalTrajectories.png|600px]] +\end{theorem} + +\begin{proof} +We use the technique of [[Definition:Formation of Ordinary Differential Equation by Elimination|formation of ordinary differential equation by elimination]]. +[[Definition:Differentiation|Differentiating]] $(1)$ {{WRT|Differentiation}} $x$ gives: +{{begin-eqn}} +{{eqn | n = 2 + | l = 2 y \frac {\d y} {\d x} + | r = 4 c + | c = +}} +{{eqn | ll= \leadsto + | l = c + | r = \frac y 2 \frac {\d y} {\d x} + | c = +}} +{{eqn | ll= \leadsto + | l = y^2 + | r = 2 y \frac {\d y} {\d x} \paren {x + \frac y 2 \frac {\d y} {\d x} } + | c = substituting for $c$ into $(1)$ +}} +{{eqn | r = 2 x y \frac {\d y} {\d x} + y^2 \paren {\frac {\d y} {\d x} }^2 + | c = +}} +{{end-eqn}} +Thus from [[Orthogonal Trajectories of One-Parameter Family of Curves]], the [[Definition:Orthogonal Trajectories|family of orthogonal trajectories]] is given by: +{{begin-eqn}} +{{eqn | l = y^2 + | r = -2 x y \frac {\d x} {\d y} + y^2 \paren {-\frac {\d x} {\d y} }^2 + | c = +}} +{{eqn | ll= \leadsto + | l = y^2 \paren {\frac {\d x} {\d y} }^2 + | r = 2 x y \frac {\d x} {\d y} + y^2 + | c = +}} +{{end-eqn}} +{{Finish}} +\end{proof}<|endoftext|> +\section{Half-Life of Radioactive Substance} +Tags: Radioactive Decay + +\begin{theorem} +Let a [[Definition:Radioactive Element|radioactive element]] $S$ [[Definition:Radioactive Decay|decay]] with a [[Definition:Rate Constant|rate constant]] $k$. +Then its [[Definition:Half-Life|half-life]] $T$ is given by: +:$T = \dfrac {\ln 2} k$ +:[[File:HalfLife.png|400px]] +\end{theorem} + +\begin{proof} +Let $x_0$ be the quantity of $S$ at [[Definition:Time|time]] $t = 0$. +At [[Definition:Time|time]] $t = T$ the quantity of $S$ has been reduced to $x = \dfrac {x_0} 2$. 
+This gives: +{{begin-eqn}} +{{eqn | l = x_0 e^{-k T} + | r = \frac {x_0} 2 + | c = [[First-Order Reaction]] +}} +{{eqn | ll= \leadsto + | l = e^{k T} + | r = 2 + | c = +}} +{{eqn | ll= \leadsto + | l = k T + | r = \ln 2 + | c = +}} +{{eqn | ll= \leadsto + | l = T + | r = \frac {\ln 2} k + | c = +}} +{{end-eqn}} +{{qed}} +\end{proof}<|endoftext|> +\section{Density not greater than Weight} +Tags: Denseness + +\begin{theorem} +Let $T = \left({S, \tau}\right)$ be a [[Definition:Topological Space|topological space]]. +Then +:$d \left({T}\right) \leq w \left({T}\right)$ +where +:$d \left({T}\right)$ denotes the [[Definition:Density of Topological Space|density]] of $T$, +:$w \left({T}\right)$ denotes the [[Definition:Weight of Topological Space|weight]] of $T$. +\end{theorem} + +\begin{proof} +By definition of [[Definition:Weight of Topological Space|weight]] there exists a [[Definition:Analytic Basis|basis]] $\mathcal B$ of $T$: +:$w \left({T}\right) = \left\vert{\mathcal B}\right\vert$ +where $\left\vert{\mathcal B}\right\vert$ denotes the [[Definition:Cardinality|cardinality]] of $\mathcal B$. +By [[Axiom:Axiom of Choice|Axiom of Choice]] define a [[Definition:Mapping|mapping]] $f: \left\{{U \in \mathcal B: U \ne \varnothing}\right\} \to S$: +:$\forall U \in \mathcal B: U \ne \varnothing \implies f \left({U}\right) \in U$ +We will prove that +:$\forall U \in \tau: U \ne \varnothing \implies U \cap \operatorname{Im} \left({f}\right) \ne \varnothing$ +where $\operatorname{Im} \left({f}\right)$ denotes the [[Definition:Image of Mapping|image]] of $f$. +Let $U \in \tau$ such that +:$U \ne \varnothing$ +By definition of [[Definition:Empty Set|empty set]]: +:$\exists x: x \in U$ +Then by definition of [[Definition:Analytic Basis|basis]]: +:$\exists V \in \mathcal B: x \in V \subseteq U$ +By definition of [[Definition:Image of Mapping|image]]: +:$f \left({V}\right) \in \operatorname{Im} \left({f}\right)$ +By definition of $f$: +:$f \left({V}\right) \in V$ +By definition of [[Definition:Subset|subset]]: +:$f \left({V}\right) \in U$ +Then by definition of [[Definition:Set Intersection|intersection]]: +:$f \left({V}\right) \in U \cap \operatorname{Im} \left({f}\right)$ +Thus by definition of [[Definition:Empty Set|empty set]]: +:$U \cap \operatorname{Im} \left({f}\right) \ne \varnothing$ +Then: +:$\forall x \in S: \forall U \in \tau: x \in U \implies U \cap \operatorname{Im} \left({f}\right) \ne \varnothing$ +Hence by [[Condition for Point being in Closure]]: +:$\forall x \in S: x \in \left( {\operatorname{Im} \left({f}\right)}\right)^-$ +where $^-$ denotes the [[Definition:Closure (Topology)|topological closure]]. +Then by definition of [[Definition:Subset|subset]]: +:$S \subseteq \left( {\operatorname{Im} \left({f}\right)}\right)^- \subseteq S$ +Thus by definition of [[Definition:Set Equality|set equality]]: +:$S = \left( {\operatorname{Im} \left({f}\right)}\right)^-$ +Thus by definition: +:$\operatorname{Im} \left({f}\right)$ is [[Definition:Everywhere Dense|dense]] +By definition of [[Definition:Density of Topological Space|density]]: +:$d \left({T}\right) \leq \left\vert{ \operatorname{Im} \left({f}\right) }\right\vert$ +$f$ as $\left\{{U \in \mathcal B: U \ne \varnothing} \right\} \to \operatorname{Im} \left({f}\right)$ by definition is [[Definition:Surjection|surjection]]. 
+Therefore by [[Surjection iff Cardinal Inequality]]:
+:$\left\vert{\operatorname{Im} \left({f}\right)}\right\vert \leq \left\vert{\left\{{U \in \mathcal B: U \ne \varnothing} \right\}}\right\vert$
+By definition of [[Definition:Subset|subset]]:
+:$\left\{{U \in \mathcal B: U \ne \varnothing} \right\} \subseteq \mathcal B$
+Then by [[Subset implies Cardinal Inequality]]:
+:$\left\vert{\left\{{U \in \mathcal B: U \ne \varnothing} \right\}}\right\vert \leq \left\vert{\mathcal B}\right\vert$
+Thus:
+:$d \left({T}\right) \leq w \left({T}\right)$
+{{qed}}
+\end{proof}<|endoftext|>
+\section{Temperature of Body under Newton's Law of Cooling}
+Tags: Thermodynamics
+
+\begin{theorem}
+Let $B$ be a [[Definition:Body|body]] in an environment whose ambient [[Definition:Temperature|temperature]] is $H_a$.
+Let $H$ be the [[Definition:Temperature|temperature]] of $B$ at time $t$.
+Let $H_0$ be the [[Definition:Temperature|temperature]] of $B$ at time $t = 0$.
+Then:
+:$H = H_a + \paren {H_0 - H_a} e^{-k t}$
+where $k$ is some [[Definition:Positive Real Number|positive]] [[Definition:Constant|constant]].
+\end{theorem}
+
+\begin{proof}
+By [[Newton's Law of Cooling]]:
+:The [[Definition:Rate|rate]] at which a hot [[Definition:Body|body]] loses [[Definition:Heat|heat]] is [[Definition:Proportion|proportional]] to the difference in [[Definition:Temperature|temperature]] between it and its surroundings.
+We have the [[Definition:First Order Ordinary Differential Equation|differential equation]]:
+:$\dfrac {\d H} {\d t} \propto - \paren {H - H_a}$
+That is:
+:$\dfrac {\d H} {\d t} = - k \paren {H - H_a}$
+where $k$ is some [[Definition:Positive Real Number|positive]] [[Definition:Constant|constant]].
+This is an instance of the [[Decay Equation]], and so has the solution:
+:$H = H_a + \paren {H_0 - H_a} e^{-k t}$
+{{qed}}
+{{Namedfor|Isaac Newton|cat = Newton}}
+\end{proof}
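+As an illustrative consequence of this result: the temperature difference $H - H_a = \paren {H_0 - H_a} e^{-k t}$ is halved over every interval of duration $\dfrac {\ln 2} k$, since $e^{-k t} = \dfrac 1 2$ precisely when $t = \dfrac {\ln 2} k$, mirroring the result of [[Half-Life of Radioactive Substance]].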