\documentclass{article}
\usepackage{xltxtra}
\usepackage{ctex}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{geometry}
\usepackage{booktabs}
\usepackage{graphicx}
\usepackage{float}
% duplicate \usepackage{geometry} and \usepackage{booktabs} removed (already loaded above)
\usepackage{tikz}
\usepackage{listings}

\makeatletter
\newcommand{\Rmnum}[1]{\expandafter\@slowromancap\romannumeral #1@}
\makeatother
\geometry{a4paper,top=1.5cm,bottom=1.5cm}
\title{Report for Chapter 4.}
\author{张皓祥 \\ 3200102536 强基数学2001}
\date{}

\begin{document}
\maketitle

\section{Theoretical questions}

\subsection*{\Rmnum{1}. Convert the decimal integer 477 to a normalized FPN with $\beta = 2$.}

Dividing 477 by 2 repeatedly, we have $477=2^8 + 2^7 + 2^6 + 2^4 + 2^3 + 2^2 + 2^0 = (111011101)_2$. Then converting it 
to a normalized FPN with $\beta =2,$ it is $(1.11011101)_2\times 2^8.$ 

\subsection*{\Rmnum{2}. Convert the decimal fraction 3/5 to a normalized FPN with $\beta = 2$.}

Multiplying $\frac{3}{5}$ by $2$ repeatedly, we have 

\begin{center}
\begin{tabular}{|c|c|c|}
    \hline
    Initial & Multiply by 2 & output \\
    \hline
    $\frac{3}{5}$ & $\frac{6}{5}(\frac{1}{5})$ & 1 \\
    \hline
    $\frac{1}{5}$ & $\frac{2}{5}$ & 0 \\
    \hline
    $\frac{2}{5}$ & $\frac{4}{5}$ & 0 \\
    \hline
    $\frac{4}{5}$ & $\frac{8}{5}(\frac{3}{5})$ & 1 \\
    \hline
\end{tabular}
\end{center}
i.e. $\frac{3}{5}=(0.1001\,1001\cdots)_2.$ The normalized FPN with $\beta =2 $ of it is 
$(1.00110011\cdots)_2 \times 2^{-1}.$

\subsection*{\Rmnum{3}. Prove $x_R -x = \beta (x-x_L)$.}

Assume the precision is $p$ and $x=(1.0\cdots 0)_\beta\times\beta^e=\beta^e$, so that 
$x_R = (1 + \beta^{1-p})\times \beta^e$ and $x_L= (\beta - \beta^{1-p})\times \beta^{e-1}$ 
are the normalized FPNs adjacent to $x$. Thus we have 
$x_R-x = \beta^{1-p}\times\beta^e=\beta\cdot(\beta^{1-p}\times\beta^{e-1})=\beta(x-x_L)$.

\subsection*{\Rmnum{4}. What is $fl(x)$ and the relative roundoff error?}

For the single-precision protocol, $\beta=2,p=23+1=24,e\in[-126,127].$ By the result of \Rmnum{2}, 
$\frac{3}{5}=(1.0011001\cdots)_2\times 2^{-1}.$ Since $p=24,$
\begin{align*}
    x_L & = (1.00110011001100110011001)_2\times 2^{-1} \\
    x_R & = (1.00110011001100110011010)_2\times 2^{-1}
\end{align*}
Since the discarded tail of $\frac{3}{5}$ beyond the 23rd fraction bit is $(0.1001\cdots)_2$, 
which exceeds one half of a unit in the last kept bit, $x$ is closer to $x_R$ than to $x_L$. 
Thus $fl(x)=x_R=(1.00110011001100110011010)_2\times 2^{-1}.$ 

Since $x_R-x_L=2^{-24}, x-x_L=\frac{3}{5}\times 2^{-24},$ We have 
$x_R-x = (x_R-x_L)-(x-x_L)=\frac{2}{5}\times 2^{-24}.$ The relative roundoff error is 
$E_{rel}(x)=\frac{x_R-x}{x}=\frac{\frac{2}{5}\times 2^{-24}}{(\frac{3}{5})}=\frac{2}{3}\times 2^{-24}.$

\subsection*{\Rmnum{5}. What would the unit roundoff be?}

If it simply drops excess bits (chopping), then for a real number $x$, let $x_L$ and $x_R$ be 
the two nearest normalized FPN elements with $x_L\le x<x_R.$ Denote $x = x_L+\delta_x$, where 
(taking $e=0$) $\delta_x\in[0,\epsilon_M).$ The error will be $\delta_x$ and the unit roundoff 
will be $\sup\delta_x=\epsilon_M,$ i.e. the unit roundoff is $\epsilon_M=2^{-23}.$

\subsection*{\Rmnum{6}. Losing bits of precision in the subtraction
$1 - \cos x$ when $x = \frac{1}{4}$.}

By calculation, pick $x=1,y=\cos (\frac{1}{4}),x>y>0$, we have 
$1-\frac{y}{x}=1-\cos(\frac{1}{4})\approx 0.03109.$ 
For $\beta=2,\log_2(0.03109)\approx -5.0074, 2^{-6}<1-\frac{y}{x}<2^{-5}.$ 
By Theorem 4.49, the number of most significant digits that are lost in the subtraction 
$1-\cos(\frac{1}{4})$ is at most 6 and at least 5.

\subsection*{\Rmnum{7}. Suggest at least two ways to compute $1-\cos x$ to avoid 
catastrophic cancellation caused by subtraction.}

\noindent $\bullet$ Method 1. Use the Taylor series to calculate, 
$$
1-\cos x = 1 - (1-\frac{x^2}{2!}+\frac{x^4}{4!}-\frac{x^6}{6!}+\cdots) 
= \frac{x^2}{2!}-\frac{x^4}{4!}+\frac{x^6}{6!}+\cdots
$$

\noindent $\bullet$ Method 2. 

By Taylor's theorem (with Lagrange remainder), for $f(x)=1-\cos x, 
f(x) = f(0)+f'(0)x+\frac{f''(0)}{2}x^2+o(x^2).$ Hence we can use $\frac{x^2}{2}$ to calculate.

\noindent $\bullet$ Method 3. Use duplication formula we have $1-\cos x=1 - (1-2\sin^2\frac{x}{2})=2\sin^2\frac{x}{2}$, 
it can avoid catastrophic cancellation.

\subsection*{\Rmnum{8}. What are the condition numbers of the following functions?}

\noindent $\bullet \quad (x-1)^{\alpha}$.

\noindent For $f(x)=(x-1)^{\alpha},C_f(x)=|\frac{xf'(x)}{f(x)}|=|\frac{x\cdot \alpha(x-1)^{\alpha-1}}{(x-1)^{\alpha}}|
=|\frac{\alpha x}{x-1}|$. It will be large when $x\to 1.$

\bigskip
\noindent $\bullet \quad \ln x$.

\noindent For $f(x)=\ln x, C_f(x) = |\frac{xf'(x)}{f(x)}|=|\frac{x\cdot \frac{1}{x}}{\ln x}|
=|\frac{1}{\ln x}|.$ It will be large when $x\to 1.$

\bigskip
\noindent $\bullet \quad e^x$.

\noindent For $f(x)=e^x, C_f(x) = |\frac{xf'(x)}{f(x)}|=|\frac{xe^x}{e^x}|=|x|.$ It will be 
large when $x\to \infty.$

\bigskip
\noindent $\bullet \quad \arccos x$.

\noindent For $f(x)=\arccos x, C_f(x)=|\frac{xf'(x)}{f(x)}| = |\frac{x}{\sqrt{1-x^2}\arccos x}|.$ 
It will be large when $x\to 1^-$ or $x\to -1^+$.

\subsection*{\Rmnum{9}. Consider the function $f(x) = 1 - e^{-x}$ for $x \in [0, 1]$.}

\subsubsection*{$\bullet$ Show that cond$_f(x)\leq 1$ for $x\in[0,1]$.}

$cond_f(x)=|\frac{xf'(x)}{f(x)}|=\frac{xe^{-x}}{1-e^{-x}}$ when $x\in[0,1].$ 
Let $g(x)=\frac{xe^{-x}}{1-e^{-x}}.$

$g'(x)=\frac{(1-x)e^{-x}(1-e^{-x})-xe^{-x}\cdot e^{-x}}{(1-e^{-x})^2}=
\frac{(1-x-e^{-x})e^{-x}}{(1-e^{-x})^2}.$ Since $e^{-x}\geq 1+(-x)=1-x,$ it implies that 
$g'(x)\leq 0,$ which means $g(x)$ decreases with $x$. Thus 
$$
cond_f(x)\leq \lim\limits_{x\to 0+}g(x)= \lim\limits_{x\to 0+}\frac{xe^{-x}}{1-e^{-x}}
=\lim\limits_{x\to 0+} \frac{(1-x)e^{-x}}{e^{-x}}=1\quad \forall x\in [0,1].
$$

\subsubsection*{$\bullet$ Estimate cond$_A(x)$ for $x\in[0,1]$.}

\noindent By definition, $f_A(x)=fl[1-fl(e^{-fl(x)})],$ where $fl(x)=x(1+\delta_1)$, the 
evaluation of the exponential carries a factor $(1+\delta_2)$, the subtraction carries a factor 
$(1+\delta_3)$, and $|\delta_i|\leq \epsilon_u$ for $i=1,2,3.$ Then neglecting the quadratic 
terms of $O(\delta_i^2)$ and using $e^{-x\delta_1}\approx 1-x\delta_1,$
$$
f_A(x)=(1-e^{(-x)(1+\delta_1)}(1+\delta_2))(1+\delta_3)
=(1-e^{-x})(1+(\delta_2+\delta_3-x\delta_1)-\frac{\delta_2-x\delta_1}{1-e^{-x}}).
$$
Hence by $x\in[0,1],$ we can choose $\varphi (x)=3+\frac{2}{1-e^{-x}}.$ By Theorem 4.76, 
$$
cond_A(x)\leq \frac{\varphi(x)}{cond_f(x)}
=\frac{5-3e^{-x}}{xe^{-x}}.
$$

\subsubsection*{$\bullet$ Plot cond$_f(x)$ and the estimated upper bound of cond$_A(x)$ 
of $x$ on $[0,1]$.}

Since $cond_f(x)=\frac{xe^{-x}}{1-e^{-x}}$, it's decreasing with $x$, we plot the figure as follow:
\begin{figure}[H]
    \centering
    \includegraphics[scale=0.55]{../src/condf.png} \label{FigCondf}
\end{figure}
For $cond_A(x)\leq \frac{5-3e^{-x}}{xe^{-x}}:=g(x),$ view the upper bound 
as a function of $x$ and plot it as follows:
\begin{figure}[H]
    \centering
    \includegraphics[scale=0.55]{../src/condA.png} \label{FigCondA}
\end{figure}
By the estimate, when $x\to 0+,cond_A(x)\to +\infty,$ which means the relative error will be too large and 
we may get a catastrophic cancellation.

\subsection*{\Rmnum{10}. For the Wilkinson example, compute your condition number, and compare your result with
that in the Wilkinson Example.}

For $r = f(a_0,a_1,\cdots,a_{n-1})\neq 0,$ the componentwise condition numbers are $a_i(x)=|\frac{a_i \frac{\partial r}{\partial a_i}}{r}|.$ 
Since $r$ is the root of $p(x),$ $\sum\limits_{i=0}^{n-1}a_{i}r^{i}=0,$ we have  
$\frac{\partial r}{\partial a_i} = -\frac{r^i}{\sum\limits_{j=1}^{n-1}ja_{j}r^{j-1}} = -\frac{r^i}{p'(r)}.$
$a_i(x)=|\frac{a_i r^{i-1}}{p'(r)}|.$ It implies that 
$$
cond_f(x) = \left\lVert A(x)\right\rVert _1 = \max\limits_i a_{i}(x) = 
\max\limits_i |\frac{a_i r^{i-1}}{p'(r)}|.
$$
Applying this to the Wilkinson example, consider the condition number for 
$f(x)=\prod\limits_{k=1}^{p}(x-k)$ at the point $x=p$; it's 
$cond_f(x) = \max\limits_i|\frac{a_i p^{i-1}}{(p-1)!}|\geq \frac{\sum\limits_{k=1}^{p}kp^{p-2}}{(p-1)!}
= \frac{(p+1)p^{p-1}}{2(p-1)!}.$ The estimate is of the same order as the example. It implies that 
the difficulty of solving polynomials with high degrees comes from its high condition number.

\subsection*{\Rmnum{11}. Give an example that contradicts the conclusion of the model of machine arithmetic.}

Consider the FPNs system (2, 2, -1, 1) and $a = (1.0)_2\times 2^0, b = (1.1)_2\times 2^0.$ 
Then $\frac{a}{b}=\frac{1}{1.5}=\frac{2}{3}=(0.1010\cdots)_2 = (1.01010\cdots)_2\times 2^{-1}$ 
by Example 4.10. But if the register has precision $2p=4,$ chopping to 4 significant bits gives 
$(0.101)_2 = (1.010)_2\times 2^{-1},$ and rounding this to $p=2$ bits (round to even) yields $fl(\frac{a}{b})=(1.0)_2\times 2^{-1}=\frac{1}{2}$. 
Thus $E_{rel}(\frac{a}{b})=|\frac{fl(\frac{a}{b})-\frac{a}{b}}{(\frac{a}{b})}| = 
|\frac{\frac{1}{2}-\frac{2}{3}}{(\frac{2}{3})}|=0.25 = 2^{-2} = \frac{1}{2}\times 2^{1-2}=\epsilon_u$ 
contradicting the model of machine arithmetic, which asserts $|\delta|<\epsilon_u$ for division.

\subsection*{\Rmnum{12}. Can we compute the root with absolute accuracy $< 10^{-6}$? Why?}

No, we can't. $128 = 2^7$, then the root in the interval $[128,129]$ will be represented as 
$m\times 2^{7},m=(1.xxx\cdots)_2$. 
Consider that the single precision FPNs of IEEE 754 have only 24 bits of precision, 
so we can represent an integer exactly only up to $2^{24}=16777216$, i.e. we can only represent 
a decimal number with no more than 8 significant decimal digits. But if we want to compute the root with 
the absolute accuracy $<10^{-6}$, we need at least $3+6=9$ significant decimal digits, which is impossible.

\subsection*{\Rmnum{13}. Use the condition number of a matrix to explain this phenomenon.}

For $s(x) = ax^3+bx^2+cx+d,$ we need to know the values of $s(x),s'(x)$ at the points $x_i,x_{i+1}$. 
Note that $s'(x) = 3ax^2 + 2bx + c$; the coefficients $a,b,c,d$ can be calculated by 
\begin{equation*}
    \begin{cases}
        ax_{i}^3+bx_{i}^2+cx_{i}+d = f(x_{i}) \\
        ax_{i+1}^3 + bx_{i+1}^2+cx_{i+1}+d = f(x_{i+1}) \\
        3ax_{i}^2 + 2bx_{i} + c = f'(x_i) \\
        3ax_{i+1}^2 + 2bx_{i+1} + c = f'(x_{i+1})
    \end{cases}
    \Leftrightarrow
    \left[
    \begin{matrix}
        x_{i}^3 & x_{i}^2 & x_{i} & 1 \\
        x_{i+1}^3 & x_{i+1}^2 & x_{i+1} & 1 \\
        3x_{i}^2 & 2x_i & 1 & 0 \\
        3x_{i+1}^2 & 2x_{i+1} & 1 & 0
    \end{matrix}
    \right] \left[
    \begin{matrix}
        a \\ b \\ c \\ d
    \end{matrix}
    \right]
    =
    \left[
    \begin{matrix}
        f(x_i) \\ f(x_{i+1}) \\ f'(x_i) \\ f'(x_{i+1})
    \end{matrix}
    \right]
\end{equation*}
Denote the coefficient matrix as $A$; then the maximal condition number is 
$$
\max cond_f(x) = \left\lVert A\right\rVert _2 \left\lVert A^{-1}\right\rVert _2
= \frac{\sigma_{\max}}{\sigma_{\min}}.
$$
Consider the characteristic polynomial $f_A(t)=\det (A-tI_4).$ If $x_i=x_{i+1},$ 
$rank(A)=2$ and $t^2\mid f_A(t)$, i.e. $A$ has the eigenvalue $0$ with multiplicity two. 
If the distance between $x_i$ and $x_{i+1}$ is too small, some eigenvalues of $A$ 
will be very close to 0, hence $\sigma_{\min}$ is very close to 0, and the condition 
number will get large. Thus the computed solution will be inaccurate.

\newpage
\section{Programming assignments}

\subsection*{A. Print values of the functions in $(4.49)$ at $101$ equally spaced points.}

Plot the values calculated by the three functions as follows:
\begin{figure}[H]
    \centering
    \includegraphics[scale=0.45]{../src/ProgrammingA.png} \label{FigProA}
\end{figure}
As the figure shows, the values calculated by $f(x)$ and $g(x)$ are unstable, since their 
curves oscillate violently. For $h(x)$, the curve is much smoother. Note that the true function is 
smooth and near $y = 0$ when $x$ is near 1 by mathematical analysis, so the result 
calculated by $h(x)$ is the most accurate.

No matter whether we use $f(x)$ or $g(x)$, we need to multiply by $70,56,28,8,$ which will 
enlarge the error by factors of $70,56,28,8$ and make the relative error large. But $h(x)$ 
only needs to calculate $fl(x-1)$ and repeated products, which are less affected by the 
calculation itself. In other words, calculations by $h(x)$ near 1 are less likely to 
lose precision; that's why it's the most accurate.

\subsection*{B. Consider a normalized FPN system 
$\mathbb{F}$ with the characterization.}

For a normalized FPN system $(2, 3, -1, 1),$ by calculation, we can have information 
as follow:

\noindent $\bullet$ 
$UFL(\mathbb{F}) = (1.00)_2\times 2^{-1} = 0.5, OFL(\mathbb{F}) = (1.11)_2\times 2^{1} = 3.5$

\noindent $\bullet$ All numbers in $\mathbb{F}$ are $0$ and $\pm 0.5,$ $\pm 0.625,$ $\pm 0.75,$ $\pm 0.875,$ 
$\pm 1,$ $\pm 1.25,$ $\pm 1.5,$ $\pm 1.75,$ $\pm 2,$ $\pm 2.5,$ $\pm 3,$ $\pm 3.5$. 
The cardinality of it is 25, which equals $2\times 2^{3-1}\times (1-(-1)+1)+1$ as in Corollary 4.19. 
That's verified.

\noindent $\bullet$ The plot of $\mathbb{F}$ is as follow:
\begin{figure}[H]
    \centering
    \includegraphics[scale=0.45]{../src/ProB1.png} \label{FigB1}
\end{figure}

\noindent $\bullet$ All the subnormal numbers of $\mathbb{F}$ are 0.125 -0.125 0.25 -0.25 0.375 -0.375.

\noindent $\bullet$ The plot the extended $\mathbb{F}$ is as follow:
\begin{figure}[H]
    \centering
    \includegraphics[scale=0.45]{../src/ProB2.png} \label{FigB2}
\end{figure}

\end{document}