\documentclass{article}
\usepackage{xeCJK}
\usepackage[affil-it]{authblk}
\usepackage[dvipsnames]{xcolor}
\usepackage{amssymb}
\usepackage{verbatim}
\usepackage{indentfirst}
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{geometry}
\usepackage{float}
\usepackage{listings}
\usepackage{graphicx} % Required for figures
\usepackage{subcaption} % Required for subfigures
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}
\begin{document}
% ==================================================
\title{Numerical Analysis Homework \#4}

\author{周川迪 Zhou Chuandi 3220101409}
\affil{强基数学2201}

\date{\today}

\maketitle

\begin{abstract}
  Exercises in Chapter 4 and the theoretical questions of Section 4.4.1.

  Theorems or Corollaries are referred from \textit{handoutsNUMPDEs-2024-08-17}.
\end{abstract}



\section{Exercises}

\subsection{Exercise 4.33}

\[a=1.234 \times 10^4,\ c:=\operatorname{fl}(a+b),\ \mathcal{F}:(10,4,-7,8) \]

\((1)\ b=8.769 \times 10^4\)

\[(i)\ e_c \leftarrow 4\ (ii)\ m_c \leftarrow 10.003\ (iii)\ m_c \leftarrow 1.0003;\ e_c \leftarrow 5\ (v)\ m_c \leftarrow 1.000 \]
\[\Rightarrow c = 1.000 \times 10^5 \]

\((2)\ b=-5.678 \times 10^0\)

\[(i)\ b \leftarrow -0.0005678 \times 10^4;\ e_c \leftarrow 4\ (ii)\ m_c \leftarrow 1.2334322\ (v)\ m_c \leftarrow 1.233 \]
\[\Rightarrow c = 1.233 \times 10^4 \]

\((3)\ b=-5.678 \times 10^3\)

\[(i)\ b \leftarrow -0.5678 \times 10^4;\ e_c \leftarrow 4\ (ii)\ m_c \leftarrow 0.6662\ (iii)\ m_c \leftarrow 6.662;\ e_c \leftarrow 3\]
\[\Rightarrow c = 6.662 \times 10^3 \]


\subsection{Exercise 4.42}
\subsubsection*{Example 1: \(\mathcal{F}:(2,3,0,5);\ a_i=\{1,7,8\}=\{(1)_2 \times 2^0,(1.11)_2 \times 2^2,(1)_2 \times 2^3\};\ a_1+a_2+a_3=16=(1)_2 \times 2^4 \)}

Descending Order:
\[\operatorname{fl}((1.11)_2 \times 2^2+(1)_2 \times 2^3)=\operatorname{fl}((1.111)_2 \times 2^3)=(1.11)_2 \times 2^3 \]
\[\operatorname{fl}((1.11)_2 \times 2^3 + (1)_2 \times 2^0)=\operatorname{fl}((1.111)_2 \times 2^3)=(1.11)_2 \times 2^3=14 \]

Ascending Order:
\[\operatorname{fl}((1)_2 \times 2^0 + (1.11)_2 \times 2^2)=\operatorname{fl}((10.00)_2 \times 2^2)=(1)_2 \times 2^3 \]
\[\operatorname{fl}((1)_2 \times 2^3 + (1)_2 \times 2^3)=\operatorname{fl}((10)_2 \times 2^3)=(1)_2 \times 2^4=16 \]

Since \(|16-16|<|14-16| \), ascending order has smaller rounding error.


\subsubsection*{Example 2: \(\mathcal{F}:(10,3,0,5);\ a_i=\{888,787,666\};\ a_1+a_2+a_3=2341 \)}

Descending Order:
\[\operatorname{fl}(8.88 \times 10^2 + 7.87 \times 10^2)=\operatorname{fl}(16.75 \times 10^2)=1.68 \times 10^3 \]
\[\operatorname{fl}(1.68 \times 10^3 + 6.66 \times 10^2)=\operatorname{fl}(2.346 \times 10^3)=2.35 \times 10^3 \]

Ascending Order:
\[\operatorname{fl}(6.66 \times 10^2 + 7.87 \times 10^2)=\operatorname{fl}(14.53 \times 10^2)=1.45 \times 10^3 \]
\[\operatorname{fl}(1.45 \times 10^3 + 8.88 \times 10^2)=\operatorname{fl}(2.338 \times 10^3)=2.34 \times 10^3 \]

Since \(|2340-2341|<|2350-2341| \), ascending order has smaller rounding error.


\subsubsection*{Example 3: \(\mathcal{F}:(10,3,0,10);\ a_i=\{9,9140,9200\};\ a_1+a_2+a_3=18349 \)}

Descending Order:
\[\operatorname{fl}(9.20 \times 10^3 + 9.14 \times 10^3)=\operatorname{fl}(18.34 \times 10^3)=1.83 \times 10^4 \]
\[\operatorname{fl}(1.83 \times 10^4 + 9 \times 10^0)=\operatorname{fl}(1.8309 \times 10^4)=1.83 \times 10^4 \]

Ascending Order:
\[\operatorname{fl}(9 \times 10^0 + 9.14 \times 10^3)=\operatorname{fl}(9.149 \times 10^3)=9.15 \times 10^3 \]
\[\operatorname{fl}(9.15 \times 10^3 + 9.2 \times 10^3)=\operatorname{fl}(1.835 \times 10^4)=1.84 \times 10^4 \]

Since \( |18400-18349|>|18300-18349| \), descending order has smaller rounding error! 

\subsubsection*{Conclusion}
In fact, ascending order ensures a stricter upper bound of rounding error \(\delta\). 

However, there exists an example where ascending order does NOT necessarily minimize the rounding error, particularly in the context of decimal representations. 

It's possible that in the binary system used by computers, ascending order could indeed minimize rounding errors.




\subsection{Exercise 4.43}
We have to assume that \(a_i,b_i>0\); otherwise Theorem 4.41 doesn't work!

Similar to Theorem 4.41, \(\prod_{i=0}^n a_i \) holds the same property.

\[\operatorname{fl}(a_1 b_1+a_2 b_2+a_3 b_3)=(1+\delta')[\operatorname{fl}(a_1 b_1)+\operatorname{fl}(a_2 b_2)+\operatorname{fl}(a_3 b_3)],\ |\delta'|<(1+\epsilon_u)^2-1 \approx 2\epsilon_u \]
\[\text{for}\ i=1,2,3,\ \operatorname{fl}(a_i b_i)=(1+\delta_i)(a_i b_i),\ |\delta_i|<\epsilon_u \]

Since \[|\delta_1 a_1 b_1 +\delta_2 a_2 b_2+\delta_3 a_3 b_3| \leq \max{\{|\delta_1|,|\delta_2|,|\delta_3|\}}(a_1 b_1 + a_2 b_2 + a_3 b_3)<\epsilon_u(a_1 b_1 + a_2 b_2 + a_3 b_3)\]

We have
\[\operatorname{fl}(a_1 b_1)+\operatorname{fl}(a_2 b_2)+\operatorname{fl}(a_3 b_3)=(1+\delta^*)(a_1 b_1+a_2 b_2+a_3 b_3),\ |\delta^*|<\epsilon_u \]

Thus,
\[\operatorname{fl}(a_1 b_1+a_2 b_2+a_3 b_3)=(1+\delta')(1+\delta^*)(a_1 b_1+a_2 b_2+a_3 b_3)=(1+\delta)(a_1 b_1+a_2 b_2+a_3 b_3),\]
\[|\delta|<\epsilon_u+(1+\epsilon_u)^2-1+\epsilon_u[(1+\epsilon_u)^2-1] \approx 3\epsilon_u \]

The observations can imply that
\[|\delta_i|<(1+\epsilon_u)^m-1 \approx m\epsilon_u;\ |\delta^*|<\epsilon_u;\ |\delta'|<(1+\epsilon_u)^n-1 \approx n\epsilon_u  \]
\[\operatorname{fl} \left(\sum_{i=0}^n \prod_{j=0}^m a_{i,j}\right)=(1+\delta^*)(1+\delta')\sum_{i=0}^n \prod_{j=0}^m a_{i,j}=(1+\delta)\sum_{i=0}^n \prod_{j=0}^m a_{i,j}, \]
\[|\delta|<[(1+\epsilon_u)^m-1]+(1+\epsilon_u)^n-1+[(1+\epsilon_u)^m-1][(1+\epsilon_u)^n-1] \approx (m+n)\epsilon_u \]




\subsection{Exercise 4.80}

For \(x \in \left(0,\frac{\pi}{2}\right) \),
\[f(x)=\frac{\sin x}{1+\cos x};\ f'(x)=\frac{\cos x(1+\cos x)+\sin^2 x}{(1+\cos x)^2}=\frac{1}{1+\cos x}\]
\[ \operatorname{cond}_f=\left| \frac{xf'(x)}{f(x)} \right|=\frac{x}{\sin x} \]

On the other hand,
\[f_A=\operatorname{fl} \left[\frac{\operatorname{fl}(\sin x)}{\operatorname{fl}(1+\operatorname{fl}(\cos x))} \right] \]
\[f_A(x)=(1+\delta_1) \left[\frac{(1+\delta_2)(\sin x)}{(1+\delta_3)(1+(1+\delta_4)(\cos x))} \right] \]

Neglecting quadratic terms of order \(O(\delta_i^2) \), we have \( \frac{1}{1+k\delta_i} \approx 1-k\delta_i \), where \(k\) is a suitable constant.
\begin{align*}
  f_A(x)&=(1+\delta_1)(1+\delta_2) \left[\frac{\frac{(\sin x)(1+\cos x)}{(1+\delta_3)(1+(1+\delta_4)(\cos x))}}{1+\cos x} \right] \\
  &=(1+\delta_1+\delta_2)\frac{\sin x}{1+\cos x}\left[\frac{(1+\cos x)}{(1+\delta_3)(1+\cos x)+\delta_4 \cos x} \right] \\
  &=(1+\delta_1+\delta_2)\frac{\sin x}{1+\cos x}\left[\frac{1}{1+\delta_3+\frac{\delta_4 \cos x}{1+\cos x}} \right] \\
  &=\left(1+\delta_1+\delta_2-\delta_3-\frac{\delta_4 \cos x}{1+\cos x}\right)\frac{\sin x}{1+\cos x}
\end{align*}

Hence, we have \(\varphi(x)=3+\frac{\cos x}{1+\cos x} \) and
\[\operatorname{cond}_A(x) \leq \frac{\varphi(x)}{\operatorname{cond}_f(x)}=\frac{\sin x}{x}\left(3+\frac{\cos x}{1+\cos x}\right) \]

Thus, \(\operatorname{cond}_A(x)\) is bounded by \(\frac{7}{2}\) as \(x \rightarrow 0\) and bounded by \(\frac{6}{\pi}\) as \(x \rightarrow \frac{\pi}{2}\).




\section{Problems}

\subsection{I. }
\[477 \rightarrow 1.11011101 \times 2^8 \]




\subsection{II. }
\[\frac{3}{5}=\left(1+\frac{1}{5}\right) \times 2^{-1} =1 \times 2^{-1} +\frac{4}{5} \times 2^{-3} =1 \times 2^{-1} +1 \times 2^{-4} + \frac{3}{5} \times 2^{-4}=\cdots \]

Thus,
\[\frac{3}{5} \rightarrow 1.0011001100\cdots \times 2^{-1} \]




\subsection{III. }

For \(x=\beta^e=\beta \times \beta^{e-1}\), since \(x_L\) and \(x_R\) are adjacent to \(x\),
\[x_L=(\beta-\beta^{1-p})\beta^{e-1}=\beta^e-\beta^{e-p};\ x_R=(1+\beta^{1-p})\beta^e=\beta^e+\beta^{e-p+1} \]
\[\frac{x_R-x}{x-x_L}=\frac{\beta^{e-p+1}}{\beta^{e-p}}=\beta \]

Thus, \(x_R-x=\beta(x-x_L) \).




\subsection{IV. }

From \textit{IEEE 754 single-precision protocol}, we have \(\beta=2,\ p=24\) and \(x=\frac{3}{5}\). Thus,
\begin{align*}
  x&=1.001100110011001100110011\cdots \times 2^{-1} \\
  x_L&=1.00110011001100110011001 \times 2^{-1} \\
  x_R&=1.00110011001100110011010 \times 2^{-1} 
\end{align*}

Hence, \(x_R-x_L=2^{-24};\ x-x_L=1.00110011\cdots \times 2^{-25}=\frac{3}{5} \times 2^{-24} \Rightarrow x_R-x=\frac{2}{5} \times 2^{-24} \) 

Therefore, \(\operatorname{fl}(x)=x_R\) with relative roundoff error
\[\delta=\frac{|x_R-x|}{|x|}=\frac{\frac{2}{5} \times 2^{-24}}{\frac{3}{5}}=\frac{2}{3} \times 2^{-24}\]




\subsection{V. }

When rounding off numbers to the nearest, the unit roundoff \(\epsilon_u=\frac{1}{2}\epsilon_M\)

By comparison, when simply dropping excess bits, the unit roundoff is \(\epsilon_u=\epsilon_M=\beta^{1-p}=2^{-23}\). 




\subsection{VI. }

It can be calculated that

\[\cos \frac{1}{4}=(0.1111100000001010101\cdots)_2 \Rightarrow 1-\cos \frac{1}{4}=(0.000001\cdots)_2\]

The highest five digits of \(\cos \frac{1}{4}\) have all ``turned into \(0\)''. Thus, \(5\) bits of precision are lost.




\subsection{VII. }

\subsubsection{Method 1}
By Taylor's series,
\[1 - \cos x = \frac{x^2}{2} - \frac{x^4}{4!} + \frac{x^6}{6!} - \cdots\]

It converges for \( x \in \mathbb{R} \), so we can use this method by loops.


\subsubsection{Method 2}

We have \(1-\cos x = \sin^2\left(\frac{x}{2}\right)\) and JUST DO IT.




\subsection{VIII. }
We have \(\operatorname{cond}_f=\left|\frac{xf'(x)}{f(x)}\right| \), then:
\begin{align*}
  f(x)=(x-1)^\alpha \Rightarrow &\operatorname{cond}_f=\left|\frac{\alpha x(x-1)^{\alpha-1}}{(x-1)^\alpha}\right|=\left|\frac{\alpha x}{x-1}\right|,\text{ Large at } x \rightarrow 1 \\
  f(x)=\ln x \Rightarrow &\operatorname{cond}_f=\left|\frac{x \cdot \frac{1}{x}}{\ln x}\right|=\left|\frac{1}{\ln x}\right|,\text{ Large at } x \rightarrow 1 \\
  f(x)=e^x \Rightarrow &\operatorname{cond}_f=\left|\frac{xe^x}{e^x}\right|=|x|,\text{ Large at } |x| \text{ is large.} \\
  f(x)=\arccos x \Rightarrow &\operatorname{cond}_f=\left|\frac{- x \cdot \frac{1}{\sqrt{1 - x^2}}}{\arccos x}\right|=\left|\frac{x}{\sqrt{1 - x^2}\arccos x}\right|,\text{ Large at } x \rightarrow \pm 1
\end{align*}




\subsection{IX. }

\subsubsection{}
For \(f(x)=1-e^{-x},\ x \in [0,1]\),\[\operatorname{cond}_f=\left|\frac{xe^{-x}}{1-e^{-x}}\right|=\frac{xe^{-x}}{1-e^{-x}}=\frac{x}{e^x-1}.\]

Hence,
\[e^x \geq 1+x \Rightarrow \frac{x}{e^x-1} \leq 1,\ x \in [0,1] \] \[\Rightarrow \operatorname{cond}_f \leq 1,\ x \in [0,1] \]


\subsubsection{}

For \(|\delta_i| \leq \epsilon_u\), neglecting quadratic terms of \(O(\delta_i^2) \), we have
\begin{align*}
  f_A(x) &= (1+\delta_1)[1-(1+\delta_2)e^{-x}] \\
  &=1-e^{-x}-\delta_2e^{-x}+\delta_1(1-e^{-x}) \\
  &= (1 - e^{-x})\left(1 + \delta_1 - \delta_2 \frac{e^{-x}}{1 - e^{-x}}\right).
\end{align*}

Hence, we have \[\varphi(x) = 1 + \frac{e^{-x}}{1 - e^{-x}} = \frac{1}{1 - e^{-x}}\]

Thus,
\[\operatorname{cond}_A(x) \leq \frac{\varphi(x)}{\operatorname{cond}_f(x)} = \frac{1}{1 - e^{-x}} \cdot \frac{e^x-1}{x} = \frac{e^x}{x} \]



\subsubsection{}
We have \( \operatorname{cond}_f = \frac{x}{e^x-1} \) and \( \operatorname{cond}_A(x) \leq \frac{e^x}{x}\) and here is the figure.
\begin{figure}[H]
  \centering
  \includegraphics[width=0.8\textwidth]{"09.png"}
  \caption{Condition numbers for \(f(x)\) and Algorithm \(A\)}
\end{figure}

My result is that when \( x = 0 \), \( f(0) = 0 \), \( f_A(x) = f(x)(1 + \delta(x)) \), \( \delta(x) \rightarrow 0 \).

So as \( x \rightarrow 0 \), \( \operatorname{cond}_A(x) \rightarrow \infty \).




\subsection{X. }

Prove \textbf{Lemma 4.68}:

\subsubsection{}
(1) \(\operatorname{cond}_{2} A := \|A\|_{2} \|A^{-1}\|_{2} = \frac{\sigma_{\max}}{\sigma_{\min}}\)
where \( \sigma_{\max} \) and \( \sigma_{\min} \) are the largest and the smallest singular values of \( A \). \\ \textbf{proof:}

By definition,
\[\|A\|_{2}=\max_{|x|=1} \|Ax\|_2=\max_{|x|=1} |x^*A^*Ax|^{\frac{1}{2}} \]

Where \(^*\) is the conjugate transpose.

By SVD decomposition, \(A = U \Sigma V^*\), where \(U\) and \(V\) are unitary and \(\Sigma\) is diagonal with positive (because \(A\) is nonsingular) singular values \(\sigma_i\) on its diagonal. So we obtain:
\[\|U\|_{2}=\|V\|_{2}=1\]
\[\|A\|_{2}=\|U \Sigma V^*\|_{2}=\max_{|x|=1} |x^*V \Sigma^* U^*U \Sigma V^*x|^{\frac{1}{2}}=\max_{|y|=1} |y^* \Sigma^*\Sigma y|^{\frac{1}{2}}=\|\Sigma\|_{2}\]
\[\|\Sigma\|_{2}=\max_{|x|=1} |x^* \Sigma^*\Sigma x|^{\frac{1}{2}}=\max_{\sum_{i=1}^{n}|x_i|^2=1}\left(\sum_{i=1}^{n} |x_i|^2\sigma_i^2\right)^{\frac{1}{2}}=\sigma_{\max}\]

On the other hand, \[A^{-1}=V^{-*} \Sigma^{-1} U^{-1} \Rightarrow \|A^{-1}\|_{2}=\|\Sigma^{-1}\|_{2}\]

\(\Sigma^{-1}\) is diagonal with positive singular values \(\frac{1}{\sigma_i}\) on its diagonal. So we obtain:

\[\|\Sigma^{-1}\|_{2}=\max_{\sum_{i=1}^{n}|x_i|^2=1}\left(\sum_{i=1}^{n} |x_i|^2\frac{1}{\sigma_i^2}\right)^{\frac{1}{2}}=\frac{1}{\sigma_{\min}}\]

Thus, \[\|A\|_{2}=\sigma_{\max},\ \|A^{-1}\|_{2}=\frac{1}{\sigma_{\min}}\Rightarrow \|A\|_{2} \|A^{-1}\|_{2} = \frac{\sigma_{\max}}{\sigma_{\min}}\]


\subsubsection{}
(2) If \( A \) is also normal, we have
\(\operatorname{cond}_{2} A = \frac{|\lambda_{\max}|}{|\lambda_{\min}|},
\)\\ \textbf{proof:}

\(A\) is normal, so it holds the property that there exists a unitary matrix \(U\) s.t. \(A=UTU^* \), where \(T\) is diagonal with eigenvalues \(\lambda_i\) of \(A\) on its diagonal. So we obtain:

\[\|A\|_{2}=\max_{|x|=1} |x^*U T^* U^*U T U^*x|^{\frac{1}{2}}=\max_{|y|=1} |y^* T^*T y|^{\frac{1}{2}}=\max_{\sum_{i=1}^{n}|y_i|^2=1}\left(\sum_{i=1}^{n} |y_i|^2\overline{\lambda_i}\lambda_i\right)^{\frac{1}{2}}=|\lambda_{\max}|\]

Similarly, \[\|A^{-1}\|_{2}=\frac{1}{|\lambda_{\min}|}\]

Therefore, if \( A \) is also normal, \(\|A\|_{2} \|A^{-1}\|_{2} = \frac{|\lambda_{\max}|}{|\lambda_{\min}|} \).


\subsubsection{}
(3) If \( A \) is unitary, we have \( \operatorname{cond}_{2} A = 1 \).\\ \textbf{proof:}

We have \( A \) and \( A^{-1} \) are unitary and thus \[\|A\|_{2}=1,\ \|A^{-1}\|_{2}=1 \Rightarrow \|A\|_{2} \|A^{-1}\|_{2}=1\] 




\subsection{XI. }

Let \(\mathbf{a}=(a_{0}, a_{1}, \ldots, a_{n-1})\). For \(r = f(\mathbf{a})\),
\[0 = q(r) = \sum_{i=0}^{n} a_{i} r^{i},\ a_n=1,\ a_0 \neq 0 \Rightarrow r \neq 0\]

Taking the derivative with respect to \(a_k\),
\[r^k+\sum_{i=1}^{n}ia_ir^{i-1} \cdot \frac{\partial r}{\partial a_k}=0\]

Hence,
\(\nabla f=\left(\frac{\partial r}{\partial a_0},\cdots,\frac{\partial r}{\partial a_{n-1}}\right)\), where \[\frac{\partial r}{\partial a_k}=-\frac{r^k}{q'(r)} \]

Based on the 1-norm, the \textbf{componentwise} condition number
is \(\operatorname{cond}_f= \|A(x)\|_1\), where

\[A(x)=(\frac{a_0\frac{\partial f}{\partial a_0}}{r},\cdots,\frac{a_{n-1}\frac{\partial f}{\partial a_{n-1}}}{r}) \]
\[\Rightarrow \|A(x)\|_1=\max_{0\leq k \leq n-1} \left|\frac{a_k r^{k}}{rq'(r)}\right|=\frac{1}{|q'(r)|}\max_{0\leq k \leq n-1} |a_k r^{k-1}| \]
\[\Rightarrow \operatorname{cond}_f= \|A(x)\|_1=\frac{1}{|q'(r)|}\max_{0\leq k \leq n-1} |a_k r^{k-1}| \]

In the Wilkinson Example, \(q:=\prod_{k=1}^p (x-k)\), for root \(r=p\), \(h \approx \frac{\epsilon p^p}{(p-1)!} \).

We have \[n=p,\ a_{p-1}=\frac{p(p+1)}{2},\ q'(r)=\prod_{k=1}^{p-1} (p-k)=(p-1)!\]
\[\Rightarrow \operatorname{cond}_f \geq \frac{1}{|q'(r)|}|a_{p-1} p^{p-2}|=\frac{1}{(p-1)!}\frac{p(p+1)}{2} p^{p-2} \geq \frac{p^p}{2(p-1)!} \]

So, \( \text{cond}_f(\mathbf{a}) \rightarrow \infty \) as \( p \rightarrow \infty \).

The result indicates that the condition number of the Wilkinson polynomial is very large if its degree \( p \) is large.

Thus, finding roots of high-degree polynomials is challenging by numerical methods.




\subsection{XII. }

(I struggled to find an example, and I guess \(p\) must be small for an example to exist; there are no examples under \(\beta=10\) \textbf{if the leading zeros in the division result do not occupy positions.})

My example is on \(\mathcal{F}:(2,2,-5,5);\ a=(4)_{10}=1 \times 2^2,\ b=(3)_{10}=1.1 \times 2^1 \).

In a register of precision \(2p=4\): (Under this precision we cannot see more digits and there is a tie when choosing the last digit.)
\[\frac{a}{b}=1.0101\cdots \times 2^0=1.010 \times 2^0\]

Round to precision \(p=2\): (take an even last digit in case of a tie)
 \[\frac{a}{b}=1.0 \times 2^0=(1)_{10}\]

Thus, the round error is (under decimal):
\[\delta=\frac{1}{\frac{4}{3}}-1=-\frac{1}{4} \]

Since \(\epsilon_u=\frac{1}{2} \beta^{1-p}=\frac{1}{4}=|\delta|\), we reached a contradiction to the conclusion of \textit{the model of machine arithmetic}!

\textbf{P.S.} By the way, in a register of precision \(2p+1=5\), \[\frac{a}{b}=1.010101\cdots \times 2^0=1.0101 \times 2^0\]

Round to precision \(p=2\), \[\frac{a}{b}=1.1 \times 2^0=(1.5)_{10}\]

Hence, round error satisfies \textit{the model of machine arithmetic} this time:
\[\delta=\frac{\frac{3}{2}}{\frac{4}{3}}-1=\frac{1}{8}<\epsilon_u \]





\subsection{XIII. }

From \textit{IEEE 754 single-precision protocol}, we have \(\beta=2,\ p=24\) and \[128=1.\overbrace{0\cdots0}^{23 \text{ zeros}} \times 2^7,\ 129=1.\overbrace{0\cdots0}^{6 \text{ zeros}}1\overbrace{0\cdots0}^{16 \text{ zeros}}\times 2^7 \]

Hence, the difference between two adjacent FPNs between \([128,129]\) is \(0.\overbrace{0\cdots0}^{22 \text{ zeros}}1 \times 2^7= 2^{-16}\)

Thus the upper bound of the absolute error is \(\frac{1}{2} \times 2^{-16} > 10^{-6}  \) because \(2^{17} < 10^6\).

We CANNOT compute the root with absolute accuracy \(< 10^{-6}\).



\subsection{XIV. }

By \textbf{Theorem 3.7}, the cubic spline \( s \) is uniquely determined if all the \( f_i=s(f;x_i)\) and \(m_i=s'(f;x_i) \) are uniquely determined on all intervals. For a complete cubic spline we already have \( m_1 = f'(a) \) and \( m_N = f'(b) \). 

Here is the linear system to solve all \( m_i \)'s (by \textbf{Lemma 3.3}):
\[
\begin{bmatrix}
2 & \mu_2 & & & \\
\lambda_3 & 2 & \mu_3 & & \\
& \ddots & \ddots & \ddots & \\
& & \lambda_i & 2 & \mu_i & \\
& & & \ddots & \ddots & \\
& & & \lambda_{N-2} & 2 & \mu_{N-2} \\
& & & & \lambda_{N-1} & 2
\end{bmatrix}
\begin{bmatrix}
m_2 \\
m_3 \\
\vdots \\
m_i \\
\vdots \\
m_{N-2} \\
m_{N-1}
\end{bmatrix}
=\mathbf{b}
\]

where the vector \( b \) is determined from the known information and by \textbf{Equation (3.4)},
\[\mu_{i} = \frac{x_{i} - x_{i-1}}{x_{i+1} - x_{i-1}}<1, \quad \lambda_{i} = \frac{x_{i+1} - x_{i}}{x_{i+1} - x_{i-1}}<1\]

It implies that the matrix in the above equation is strictly diagonally dominant. Therefore, its determinant is nonzero, and the \( m_i \)'s can be uniquely determined.

Now we turn to a specific interval \([x_i,x_{i+1}]\), where \(\delta=x_{i+1}-x_i \) is relatively small. The equation for computing the cubic spline \( p(x) = a + b(x - x_i) + c(x - x_i)^2 + d(x - x_i)^3 \) on this interval is:
\[
\begin{bmatrix}
1 & 0 & 0 & 0 \\
1 & \delta & \delta^2 & \delta^3 \\
0 & 1 & 0 & 0 \\
0 & 1 & 2\delta & 3\delta^2
\end{bmatrix}
\begin{bmatrix}
a \\
b \\
c \\
d
\end{bmatrix}
=
\begin{bmatrix}
f_i \\
f_{i+1} \\
m_i \\
m_{i+1}
\end{bmatrix}
\]

We aim to solve \(a,b,c,d\) with fixed \(f_i,f_{i+1},m_i,m_{i+1}\) and \(\delta\).

As \(\delta\) becomes very small, the matrix becomes nearly singular. Thus its condition number grows very large, so the computed result becomes inaccurate.


\end{document}