\documentclass[a4paper]{ctexart}
\usepackage{amsmath}
\usepackage{amssymb}%>=
%\usepackage{enumerate}
\usepackage{graphicx}%formula for reference
\usepackage{subfig}
\ctexset{section/format = {\Large\bfseries}}
\setlength{\parindent}{0pt}
\title{The fourth assignment}
\author{付临\\3200104960\\信息与计算科学}
%\renewcommand\thesection{\Roman{section}} % Roman numerals for the sections
\renewcommand\thesubsection{\roman{subsection}} % roman numerals for subsections

\begin{document}
\maketitle
\section{Theory assignments}
\subsection{}
{\bfseries Solution:}\\
$\bullet$ Divide 477 by 2 and record the remainder.\\
$\bullet$ Repeat until the quotient reaches 0; reading the remainders from last to first, we get 111011101.\\
$\bullet$ Convert to a binary floating-point number,
that is, move the decimal point so that the integer part is a single 1. Since we moved the point 8 bits to the left,
we get $477=(1.11011101)_{2}\times 2^{8}$.\\
$\bullet$ So the normalized FPN with $\beta=2$ of 477 is $(1.11011101)_{2}\times 2^{8}$.  

\subsection{}
{\bfseries Solution:}\\
$\bullet$ Multiply $\frac{3}{5}$ by 2 and check whether the result is no less
than 1: if so record 1 and drop the integer part; otherwise record 0.\\
$\bullet$ Repeating this, the pattern $1001$ recurs forever, and we get $0.10011001\cdots$.\\
$\bullet$ Convert to a binary floating-point number, that is, move the
decimal point so that the integer part is a single 1. Since we moved the point 1 bit to the right,
we get $\frac{3}{5}=(1.0011001\cdots)_{2}\times 2^{-1}$.\\
$\bullet$ So the normalized FPN with $\beta=2$ of $\frac{3}{5}$ is
$(1.0011001\cdots)_{2}\times 2^{-1}$.

\subsection{}
{\bfseries Prove:}\\
Because $x=\beta^{e}$, $x_{L}<x<x_{R}$. Let $x_{L}=m_{1}\times \beta^{e-1}, 
x_{R}=m_{2}\times \beta^{e}$, $m_{1}, m_{2}\in[1,\beta)$. According to Lemma 4.23,
we get $x_{R}-x=\epsilon_{M}\beta^{e}, x-x_{L}=\epsilon_{M}\beta^{e-1}, \epsilon_{M}
=\beta^{1-p}$. So $x_{R}-x=\beta(x-x_{L})$.

\subsection{}
{\bfseries Solution:}\\
Because $\frac{3}{5}=(1.0011001\cdots)_{2}\times 2^{-1}$ and the IEEE 754
standard states that 23 bits are reserved for the mantissa. So $x_{L}=
(1.00110011001\cdots 1001)_{2}\times 2^{-1}$ and $x_{R}=(1.00110011001\cdots 1010)_{2}\times 2^{-1}$.
It follows that
\begin{align*}
    x-x_{L}&=\frac{3}{5}\times 2^{-24}\\
    x_{R}-x_{L}&=2^{-24}\\
    x_{R}-x&=\frac{2}{5}\times 2^{-24}
\end{align*}
So $fl(x)=x_{R}$. $\delta=\frac{x_{R}-x}{x}=\frac{\frac{2}{5}\times 2^{-24}}{\frac{3}{5}}
=\frac{2}{3}\times 2^{-24}$.

\subsection{}
{\bfseries Solution:}\\
If the IEEE 754 single-precision protocol did not round
off numbers to the nearest, but simply dropped excess
bits. We can get $fl(x)=x_{L}$. So the unit roundoff is $\max \frac{|x-x_{L}|}{|x|}
=2^{-23}$.

\subsection{}
{\bfseries Solution:}\\
We have $\cos \frac{1}{4}=(0.9689124\cdots)_{10}=(0.1111100000001\cdots)_{2}$.
We let $x=1, y=(0.1111100000001\cdots)_{2}$. So we have
\begin{align*}
    2^{-6} \leqslant 1-\frac{y}{x} \leqslant 2^{-5}
\end{align*}
Then the number of most significant digits that are lost in
the subtraction $1-\cos\frac{1}{4}$ is at most 6 and at least 5.

\subsection{}
{\bfseries Solution:}\\
$\bullet$ The Taylor expansion of $\cos x$ is
\begin{align*}
    \cos x=1-\frac{x^{2}}{2}+\frac{x^{4}}{4!}-\cdots
\end{align*}
So $1-\cos x=\frac{x^{2}}{2}-\frac{x^{4}}{4!}+\cdots$ and it can avoid catastrophic
cancellation caused by subtraction.\\
$\bullet$ Since $\cos x=1-2\sin^{2}\frac{x}{2}$, we have $1-\cos x=2\sin^{2}\frac{x}{2}$,
and it can avoid catastrophic cancellation caused by subtraction.

\subsection{}
{\bfseries Solution:}\\
$\bullet$ $C_{f}(x)=|\frac{xf'(x)}{f(x)}|=|\frac{x\alpha}{x-1}|$. So when $\alpha=0$, 
$C_{f}(x)=0$. When $\alpha \neq 0$, $C_{f}(x) \rightarrow +\infty$ as $x \rightarrow 1$.\\

$\bullet$ $C_{f}(x)=|\frac{xf'(x)}{f(x)}|=|\frac{1}{\ln x}|$. So $C_{f}(x) \rightarrow
+\infty$ as $x \rightarrow 1$.\\

$\bullet$ $C_{f}(x)=|\frac{xf'(x)}{f(x)}|=|x|$. So $C_{f}(x) \rightarrow +\infty$ as
$x \rightarrow \pm \infty$.\\

$\bullet$ $C_{f}(x)=|\frac{xf'(x)}{f(x)}|=|\frac{x}{\sqrt{1-x^{2}}\arccos x }|$. So
$C_{f}(x) \rightarrow +\infty$ as $x \rightarrow \pm 1$. 

\subsection{}
{\bfseries Solution:}\\
$\bullet$ We can get $cond_{f}(x)=|\frac{x}{e^{x}-1}|$. When $x\in[0,1]$, $cond_{f}(x)
=\frac{x}{e^{x}-1}$. Obviously, $cond_{f}(x)$ is monotone decreasing. Since
$\lim_{x \to 0} \frac{x}{e^{x}-1}=1$, we can get $cond_{f}(x) \leq 1$.\\

$\bullet$ It is easy to compute that
\begin{align*}
    cond_{f}(x)&=\frac{x}{e^{x}-1}\\
    f_{A}(x)&=[1-e^{-x}(1+\delta_{1})](1+\delta_{2})\\
    &=\frac{e^{x}-1}{e^{x}}(1+\delta_{2}-\frac{\delta_{1}}{e^{x}-1})
\end{align*}
where $|\delta_{i}|\leq\epsilon _{u}$ for $i=1,2$. Hence we have $\varphi(x)=2+\frac{1}{e^{x}-1}$ 
and
\begin{align*}
    cond_{A}(x)\leq\frac{e^{x}-1}{x}(2+\frac{1}{e^{x}-1})=\frac{2e^{x}-1}{x}
\end{align*}
Hence, $cond_{A}(x)$ may be unbounded as $x\rightarrow 0$. On the other hand, $cond_{A}(x)$
is bounded by $(e-1)(2+\frac{1}{e-1})=2e-1$ as $x\rightarrow 1$.\\

$\bullet$\\
\includegraphics[width=\textwidth]{nine.png}\\
So we can get $cond_{f}(x) \leq cond_{A}(x)$, when $x\in [0,1]$. And $cond_{A}(x)
\rightarrow \infty$ as $x\rightarrow 0$.

\subsection{}
{\bfseries Solution:}\\
Because $cond_{f}(x)=||A(x)||_{1}$ and $a_{ij}(x)=|\frac{x_{j}\frac{\partial f_{i}}{\partial x_{j}}}
{f_{i}(x)}|$. So $a_{1j}=|\frac{a_{j-1}\frac{\partial r}{\partial a_{j-1}}}{r}|$. Because
\begin{align*}
    \frac{\partial q}{\partial a_{i}}&=r^{i}\\
    \frac{\partial q}{\partial r}&=q'(r)
\end{align*}
So $\frac{\partial r}{\partial a_{j-1}}=\frac{r^{j-1}}{q'(r)}$ and $a_{1j}=
|\frac{a_{j-1}r^{j-1}}{rq'(r)}|$. Then $cond_{f}(a)=||A(a)||_{1}=\max_{j}|\frac{a_{j-1}
r^{j-1}}{rq'(r)}|$.\\
For the Wilkinson problem: $q(r)=\prod _{i=1}^{n}(r-i), q'(r)=\sum_{i = 1}^{n}\prod_{j\neq i}
(r-j)$. The largest root of $q(r)$ is $n$. Because $a_{n-1}=-\frac{(n+1)n}{2}$ and $q'(n)
=(n-1)! $. So we have
\begin{equation*}
    a_{1n}=|\frac{-\frac{(n+1)n}{2}n^{n-2}}{(n-1)!}|\geqslant\frac{n^{n}}{n!}\quad (n\geqslant 2)
\end{equation*}
So $cond_{f}(a)=\max_{j}|a_{1j}|\geqslant|a_{1n}|\geqslant\frac{n^{n}}{2n!}$. As $n\rightarrow
\infty$, $cond_{f}(a)$ grows extremely fast, and hence a small change of the coefficients
would cause a large change of the root.

\subsection{}
{\bfseries Solution:}\\
Let's say $\beta =10, p=2, 2p=4$ and $M_{c}=0.1045, e_{c}=1$. Then $M_{c}\approx 0.105$.
After normalization, we can get $M_{c}=1.050, e_{c}=0$. Because $p=2$, we get $M_{c}=1.1
, e_{c}=0$. Then the relative error is $\delta=\frac{0.055}{1.045}>\epsilon _{u}$.

\subsection{}
{\bfseries Solution:}\\
In IEEE 754 single precision, $\beta =2, p=24$, and for $x\in[128,129]$ we have $e=7$.
So the roundoff error of a floating point number there can be as large as $\epsilon _{u}2^{7}\approx 7.6
\times 10^{-6} > 10^{-6}$. Hence we cannot compute the root with the accuracy of $10^{-6}$. 

\subsection{}
{\bfseries Solution:}\\
Suppose when we calculate $S_{n}^{k}(x_{1},\cdots,x_{N})$, $x_{i}$ and $x_{i+1}$ are very
close. Let $s_{i}(x)=ax^{3}+bx^{2}+cx+d$. The coefficients solve the linear system
\begin{equation*}
    A\left(\begin{array}{c}
        a\\
        b\\
        c\\
        d\\
    \end{array}\right)
    =\left(\begin{array}{c}
        s_{i}(x_{i})\\
        s_{i}(x_{i+1})\\
        s_{i}'(x_{i})\\
        s_{i}'(x_{i+1})\\
    \end{array}\right),
    \quad\text{where}\quad
    A=\left(\begin{array}{cccc}
        x_{i}^{3} & x_{i}^{2} & x_{i} & 1  \\
        x_{i+1}^{3} & x_{i+1}^{2} & x_{i+1} & 1 \\
        3x_{i}^{2}& 2x_{i} & 1 & 0  \\
        3x_{i+1}^{2}& 2x_{i+1} & 1 & 0\\
    \end{array}\right)
\end{equation*} 
Then we can get when $x_{i}$ and $x_{i+1}$ are very close, $cond_{A}$ is big.\\
Take the complete spline for example. Suppose $x_{i+1}-x_{i}=\epsilon$. We have
\begin{equation*}
    A=\left(\begin{array}{cc}
        \epsilon^{2} & \epsilon^{3}\\
        2\epsilon & 3\epsilon^{2}\\
    \end{array}\right)
\end{equation*}
So $||A||_{1}=2\epsilon+\epsilon^{2}$ and $||A^{-1}||_{1}=\frac{3}{\epsilon^{2}}+\frac{2}
{\epsilon^{3}}$. Then $cond_{A}=||A||_{1}||A^{-1}||_{1}=O (\frac{1}{\epsilon^{2}})$
which is big.

\newpage
\section{Programming assignments}
\subsection{A}
\includegraphics[width=\textwidth]{program.png}\\
From the image we can see that $h(x)$ is the most accurate. This is because $h(x)$ is the simplest
formula and involves far fewer additions, subtractions, multiplications and divisions than the
other two, so its relative error is relatively small.

\subsection{B}
$\bullet$\\
$UFL(\mathbb{F})=\beta^{L}=0.5$\\
$OFL(\mathbb{F})=\beta^{U}(\beta-\beta^{1-p})=3.5$\\

$\bullet$\\
We can get $e=-1,0,1$. Because $p=3$, we have:\\
When e=-1, the four FPNs
\begin{align*}
    1.00\times 2^{-1}, 1.01\times 2^{-1}, 1.10\times 2^{-1}, 1.11\times 2^{-1}
\end{align*}
When e=0, the four FPNs
\begin{align*}
    1.00\times 2^{0}, 1.01\times 2^{0}, 1.10\times 2^{0}, 1.11\times 2^{0}
\end{align*}
When e=1, the four FPNs
\begin{align*}
    1.00\times 2^{1}, 1.01\times 2^{1}, 1.10\times 2^{1}, 1.11\times 2^{1}
\end{align*}
Each of them has the sign of $\pm$, plus 0 is in $\mathbb{F}$. So we have
\begin{align*}
    \#\mathbb{F}=25=2^{p}(U-L+1)+1
\end{align*}

$\bullet$\\
\includegraphics[width=\textwidth]{normal.png}

$\bullet$
The additional subnormal numbers are
\begin{align*}
    0.01\times 2^{-1}, 0.10\times 2^{-1}, 0.11\times 2^{-1}
\end{align*}
Each of them has the sign of $\pm$.\\

$\bullet$\\
\includegraphics[width=\textwidth]{subnormal.png}
\end{document}