\documentclass{article}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

% useful packages.
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{fancyhdr}
\usepackage{layout}
% \usepackage{ctex}
\usepackage{listings}
\usepackage{subfigure}
\usepackage{setspace}

% some common command
\newcommand{\dif}{\mathrm{d}}
\newcommand{\avg}[1]{\left\langle #1 \right\rangle}
\newcommand{\difFrac}[2]{\frac{\dif #1}{\dif #2}}
\newcommand{\pdfFrac}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\OFL}{\mathrm{OFL}}
\newcommand{\UFL}{\mathrm{UFL}}
\newcommand{\fl}{\mathrm{fl}}
\newcommand{\op}{\odot}
\newcommand{\Eabs}{E_{\mathrm{abs}}}
\newcommand{\Erel}{E_{\mathrm{rel}}}
\newcommand{\RNum}[1]{\uppercase\expandafter{\romannumeral #1\relax}}

\usepackage{xcolor}
\usepackage{fontspec} 
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{comment}{rgb}{0.56,0.64,0.68}

\newfontfamily\monaco{Monaco}
\lstset {
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,       % underline spaces within strings
columns=flexible,
framerule=1pt,
rulecolor=\color{gray!35},
backgroundcolor=\color{gray!5},
basicstyle={\small\monaco},           % the size of the fonts that are used for the code
numbers=left,                   % where to put the line-numbers
numberstyle=\tiny\monaco\color{gray},  % the style that is used for the line-numbers
numbersep=5pt,                  % how far the line-numbers are from the code
commentstyle=\color{comment},
keywordstyle=\color{blue},
stringstyle=\color{dkgreen},
tabsize=2,                      % sets default tabsize to 2 spaces
captionpos=b,                   % sets the caption-position to bottom
breaklines=true,                % sets automatic line breaking
breakatwhitespace=false,        % sets if automatic breaks should only happen at whitespace
escapeinside={\%*}{*)},            % if add LaTeX within your code
morekeywords={*,...}               % if add more keywords to the set
}


\begin{document}
\title{Homework \#4}
\pagestyle{fancy}
\lhead{Name Li HuiTeng 3180102114}
\chead{ NumAnalysis\#4}
\rhead{Date 21.11.27}

\section{Theoretical questions}

\RNum{1}
\begin{proof}
  \begin{align*}
    477=256+128+64+16+8+4+1&=(111011101)_2\\
    &=(1.11011101)_2\times 2^8\\
    &=(1+\frac{1}{2}+\frac{1}{2^2}+\frac{1}{2^4}+\frac{1}{2^5}+\frac{1}{2^6}+\frac{1}{2^8})\times2^8
  \end{align*}
\end{proof}

\RNum{2}
\begin{proof}
  \begin{align*}
    \frac{3}{5}=(0.100110011001\cdots)_2=(1.001100110011\cdots)_2\times2^{-1}
  \end{align*}
\end{proof}

\RNum{3}
\begin{proof}
Since
\begin{align*}
  x_L&=(\beta-\beta^{1-p})\beta^{e-1},\\
  x_R&=(1+\beta^{1-p})\beta^{e},
\end{align*}
we have 
\begin{align*}
  x_R-x&=(1+\beta^{1-p})\beta^{e}-\beta^e=\beta^{1-p+e},\\
  \beta(x-x_L)&=\beta^{e+1}-(\beta-\beta^{1-p})\beta^e=\beta^{1-p+e},
\end{align*}
which proves $x_R-x=\beta(x-x_L)$.
\end{proof}

\RNum{4}
\begin{proof}
We have 
\begin{align*}
  x_L&=(1.00110011001100110011001)_2\times2^{-1},\\
  x_R&=(1.00110011001100110011010)_2\times2^{-1},\\
  x-x_L&=(1.0011001100\cdots)_2\times2^{-24}=0.6\times2^{-24},\\
  x_R-x&=\epsilon_M2^{-1}-(x-x_L)=0.4\times2^{-24},\\
  fl(x)&=x_R,\\
  e_r&=|x_R-x|/x=\frac{2^{-23}}{3}.
\end{align*}
\end{proof}

\RNum{5}
\begin{proof}
Since $\beta > 1$, the unit roundoff would be 
$\sum_{k=p}^{\infty}(\beta-1)\beta^{-k}=\beta^{1-p}$. 
For IEEE 754 single-precision protocol, it would be $2^{-23}$.
\end{proof}

\RNum{6}
\begin{proof}
Since $1-\cos(0.25)=(1.9896\cdots)\times2^{-6}$, 
to normalize the mantissa into the interval $[1,\beta)$, it 
would be multiplied by $2^6$. Hence the number of lost bits 
of precision is $6$ for $\beta=2$.
\end{proof}

\RNum{7}
\begin{proof}
One way is to compute $2\sin(x/2)\times\sin(x/2)$, and another way is to use the Taylor series
\begin{align*}
  1-\cos(x)=\sum_{k=1}^{\infty}(-1)^{k+1}\frac{x^{2k}}{(2k)!}.
\end{align*} 
\end{proof}

\RNum{8}
\begin{proof}
We have 
\begin{align*}
  C_{f1}&=\left|\frac{\alpha x}{x-1}\right|,\\
  C_{f2}&=\left|\frac{1}{\ln(x)}\right|,\\
  C_{f3}&=|x|,\\
  C_{f4}&=\left|\frac{x}{\sqrt{1-x^2}\arccos(x)}\right|.
\end{align*}
And when $x$ satisfies the following statement, 
the corresponding condition number will be large.
\begin{align*}
  f_1&:x\to1,\\
  f_2&:x\to1,\\
  f_3&:|x|\to \infty,\\
  f_4&:|x|\to 1. 
\end{align*}
\end{proof}

\RNum{9}
\begin{proof}
  For (a), 
  \begin{align*}
    cond_{f}=\frac{xe^{-x}}{1-e^{-x}}=\frac{x}{e^{x}-1}\leq1,
  \end{align*}
  where the inequality follows from $e^x\geq x+1$ $(x\geq0)$, and $\lim_{x\to 0}\frac{x}{e^x-1}=1$ shows the bound is sharp. 

  For (b), 
  \begin{align*}
    y_A&=(1-e^{-x}(1+\delta_1))(1+\delta_2)\\
    &=1-e^{-x}-\delta_1e^{-x}+\delta_2-e^{-x}\delta_2\\
    &=(1-e^{-x})(1+\delta_2-\delta_1\frac{1}{e^x-1}),
  \end{align*}
where $\delta_i\leq\epsilon_u$ for $i=1,2$ and second-order terms are neglected in the third step. 
 Hence we have $\phi(x)=1+\frac{1}{e^x-1}$ and 
 \begin{align*}
   cond_A(x)\leq \frac{e^x-1}{x} (1+\frac{1}{e^x-1})=\frac{e^x}{x}.
 \end{align*}

 For (c), by plotting we see 

 \includegraphics[width=0.45\textwidth]{condf.png}
\includegraphics[width=0.45\textwidth]{condA.png}

\lstset{language=Matlab}
\begin{lstlisting}
  %This is a file named hw49.m.
  clear;
  figure(1);
  x=0.0:0.01:1;
  cond_f=x.*exp(-x)./(1-exp(-x));
  plot(x,cond_f,'LineWidth',3);set(gca,'FontSize',20);title('cond_f');
  saveas(1,'condf.png');
  cond_A=(1+1./(exp(x)-1))./cond_f;
  plot(x,cond_A,'LineWidth',3);set(gca,'FontSize',20);title('cond_A');
  saveas(1,'condA.png');
\end{lstlisting}
The function itself is quite well-conditioned, since 
\begin{align*}
  0.5 \leq cond_f(x) \leq 1, \forall x \in[0,1].
\end{align*}
And the algorithm A is ill-conditioned near $x=0$ because of catastrophic cancellation 
and well-conditioned near $x=1$.
\end{proof}

\RNum{10}
\begin{proof}
  Define $a=(a_0,a_1,\cdots,a_{n-1})$ and $f_i=\pdfFrac{f}{a_i}$. 
   Then we have $r=f(a),q(r)=0$, 
  and the componentwise condition number should be 
  \[
  cond_f(a)=\sum\limits_{i=0}^{n-1}\frac{|a_if_i(a)|}{|f(a)|}.  
  \]
  Since $q(r)=q(f(a))=\sum_{i=0}^{n}a_if(a)^i\equiv 0$, differentiating this with respect to $a_k$,
   $k=0,1,\cdots,n-1$, we have
  \[
  0= \pdfFrac{}{a_k}q(f(a))=f_k(a)\sum\limits_{i=1}^{n}ia_if(a)^{i-1}+f(a)^k, 
  \] 
  which can be written as $f_k(a)q'(r)+r^k=0$.

  When $r$ is a simple root, we have $q'(r)\neq0$, which yields 
  \[
  cond_f(a)= \frac{1}{|q'(r)|}\sum\limits_{i=0}^{n-1}|a_ir^{i-1}|. 
  \]
  Consider the Wilkinson Example, i.e. $q(x)=\prod_{k=1}^{n}(x-k)$ and take $r=n$. Then we have 
  \begin{align*}
    cond_f(a)= \frac{1}{(n-1)!}\sum\limits_{i=0}^{n-1}|a_in^{i-1}|\geq\frac{1}{(n-1)!}|a_{n-1}n^{n-2}|=\frac{1}{(n-1)!}\frac{n(n+1)}{2}n^{n-2}\geq\frac{1}{2(n-1)!}n^{n},    
  \end{align*}
  which is similar to $h=-\epsilon\frac{n^n}{(n-1)!}$ in Wilkinson Example and in the same way it proves that 
  a small change of coefficients would cause a huge change of the root when $n$ is large. The comparison also 
  implies that solving a problem with huge condition number directly is hopeless.  
\end{proof}
\newpage
\RNum{11}
\begin{proof}
  Consider the calculation of $c:=fl(a/b)$, in 
  an FPN system with $\mathcal{F}:\{10,p,L,U\} $. 

  First, assume $p=4$, $a=1.000$ and $b=9.995$. Since $M_a=1.000$, $M_b=9.995$ and $\frac{1.000}{9.995}=0.100050025\cdots$, we have $M_c=0.1000500$, due to a necessary roundoff in the register 
  of precision $8$. After normalization, $\beta M_c=1.000500$ and $e_c=-1$. Hence, the final result 
  is $M_c=1.000$ with a roundoff to precision $4$, caused by \textbf{round to even} in the case of a tie.

  Therefore, we have 
  \begin{align*}
    \left|\frac{\fl(a/b)-a/b}{a/b}\right|=\frac{\left|1.000\times10^{-1}-\frac{1.000}{9.995}\right|}{\frac{1.000}{9.995}}=|0.9995-1|=0.0005=\frac{1}{2}\times10^{1-4},
  \end{align*}
  which implies $|\delta|=\epsilon_u$ and contradicts the conclusion of the model of machine arithmetic. 
  However, $2p$ is just enough for the register if one's proposal is to just satisfy $|\delta| \leq \epsilon_u$. To be more precise, we will show the max possible $|\delta|$ 
  for "the worst case" cannot be greater than $\epsilon_u$. 

  Just consider the worst case in which $2p$ might fail, i.e. a wrong carry-over occurs when $|M_a/M_b|<1$.
  Then for $\mathcal{F}:\{10,4,L,U\}$, the normalized mantissa of the exact quotient (whose precision is supposed to be infinite), 
  denoted by $x=a/b$, must satisfy either of the following statements, 
  which are necessary for a wrong carry-over in a $2p=8$ register:

  $$(A)x=a_0.a_1a_2a_3499a_7a_8\cdots,$$ 
  
  where $a_0\geq1,a_7\geq5$ and $a_3$ be odd. The output would be $a_0.a_1a_2a_3+10^{1-p}$, and it should have been $a_0.a_1a_2a_3$.
  
  $$(B)x=a_0.a_1a_2a_3500a_7a_8\cdots,$$
  
  where $a_0\geq1,1\leq a_7\leq4$ and $a_3$ be even. The output would be $a_0.a_1a_2a_3$, and it should have been $a_0.a_1a_2a_3+10^{1-p}$.(caused by \textbf{round to even})

  If $x$ is in situation (A), i.e. $x=a_0.a_1a_2a_3499a_7a_8\cdots$, we next show that this is impossible.

  By floating, in the $2p=8$ register 
  we have $M_c=0.a_0a_1a_2a_3500$ and $\beta M_c=a_0.a_1a_2a_35000$ and $M_{c2}=a_0.a_1a_2a_3+10^{1-p}$. 
  
  By computing the max possible absolute error, we have $|M_{c2}-x|=e_{abs}\leq5\times10^{-p}+5\times10^{-2p+1}$, 
  where the equation holds if and only if $x=a_0.a_1a_2a_3499500\cdots$. 
  Then if $|\frac{E_{abs}}{x}|>\epsilon_u$ holds, $x<1+5\times10^{-p+1}$, which implies $a_0=1,a_i=0,\forall i=1,\cdots,p-1$. However, $a_3$ should be odd. 
  A contradiction! 

  Therefore, $x$ must be in situation (B), i.e. $x=a_0.a_1a_2a_3500a_7a_8\cdots$.
  
  After simple enumeration, we find $a_0=1.000,b_0=9.995,a_0/b_0=0.100050025\cdots$ and $|\delta|=\epsilon_u$. Next we will show $(a_0,b_0)$ is the only possible pair that fails $|\delta|<\epsilon_u$.
  
  First, similar inequalities still hold for situation (B), namely
  \begin{align*}
    \text{ if } |\frac{E_{abs}}{x}|>\epsilon_u \text{ holds, }\\
    |M_{c2}-x|&<5\times10^{-p}+5\times10^{-2p+1},\\ 
     x&<1+5\times10^{-p+1}. 
  \end{align*}
  This implies $x$ should be 
  \[x=1.00a_3500a_7a_8\cdots,\]
  where $a_3\in\{0,2,4\},1\leq a_7\leq 4$.
  
  Then
  \begin{align*}
  \frac{0.000500a_7a_8\cdots}{1.00a_3500a_7a_8\cdots}&>5\times10^{-4},\\
    0.000500a_7a_8\cdots&>0.0005+a_3\times5\times10^{-4-3}+(0.000500a_7\cdots)\times5\times10^{-4}\Rightarrow \\
    a_7.a_8\cdots>5\times a_3 .
  \end{align*} 
  Since $5a_3< a_7+1\leq5$, we have $a_3=0$. Hence $x=1.000500a_7a_8\cdots$, $x\in[1.0005001,1.0005005)$.
  
  Since $b_{sup}=9.999$, then $a \leq x_{sup}\times b_{sup}\times10^{-1}=1.000400\cdots$, which yields $a=1.000$.
  
  Since $1.000/9.994=0.100060\cdots$, we have $\forall b \leq 9.994,1.000/b>x_{sup}$ and fails the constraint. 
  
  Hence we only need to check $b\in\{9.996,9.997,9.998,9.999\}$. By simple calculation, all four numbers fail the constraint on $x$.
  
  Thus we prove the only possible case for breaking $|\delta|<\epsilon_u$ is $(a,b)=(1.000,9.995)$ with $|\delta|=\epsilon_u$. 
  
  Generally, for any $\beta,p$, when register has a precision $2p$, $a_p=1.0\cdots0$ and $b_p=\beta(1-\epsilon_u)$ is the only possible case for breaking $|\delta|<\epsilon_u$ with $\delta=\epsilon_u$. The proof is just similar.
\end{proof}

\RNum{12}
\begin{proof}
  When the distance between two adjacent points is much smaller than the distances between other 
  adjacent pairs, the coefficient matrix $A$ has a pair of rows much closer to each other, 
  so $A$ is nearly singular. As a result, the 2-norm condition number of $A$ becomes 
  catastrophically large, and the computed solution can be very inaccurate.
\end{proof}

\RNum{13}
\begin{proof}
	The answer is No. This is because single precision only permits 23 bits of mantissa. To achieve an absolute accuracy no greater 
	than $10^{-6}$, the fractional part of our guesstimate demands at least 18 valid bits, since $2^{-18}=8^{-6}>10^{-6}$. However, 
	the integer part also demands 8 bits since $128=(10000000)_2$. As a consequence, the number of valid bits of mantissa should be no less than $18+8-1=25$, which 
	apparently exceeds what a single-precision machine provides. ($-1$ is because the leading `1' of a normalized mantissa is implicit and not stored.)
\end{proof}

\RNum{14}
\begin{proof}
  We have 
\begin{align*}
  cond_f&=\frac{x\cdot\frac{1}{1+\cos x}}{\frac{\sin x}{1+\cos x}}=\frac{x}{\sin x},\\
  f_A&=\frac{\sin x(1+\delta_1)}{(1+\cos x(1+\delta_2))(1+\delta_3)}(1+\delta_4),
\end{align*}
where $\delta_k$ are the relative errors committed, respectively, in evaluating
the sine function, the cosine function, the sum, and the quotient. 
Neglecting terms of $O(\delta_k^2)$, we obtain
\begin{align*}
  f_A&=\frac{\sin x}{1+\cos x}\cdot\frac{1+\cos x}{1+\cos x(1+\delta_2)}\cdot\frac{(1+\delta_1)(1+\delta_4)}{1+\delta_3}\\
  &=\frac{\sin x}{1+\cos x}\left(1-\frac{\delta_2\cos x}{1+\cos x(1+\delta_2)}\right)(1+\delta_1)(1+\delta_4)(1-\delta_3)\\
  &=\frac{\sin x}{1+\cos x}\left(1-\frac{\delta_2\cos x}{1+\cos x(1+\delta_2)}\right)(1+\delta_1+\delta_4-\delta_3)\\
  &=\frac{\sin x}{1+\cos x}\left(1+\delta_1+\delta_4-\delta_3-\frac{\delta_2\cos x}{1+\cos x}\right).
\end{align*}
Then, choose $\phi(x)=3+\frac{\cos x}{1+\cos x}$ and 
\begin{align*}
  cond_A(x)\leq \frac{\sin x}{x}\left(3+\frac{\cos x}{1+\cos x}\right):=h(x).
\end{align*}
Since
\begin{align*}
  \frac{6}{\pi}\leq h(x) \leq \frac{7}{2},\quad 0<x<\frac{\pi}{2},
\end{align*}
the algorithm A is entirely well conditioned.
\end{proof}

\section{C++ programming}
Codes are contained in the e-mail tar, and only conclusions will be shown in the following part.
\subsection{\textbf{Assignment A}}
\lstset{language=Matlab}
\begin{lstlisting}
  >> Assignment
  ------------------------Assignment A--------------------------
  Plot has been generated as AssignmentA.png.
\end{lstlisting}
After plotting, we have

\includegraphics[width=0.45\textwidth]{AssignmentA1.png}
\includegraphics[width=0.45\textwidth]{AssignmentA.png}

The above plots record the violent oscillations of $f(x)$ and $g(x)$, which make function values even 
not positive as they should be. $h(x)$ behaves well and is the most accurate among three. 

This phenomenon can be attributed to the catastrophic cancellation caused by 
frequent operations in $f(x)$ and $g(x)$, especially additions and subtractions. $f(x)$ takes $\sum_{k=1}^8k=36$ 
multiplications and $8$ additions/subtractions, and these trigger catastrophic cancellation since it repeatedly adds a big number to a small number. $g(x)$ takes $8$ multiplications and $8$ additions/subtractions, which also trigger catastrophic cancellation. 
$h(x)$ takes $8$ multiplications and $1$ subtraction, and it cannot trigger catastrophic cancellation since $x-1$ is exact in the working precision. 

Therefore, $h(x)$ behaves far better than the other two, and $g(x)$ behaves a little better than $f(x)$. 

\subsection{\textbf{Assignment B}}
\lstset{language=Matlab}
\begin{lstlisting}
  ------------------------Assignment B--------------------------
  (1)UFL=0.500000,OFL=3.500000.
  (2)all normal numbers in F are listed, in a totol of 25:
           -3.5
             -3
           -2.5
             -2
          -1.75
           -1.5
          -1.25
             -1
         -0.875
          -0.75
         -0.625
           -0.5
              0
            0.5
          0.625
           0.75
          0.875
              1
           1.25
            1.5
           1.75
              2
            2.5
              3
            3.5
  
  (3)Plot has been generated. 
  (4)all subnormal numbers in F are listed, 
          0.125
           0.25
          0.375
         -0.375
          -0.25
         -0.125
  
  (5)Plot has been generated as AssignmentB.png 
\end{lstlisting}
After plotting, we have

\includegraphics[width=0.9\textwidth]{AssignmentB.png}

where blue points are normal numbers and green points are subnormal numbers. 

By corollary, the cardinality of $F$ should be $2^3(1+1+1)+1=25$, which corresponds to the output.
\end{document}
%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End: 
