\documentclass[a4paper,12pt,twocolumn]{article}
\usepackage{graphicx}
\usepackage{listings}
\usepackage[justification=centering]{caption}
\usepackage{subfigure}  
\usepackage{multirow}
\usepackage{balance}
\usepackage{gensymb}
\lstset{language=Matlab}
\lstset{breaklines}
\lstset{extendedchars=false}

\usepackage{amsmath,amsfonts,amsthm,amssymb}
\theoremstyle{definition}
\newtheorem{thm}{Theorem}
\newtheorem{exmp}{Example}
\newtheorem{defn}{Definition}
\newtheorem{lema}{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{coro}{Corollary}


\renewcommand{\baselinestretch}{1.25}

%------------setlength----------------%
\setlength{\textwidth}{162mm}
%\setlength{\textheight}{190mm}
\setlength{\textheight}{231mm}
\setlength{\headheight}{-0.1cm}
\setlength{\topmargin}{-0.1cm}
\setlength{\oddsidemargin}{-0cm}
\setlength{\evensidemargin}{-0cm}

\setlength\columnsep{12pt}
\setlength\columnseprule{0.4pt}

\setlength{\parskip}{1mm}
\setlength{\unitlength}{1mm}
\setlength{\parindent}{2em}

\title{Homework 1}

\author{Li Zhiqi\quad 3180103041}

\begin{document}
\maketitle
\section{Assignments}

\uppercase\expandafter{\romannumeral1}.\\
\begin{itemize}
\item $h_0 = 2$, then $h_n = 2^{-n}\cdot h_0 = 2^{1-n}$.\\
\item For the case that the root lies close to endpoints of interval, the maximum distance is close to $d =
  \frac{h_n}{2} = 2^{-n}$.
\end{itemize}
\uppercase\expandafter{\romannumeral2}.\\
Take a sequence of roots $\{\alpha_n\}$ lying in $(a_0,b_0)$ such that $ \lim_{n \to \infty}\alpha_n = a_0$. Then
\begin{eqnarray}
  \frac{(b_0 - a_0)\cdot 2^{-(n+1)}}{a_0} \leq \frac{|c_n - a_0|}{|a_0|} \nonumber\\
   = \lim_{n \to \infty}\frac{|c_n - \alpha_n|}{|\alpha_n|} \leq \epsilon,
\end{eqnarray}
where the first inequality comes from the definition of $c_n$, and the last inequality comes from the fact that the
relative error is no greater than $\epsilon$.\\
Then $\frac{(b_0 - a_0)\cdot 2^{-(n+1)}}{a_0} \leq \epsilon$ implies
\begin{equation}
  n \geq \frac{\log{(b_0 - a_0)} - \log{\epsilon} - \log{a_0}}{\log{2}} -1.
\end{equation}
\uppercase\expandafter{\romannumeral3}.\\
With FPN systems, the interval $[128,129]$ should be represented as $[1\times2^7,(1+\frac{1}{2^7})\times2^7]$. Resulting
from $p = 23+1$ , the minimum gap we can represent in this interval is $\frac{1}{2^{23}}\times2^7 = 2^{-16} =
\frac{1}{65536} > 10^{-6}$. Hence we can't compute the root with absolute accuracy $< 10^{-6}$.\\\\
\uppercase\expandafter{\romannumeral4}.\\
$p(x) = 4x^3-2x^2+3, p'(x) = 12x^2 - 4x$, then we get iteration formula:\\
\begin{equation}
  x_{n+1} = x_n - \frac{4x_n^3-2x_n^2 +3}{12x_n^2 - 4x_n}.
\end{equation}
With starting point $x_0 = -1$, the results of iterations are as follows:
\begin{table}[!htp]
  \centering
  \begin{tabular}{|c|c|}
    \hline	
    number of iterations & result \\
    \hline	
    $0$ & $-1$\\
    \hline	
    $1$ & $-\frac{13}{16}$\\
    \hline	
    $2$ & $-\frac{565}{733}$\\
    \hline	
    $3$ & $-\frac{1633}{2124}$\\
    \hline	
    $4$ & $-\frac{735}{956}$\\
    \hline	
  \end{tabular}
\end{table}\\
\uppercase\expandafter{\romannumeral5}.\\
Taylor's theorem implies
\begin{equation}
  f(\alpha) = f(x_n) + (\alpha - x_n)f'(\xi),
\end{equation}
where $\xi$ lies between $\alpha$ and $x_n$. By $f(\alpha) = 0$ and $e_n = \alpha - x_n$, we have
\begin{equation}
  e_n = -\frac{f(x_n)}{f'(\xi)}.
\end{equation}
Hence
\begin{eqnarray}
  e_{n+1} = \alpha - x_{n+1} = \alpha - (x_n - \frac{f(x_n)}{f'(x_0)}) \nonumber\\
  =e_n + \frac{f(x_n)}{f'(x_0)} = e_n + \frac{f(x_n)}{f'(\xi)}\frac{f'(\xi)}{f'(x_0)}\nonumber \\
  =(1 - \frac{f'(\xi)}{f'(x_0)})e_n,
\end{eqnarray}
where the last equation comes from (5). Set $ C = (1 - \frac{f'(\xi)}{f'(x_0)}), s = 1$, then we get
\begin{equation}
  e_{n+1} = Ce_n^s.
\end{equation}
We notice that $\frac{f'(\xi)}{f'(x_0)}$ is close to $1$ if initial value $x_0$ is sufficiently close to the root
$\alpha$. So $|C| = |1 - \frac{f'(\xi)}{f'(x_0)}|< 1$, which means this variation of Newton's method is at least
linearly convergent.\\\\
\uppercase\expandafter{\romannumeral6}.\\
If $x_0 = 0$, the iteration apparently converges to $0$.\\
If $x_0 \in (0,\frac{\pi}{2})$, set $g(x) = \arctan{x}$, notice that
\begin{equation}
   \forall x\in (0,\frac{\pi}{2}), g(x)\in (0,\frac{\pi}{2}).
\end{equation}
\begin{eqnarray}
  \forall x,y \in (0,\frac{\pi}{2}), \nonumber \\
  |g(x) - g(y)| = |\arctan{x} - \arctan{y}| \nonumber \\
  = g'(\xi)|x - y| = \frac{1}{1+\xi^2}|x - y| \nonumber \\
  \leq \lambda|x - y|,
\end{eqnarray}
which uses Theorem C.51 and $\lambda \in (0,1)$. By (8),(9) and Theorem 1.38, the iteration is convergent.\\
If $x_0 \in (-\frac{\pi}{2},0)$, analysis is the same as the case $x_0 \in (0,\frac{\pi}{2})$. Hence the
iteration $$x_{n+1} = \arctan{x_n}$$ is convergent within $(-\frac{\pi}{2},\frac{\pi}{2})$.\\\\
\uppercase\expandafter{\romannumeral7}.\\
Consider $g(x) = \frac{1}{p + x}$ and iteration
\begin{equation}
  x_{n+1} = g(x_n), x_1 = \frac{1}{p}.
\end{equation}
Then $x = \lim_{n \to\infty}x_n$ (if the limit exists).\\
Notice that $p > 1$ implies
\begin{equation}
  \forall x \in (0,1), g(x) \in (0,1),
\end{equation}
and
\begin{eqnarray}
  |g(x) - g(y)| = |\frac{1}{p+x} - \frac{1}{p+y}| \nonumber \\
  =\frac{|x - y|}{(p+x)(p+y)} \leq \lambda|x - y|,
\end{eqnarray}
where $\lambda = \frac{1}{p^2} < 1$. By (11),(12) and Theorem 1.38, the iteration is convergent. Note $x = \lim_{n \to
  \infty}x_n$, let $n \rightarrow \infty$ in (10), we have
$$x = \frac{1}{p+x}.$$
With the condition $x > 0$, we get $$x = \frac{-p + \sqrt{p^2+4}}{2}.$$\\
\uppercase\expandafter{\romannumeral8}.\\
Similar to \uppercase\expandafter{\romannumeral2}, take a sequence of roots $\{\alpha_n\}$ lying in $(a_0,b_0)$ such
that $ \lim_{n \to \infty}\alpha_n = a_0 < 0$. Then
\begin{eqnarray}
  \frac{(b_0 - a_0)\cdot 2^{-(n+1)}}{-a_0}  = \frac{(b_0 - a_0)\cdot 2^{-(n+1)}}{|a_0|}\nonumber \\
  \leq \frac{|c_n - a_0|}{|a_0|} = \lim_{n \to \infty}\frac{|c_n - \alpha_n|}{|\alpha_n|} \leq \epsilon.
\end{eqnarray}
Then $\frac{(b_0 - a_0)\cdot 2^{-(n+1)}}{-a_0} \leq \epsilon$ implies
\begin{equation}
  n \geq \frac{\log{(b_0 - a_0)} - \log{\epsilon} - \log{(-a_0)}}{\log{2}} -1.
\end{equation}
As for the second question, consider the case $\alpha = 0$. Then we cannot measure the relative error in this case.\\\\
\uppercase\expandafter{\romannumeral9}.\\
\begin{itemize}
\item if $k \geq 2$, then $f^{(k-1)}(\alpha) = 0, f^{(k)}(\alpha) \ne  0$. By the proof of Theorem 1.15, we have
  \begin{equation}
    e_{n+1} = e_n^2\frac{f''(\xi)}{2f'(x_n)},
  \end{equation}
  where $\xi$ is between $\alpha$ and $x_n$. When $x_0$ is sufficiently close to $\alpha$, by L'Hospital's rule we have
  \begin{equation}
    \lim_{n \to \infty}\left|\frac{f''(\xi)}{2f'(x_n)}\right| = \left|\frac{f^{(k)}(\alpha)}{2f^{(k-1)}(\alpha)}\right| = \infty,
  \end{equation}
  since $f^{(k-1)}(\alpha) = 0$ while $f^{(k)}(\alpha) \ne 0$.
  Then the iteration does not have quadratic convergence. \\
  In fact, with the same analysis as \uppercase\expandafter{\romannumeral5},
  \begin{equation}
    e_{n+1} = \left(1 - \frac{f'(\xi)}{f'(x_n)}\right)e_n,
  \end{equation}
  and by L'Hospital's rule,
  \begin{equation}
    \lim_{n \to \infty}\frac{f'(\xi)}{f'(x_n)} = \frac{f^{(k)}(\alpha)}{f^{(k)}(\alpha)} = 1,
  \end{equation}
  which shows $|1 - \frac{f'(\xi)}{f'(x_n)}| < 1$ (for sufficiently large $n$). Hence the iteration is linearly convergent in
  this case.
\item consider $\phi(x) = x - k\frac{f(x)}{f'(x)}$; we will check that $\phi'(\alpha) = 0$ but $\phi''(\alpha) \ne 0$.\\
  Since $\alpha$ is a zero of multiplicity $k$ of $f$, there exists $g(x)$ such that $g(\alpha) \ne 0$, and
  \begin{equation}
    f(x) = (x - \alpha)^kg(x).
  \end{equation}
  Then
  \begin{equation}
     f'(x) = k(x - \alpha)^{k-1}g(x) + (x - \alpha)^kg'(x). \nonumber
   \end{equation}
   Hence
   \begin{eqnarray}
     \phi(x) = x - k\frac{(x - \alpha)^kg(x)}{k(x - \alpha)^{k-1}g(x) + (x - \alpha)^kg'(x)}\nonumber\\
     =x - k\frac{(x - \alpha)g(x)}{kg(x) + (x - \alpha)g'(x)}.\nonumber
   \end{eqnarray}
   \begin{eqnarray}
     \phi'(\alpha) = 1 - k\frac{g(\alpha)kg(\alpha)}{(kg(\alpha))^2} = 0.
   \end{eqnarray}
   \begin{eqnarray}
     \phi''(\alpha) = \frac{2k^3g'(\alpha)g^3(\alpha)}{k^4g^4(\alpha)} = \frac{2g'(\alpha)}{kg(\alpha)} \ne 0.
   \end{eqnarray}
   By (20),(21) and Corollary 1.41, we have proved that quadratic convergence is restored by making the modification.

  
\end{itemize}
\newpage
\section{ Programming}

A. The C++ package is completed as required. Use command \textbf{"make test"} to get results of test for three methods, and use \textbf{"make run"} to get results of following problems B,C,D,E and F.\\\\
B. We only show the result and absolute error here. Details can be found in \textbf{Assignment\_B.cpp}, with command
\textbf{"make run\_B"} to get all results.\\
(We set $\epsilon = 10^{-6}$ in following programs.)\\
Problem 1:\\ $x = 0.860334$,  $r = -2.22776\times10^{-7}$\\
Problem 2:\\ $x = 0.641186$,  $r = -5.61904\times10^{-8}$\\
Problem 3:\\ $x = 1.82938$,  $r = -9.37904\times10^{-8}$\\
Problem 4:\\Warning! The specified accuracy is not reached.\\
$x = 0.117877$, $r = -9.4847\times10^{7}$\\\\
C. As before, we only show the result and the absolute error. Details can be found in \textbf{Assignment\_C.cpp}, with
command \textbf{"make run\_C"} to get all results.\\
Near 4.5:\\ $x = 4.4934095$, $r = -3.6948222\times10^{-12}$\\
Near 7.7:\\ $x = 7.7252518$, $r = -4.5138115\times10^{-11}$\\\\
D. As before, we only show the result and the absolute error. Details can be found in \textbf{Assignment\_D.cpp}, with
command \textbf{"make run\_D"} to get all results.\\
Problem 1 : \\
With initial $0$ and $\frac{\pi}{2}$ :\\ $x = 3.1387951,  r = -9.7831591\times10^{-7}$\\
With initial $-2\pi$ and $-\frac{3\pi}{2}$:\\ $x = -9.422841, r = -4.6897694\times10^{-7}$\\
Problem 2 : \\
With initial $1$ and $1.4$:\\ $x = 1.3063269,  r = -2.8434116\times10^{-8}$\\
With initial $-3$ and $-3.2$:\\ $x = -3.0964123,  r = -5.7001279\times10^{-12}$\\
Problem 3 : \\
With initial $0$ and $-0.5$:\\ $x = -0.1886854,  r = 2.1670906\times10^{-8}$\\
With initial $0$ and $0.5$:\\ $x = 0.45154322,  r = 2.1374913\times10^{-8}$\\
We find that the method gets different results when given different initial values. This is simply because the other set of
initial values is closer to the other root of the equation.\\\\
E. Take
\begin{equation}
  f(h) = L[0.5\pi r^2 - r^2\arcsin{\frac{h}{r}} - h(r^2-h^2)^{\frac{1}{2}}] - V
\end{equation}
with $L = 10$\,ft, $r = 1$\,ft, $V = 12.4$\,ft$^3$. Then apply the three methods to $f(h)$. Details can be found in
\textbf{Assignment\_E.cpp}, with command \textbf{"make run\_E"} to get all results.\\
Bisection method:\\ $d = r - h = 0.833984$,\\ absolute error is $0.00296641$.\\
Newton method:\\$d = r - h = 0.833891$,\\ absolute error is $0.00112416$.\\
Secant method:\\$d = r - h = 0.833827$,\\ absolute error is $-0.000128345$.\\\\
F. Similarly, we take
\begin{eqnarray}
  f(\alpha) = A\sin{\alpha}\cos{\alpha} + B\sin^2{\alpha} \nonumber\\
  - C\cos{\alpha} - E\sin{\alpha}
\end{eqnarray}
where
\begin{eqnarray}
  A = l\sin{\beta_1}, B = l\cos{\beta_1},\nonumber\\
  C = (h +0.5D)\sin{\beta_1} - 0.5D\tan{\beta_1},\nonumber\\
  E = (h +0.5D)\cos{\beta_1} - 0.5D.\nonumber
\end{eqnarray}
Details about (a),(b) and (c) can be found in \textbf{Assignment\_F.cpp}, with command \textbf{"make run\_F"} to get all results.\\
(a) Applying Newton's method with initial value $\alpha = 33\degree$, we get:\\
$\alpha = 0.575473 = 32.9722\degree$ with absolute error $9.66338\times10^{-13}$, which verifies $\alpha \approx
33\degree$.\\
(b) When $D = 30$\,in., applying Newton's method with initial value $\alpha = 33\degree$, we get:\\
$\alpha = 0.578907 = 33.1689\degree$ with absolute error $1.30272\times10^{-9}$.\\
(c) Applying the secant method with different initial values, we find:\\
With initial value $33\degree$ and $0\degree$, \\
$\alpha = 0.578907 = 33.1689\degree$ with absolute error $-1.17701\times10^{-11}$.\\
With initial value $33\degree$ and $-11.4\degree$,\\ 
$\alpha = -0.200713 = -11.5\degree$ with absolute error $7.00486\times10^{-7}$.\\
It is easy to check that the equation always equals zero when $\alpha = -\beta_1$, which means $-\beta_1$ is also a
root of the equation. This explains why the secant method converges to $-\beta_1$ when the initial value is closer to it.



\end{document}
