\documentclass[a4paper]{article}
\usepackage{xltxtra}
\usepackage{graphicx}
\usepackage{fontspec}
\usepackage{cite}
\usepackage{geometry}
\usepackage{amsmath}

%\setmainfont{AR PL UMing CN}
\geometry{left=20mm,right=20mm,top=20mm,bottom=20mm}
\begin{document}  
 
\title{HW1}  
\author{Zhou Jiang 3220101339}   
\date{2024-9-29}  
\maketitle  
 
\section{Problem1:}  
\[
\text{width} = \frac{3.5 - 1.5}{2^n} = \frac{2}{2^n} = \frac{1}{2^{n-1}}
\]
When $n=1$, the distance between the middle point and the root of the interval is the largest,
and the maximum value is $1$.

\section{Problem2:}
The root $x$ lies between $a_n$ and $b_n$, and the relative error of the result must satisfy the requirement stated in terms of $a_0$ and $b_0$:
\[
    |c - x| \leq \epsilon a_0 \implies |c - x| \leq \epsilon |x|,
\]
since $0 < a_0 \leq x$ gives $\epsilon a_0 \leq \epsilon |x|$.
After $n$ bisection steps the midpoint satisfies
\[
    |c - x| \leq \frac{b_0 - a_0}{2^{n+1}}.
\]
So it suffices that
\[
    \frac{b_0 - a_0}{2^{n+1} \, a_0} \leq \epsilon.
\]
Take the logarithm of both sides to get:
\[
\log (b_0 - a_0) - (n+1) \log 2 - \log (a_0) \leq \log\epsilon
\]

Finally get:
\[
\frac{\log (b_0 - a_0) - \log\epsilon - \log a_0}{\log 2} - 1 \leq n
\]

\section{Problem3:}  
\[  
    x_{n+1} = x_n - \frac{p(x_n)}{p'(x_n)}   
\]  

First, we need to calculate $p'(x)$:  
 
\[   
p'(x) = 12x^2 - 4x   
\]  
Start iterating. Starting at $x_0 = -1$, we perform four iterations.
\[  
\begin{array}{|c|c|}  
\hline  
n & x_n \text{ (Keep five decimals)} \\  
\hline  
0 & -1.00000 \\  
1 & -0.81250 \\  
2 & -0.77080 \\  
3 & -0.76883 \\  
4 & -0.76883 \\  
\hline  
\end{array}  
\]  

\section{Problem4:}  
Taylor Expansion
Using Taylor expansion of f, we can get:
\[
f(x_n) = f(\alpha + e_n) = f(\alpha) + f'(\alpha)e_n + \frac{f''(\epsilon)}{2}e_n^2 .
\]
The variable $\epsilon$ is between $\alpha$ and $x_n$.
Since $f(\alpha) = 0$, this simplifies to:
\[
f(x_n) = f'(\alpha)e_n + \frac{f''(\epsilon)}{2}e_n^2.
\]

Substituting $f(x_n)$ into our iteration formula, we can get:
\[
e_{n+1} = x_{n+1} - \alpha = x_n - \frac{f(x_n)}{f'(x_0)} - \alpha.
\]

This leads to:

\[
e_{n+1} = e_n - \frac{f'(\alpha)e_n + \frac{f''(\epsilon)}{2}e_n^2 }{f'(x_0)}.
\]

Factoring out $e_n$:

\[
e_{n+1} = e_n \left( 1 - \frac{f'(\alpha)}{f'(x_0)} \right) - \frac{f''(\epsilon)}{2f'(x_0)} e_n^2 .
\]

Therefore, we can express the error as:

\[
e_{n+1} = C e_n^{s} + O(e_n^{2}).
\]

The constant $s$ is defined as:
\[
s = 1.
\]

The constant $C$ may depend on $x_n$, $\alpha$, and the derivative $f'(\alpha)$ as:
\[
C = 1-\frac{f'(\alpha)}{f'(x_0)}.
\]

\section{Problem5:}  
\[
f(x) = \arctan(x)
\]
\[
f'(x) = \frac{1}{1 + x^2}
\]
On the interval $(-\frac{\pi}{2},\frac{\pi}{2})$ the derivative is always positive, and it attains its maximum value $1$ at $x=0$.

Since $0 < f'(x) \leq 1$ with equality only at $x = 0$, the iteration is stable: it converges toward the fixed point $x=0$ of the iterative function.

\section{Problem6:} 
By hint: this can be interpreted as $x = \lim_{n \to \infty} x_n$.
\[
x_0 = 0, \quad x_1 = \frac{1}{p}, \quad x_2 = \frac{1}{p+\frac{1}{p}}, \quad \dots, \quad x_{n+1} = \frac{1}{p + x_n}.
\]
Therefore, the problem becomes finding the fixed point of $f(x) = \frac{1}{p + x}$, i.e.\ solving $f(x) = x$.\\
The iteration converges to this fixed point because the magnitude of the derivative satisfies $|f'(x)| < 1$ and $x_n \ge 0$.\\
Now, solve the equation:
\[
\frac{1}{p + x} = x. 
\]
Finally, we get:
\[
x = \frac{\sqrt{p^2 + 4} - p}{2}. 
\]

\section{Problem7:}  
Now consider the scenario where $a_0 < 0 < b_0$, and adjust the error bound accordingly.
In this case, we can state that the root $x$ lies in the interval $[a_0, b_0]$.

The error bound now becomes:
\[
|c_n - x| \leq \frac{b_0 - a_0}{2^{n+1}}.
\]

To ensure the relative error is still valid, we want:
\[
\frac{|c_n - x|}{|x|} \leq \epsilon.
\]

\[
|c_n - x| \leq \epsilon \max(|a_0|, |b_0|).
\]

Following a similar approach as before, we get:
\[
\frac{b_0 - a_0}{2^{n+1}} \leq \epsilon \max(|a_0|, |b_0|).
\]
So we find:
\[
b_0 - a_0 \leq 2^{n+1} \epsilon \max(|a_0|, |b_0|).
\]

Taking logarithms, we derive the inequality:
\[
n \geq \frac{\log ( b_0 - a_0 ) - \log\epsilon - \log \max(|a_0|, |b_0|)}{\log 2} - 1.
\]
\section{Problem8:} 
\subsection{(1):}
A multiple zero of multiplicity $k$ can be detected by examining how fast $|f(x_n)|$ decays as the iterates approach $\alpha$:
\[
    |f(x_n)| \sim |x_n - \alpha |^k.
\]
\subsection{(2):}
We can use the Taylor expansion of $f$ around $r$:
\[
    f(x_n) = f(r + e_n) = f(r) + f'(r)e_n + \frac{f''(r)}{2} e_n^2 + \cdots + \frac{f^{(k)}(r)}{k!} e_n^k + O(e_n^{k+1}),
\]
Since $r$ is a root of multiplicity $k$, we have $f(r) = f'(r) = \cdots = f^{(k-1)}(r) = 0$, so this simplifies to:
\[
f(x_n) = \frac{f^{(k)}(r)}{k!} e_n^k + O(e_n^{k+1}).
\]
Then, we can use the Taylor expansion of $f'$ around $r$
\[
f'(x_n) = f'(r + e_n) = f'(r) + f''(r)e_n + \cdots + \frac{f^{(k)}(r)}{(k-1)!} e_n^{k-1} + O(e_n^k).
\]

This reduces to:
\[
f'(x_n) = \frac{f^{(k)}(r)}{(k-1)!} e_n^{k-1} + O(e_n^k).
\]
Now substituting $f(x_n)$ and $f'(x_n)$ into the modified iteration formula:

\[
x_{n+1} = x_n - k \frac{f(x_n)}{f'(x_n)}.
\]

Substituting gives:

\[
x_{n+1} = x_n - k \frac{\frac{f^{(k)}(r)}{k!} e_n^k + O(e_n^{k+1})}{\frac{f^{(k)}(r)}{(k-1)!} e_n^{k-1} + O(e_n^k)}.
\]
Thus,

\[
x_{n+1} = x_n - e_n + O(e_n^{2}) .
\]

This leads to:

\[
e_{n+1} = O(e_n^{2}).
\]
Then quadratic convergence in Newton's iteration will be restored.
\end{document}  