\documentclass[a4paper]{article}
\usepackage{xeCJK}
\setCJKmainfont{WenQuanYi Micro Hei}
\usepackage[affil-it]{authblk}
\usepackage[backend=bibtex,style=numeric]{biblatex}
\usepackage{graphicx}
\usepackage{geometry}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

\addbibresource{citation.bib}

\begin{document}
% =================================================
\title{Numerical Analysis homework \# 4}

\author{王劼 Wang Jie 3220100105
  \thanks{Electronic address: \texttt{2645443470@qq.com}}}
\affil{(math), Zhejiang University }


\date{Due time: \today}

\maketitle

\begin{abstract}
    Theoretical homework 4.
\end{abstract}





% ============================================
\section*{theoretical homework}

Complete all the questions in section 4.4.1 and all the exercises through chapter 4~\cite{wangheyu2024}.

\subsection*{Exercise 4.33}  

\textbf{Answer:} \\

Consider the calculation of \( c := \text{fl}(a+b) \) with \( a = 1.234 \times 10^{4} \) and \( b = 8.769 \times 10^{4} \) in an FPN system \(\mathcal{F}: (10, 4, -7, 8)\).

\begin{enumerate}
    \item \( b \leftarrow 8.769 \times 10^{4}; \, e_{c} \leftarrow 4. \)
    \item \( m_{c} \leftarrow 10.003 \).
    \item \( m_{c} \leftarrow 1.0003; \, e_{c} \leftarrow 5. \).
    \item do nothing.
    \item \( m_{c} \leftarrow 1.000. \)
    \item \( c = 1.000 \times 10^{5}. \)
\end{enumerate}

Consider the calculation of \( c := \text{fl}(a+b) \) with \( a = 1.234 \times 10^{4} \) and \( b = -5.678 \times 10^{0} \) in an FPN system \(\mathcal{F}: (10, 4, -7, 8)\).

\begin{enumerate}
    \item \( b \leftarrow -0.0005678 \times 10^{4}; \, e_{c} \leftarrow 4. \)
    \item \( m_{c} \leftarrow 1.2334322 \).
    \item do nothing.
    \item do nothing.
    \item \( m_{c} \leftarrow 1.233. \)
    \item \( c = 1.233 \times 10^{4}. \)
\end{enumerate}

Consider the calculation of \( c := \text{fl}(a+b) \) with \( a = 1.234 \times 10^{4} \) and \( b = -5.678 \times 10^{3} \) in an FPN system \(\mathcal{F}: (10, 4, -7, 8)\).

\begin{enumerate}
    \item \( b \leftarrow -0.5678 \times 10^{4}; \, e_{c} \leftarrow 4. \)
    \item \( m_{c} \leftarrow 0.6662 \).
    \item \( m_{c} \leftarrow 6.662; \, e_{c} \leftarrow 3. \).
    \item do nothing.
    \item \( m_{c} \leftarrow 6.662. \)
    \item \( c = 6.662 \times 10^{3}. \)
\end{enumerate}

\subsection*{Exercise 4.42} 

\textbf{Answer:} \\

Consider the sequence of positive numbers: \( a_1 = 100 \), \( a_2 = 0.1 \), \( a_3 = 0.9 \) in an FPN system \(\mathcal{F}: (10, 3, -7, 8)\).

Adding in order of magnitude:
\begin{enumerate}
    \item \( \text{fl}(0.9 + 0.1) = 1.0 \)
    \item \( \text{fl}(100 + 1.0) = 101 \)
\end{enumerate}

Adding in original order:
\begin{enumerate}
    \item \( \text{fl}(100 + 0.1) = 100 \)
    \item \( \text{fl}
    (100 + 0.9) = 100 \)
\end{enumerate} 

\subsection*{Exercise 4.43} 

\textbf{Answer:}\\ 

\[
\text{fl}(a_1b_1 + a_2b_2 + a_3b_3) = \text{fl}(\text{fl}(a_1b_1) + \text{fl}(a_2b_2) + \text{fl}(a_3b_3)) = a_1b_1(1+\delta_1)(1+\delta_4) + a_2b_2(1+\delta_2)(1+\delta_4) + a_3b_3(1+\delta_3)(1+\delta_4)
\]
where \(|\delta_4| < 3\epsilon_u\) and \(|\delta_i| < \epsilon_u\) for \(i = 1,2,3\).

So we get:
\[ 
\text{fl}\left(\sum_i \prod_j a_{i,j}\right) = \sum_i \text{fl}\left(\prod_j a_{i,j}\right) = (1+\delta_0)\sum_i \left(\prod_j a_{i,j} \times \prod_{j=1}^{\max(j)-1}(1+\delta_{i,j})\right)
\]
where \(|\delta_0| < \max(i)\,\epsilon_u\) and \(|\delta_{i,j}| < \epsilon_u\) for all \(i, j\).

\subsection*{Exercise 4.80}

\textbf{Answer:}\\

Assume that $\sin x$ and $\cos x$ are computed with relative error within machine roundoff (this can be satisfied easily by truncating the Taylor series). 

\[
f_A = \text{fl}\left[\frac{\text{fl}(\sin(x))}{\text{fl}(1+\cos x)}\right] \tag{4.47}
\]

that computes $f(x) = \frac{\sin x}{1+\cos x}$ for $x \in (0, \pi/2)$.

By Definition 4.59, it is easy to compute that

\[
\operatorname{cond}_f(x) = \frac{x}{\sin x}.
\]

Furthermore, by Theorem 4.40 and the assumptions on $\sin x$ and $\cos x$, we have

\[
f_A(x) = \frac{\sin x(1+\delta_1)}{(1+\cos x(1+\delta_2))(1+\delta_3)}(1+\delta_4),
\]

where $|\delta_i| \leq \epsilon_u$ for $i=1,2,3,4$. Neglecting the quadratic terms of $O(\delta_i^2)$, the above equation is equivalent to

\[
f_A(x) = \frac{\sin x}{1+\cos x} \left\{1 + \delta_1 + \delta_4 - \delta_3 - \delta_2 \frac{\cos x}{1+\cos x}\right\},
\]

hence we have $\varphi(x) = 3 + \frac{\cos x}{1+\cos x}$ and

\[
\operatorname{cond}_A(x) \leq \frac{\sin x}{x} \left(3 + \frac{\cos x}{1+\cos x}\right).
\]

Hence, $\operatorname{cond}_A(x)$ stays bounded as $x \rightarrow 0$ (the upper bound tends to $\frac{7}{2}$), and it is controlled by $\frac{6}{\pi}$ as $x \rightarrow \frac{\pi}{2}$.

\subsection*{Question 4.4.1 I}

\textbf{Answer:}\\

First, convert the decimal number 477 to binary:

\[
477 \div 2 = 238 \text{ remainder } 1
\]
\[
238 \div 2 = 119 \text{ remainder } 0
\]
\[
119 \div 2 = 59 \text{ remainder } 1
\]
\[
59 \div 2 = 29 \text{ remainder } 1
\]
\[
29 \div 2 = 14 \text{ remainder } 1
\]
\[
14 \div 2 = 7 \text{ remainder } 0
\]
\[
7 \div 2 = 3 \text{ remainder } 1
\]
\[
3 \div 2 = 1 \text{ remainder } 1
\]
\[
1 \div 2 = 0 \text{ remainder } 1
\]

Reading the remainders from bottom to top, we get:
\[
477 = 111011101_2
\]

Next, normalize the binary representation by moving the binary point to the right of the first 1:
\[
111011101_2 = 1.11011101_2 \times 2^8
\]

The final normalized floating-point representation is:
\[
477 = 1.11011101_2 \times 2^8
\]

\subsection*{Question 4.4.1 II}

\textbf{Answer:}\\

First, convert the fraction \( \frac{3}{5} \) to binary by repeatedly multiplying the decimal part by 2:

\[
0.6 \times 2 = 1.2 \quad \text{(integer part: 1, decimal part: 0.2)}
\]
\[
0.2 \times 2 = 0.4 \quad \text{(integer part: 0, decimal part: 0.4)}
\]
\[
0.4 \times 2 = 0.8 \quad \text{(integer part: 0, decimal part: 0.8)}
\]
\[
0.8 \times 2 = 1.6 \quad \text{(integer part: 1, decimal part: 0.6)}
\]

At this point, the decimal part \( 0.6 \) begins to repeat. Therefore, the binary representation of \( \frac{3}{5} \) is:
\[
\frac{3}{5} = 0.1001\overline{1001}_2
\]
where \( \overline{1001} \) denotes that "1001" repeats indefinitely.

Next, normalize the binary number by moving the decimal point to the right of the first 1:
\[
0.1001\overline{1001}_2 = 1.001\overline{1001}_2 \times 2^{-1}
\]

Thus, the normalized floating-point representation is:
\[
\frac{3}{5} = 1.001\overline{1001}_2 \times 2^{-1}
\]

\subsection*{Question 4.4.1 III}

\textbf{Answer:}\\

Consider an FPN system \(\mathcal{F}: (\beta, p, L, U)\) and \(x = \beta^e\).

We can conclude:

\[
x_R-x = \epsilon_M\beta^{e}, \qquad x-x_L = \epsilon_M\beta^{e-1}
\]

So we get:

\[
x_R-x = \beta(x-x_L)
\]

\subsection*{Question 4.4.1 IV}

\textbf{Answer:}\\

From II we get:

\[
\frac{3}{5} = 1.001\overline{1001}_2 \times 2^{-1}
\]
where \( \overline{1001} \) denotes that "1001" repeats indefinitely.

So we can find:

\[
x_1 = 1.00110011001100110011001 \times 2^{-1}, \quad x_2 = 1.00110011001100110011010 \times 2^{-1}
\]
which are the two normalized FPNs adjacent to \(x = 3/5\) under the IEEE 754 single-precision protocol.

\(x_1, x_2\) can also be expressed as follows:

\[
x_1 = 0|01111110|00110011001100110011001, \quad x_2 = 0|01111110|00110011001100110011010
\]

Because \(x = 3/5\) is closer to $x_2$, we get \(\text{fl}(x) = x_2 = 0.60000002384185791016\).

The relative roundoff error:

\[
\frac{|\text{fl}(x)-x|}{x} \approx 3.97 \times 10^{-8}
\]

\subsection*{Question 4.4.1 V}

\textbf{Answer:}\\

The unit roundoff would be $\epsilon_u = \frac{1}{2}\beta^{1-p}$ for the FPN system \(\mathcal{F}: (\beta, p, L, U)\), i.e.\ half the machine epsilon $\epsilon_M = \beta^{1-p}$.

\subsection*{Question 4.4.1 VI}

\textbf{Answer:}\\

When $x = \frac{1}{4}$, we know $\cos(x) = 0.1111100000001010101_2$.

So $1 - \cos(x) = 0.0000011111110101010_2$.

Thus we can conclude that $6$ bits of precision are lost in the subtraction.

\subsection*{Question 4.4.1 VII}

\textbf{Answer:}\\

To avoid catastrophic cancellation when computing \( 1 - \cos(x) \), we can use the following methods:

\section*{1. Using the Trigonometric Identity for Small Angles}

For small values of \( x \), the expression \( 1 - \cos(x) \) can be rewritten using the identity:

\[
1 - \cos(x) = 2 \sin^2\left(\frac{x}{2}\right)
\]

This identity avoids the subtraction of nearly equal numbers and instead expresses \( 1 - \cos(x) \) in terms of \( \sin^2\left(\frac{x}{2}\right) \), which is numerically more stable.

\section*{2. Using a High-Precision Series Expansion}

Alternatively, for small \( x \), we can use the Taylor series expansion of \( \cos(x) \) around \( x = 0 \):

\[
\cos(x) \approx 1 - \frac{x^2}{2} + \frac{x^4}{24} - \cdots
\]

Thus,

\[
1 - \cos(x) \approx \frac{x^2}{2} - \frac{x^4}{24} + \cdots
\]

This expansion avoids direct subtraction between \( 1 \) and \( \cos(x) \), providing a more stable approximation for small \( x \).

\subsection*{Question 4.4.1 VIII}

\textbf{Answer:}\\

To compute the condition numbers of the following functions, we use the definition of the condition number:

\[
\kappa(f, x_0) = \left| \frac{x_0 f'(x_0)}{f(x_0)} \right|
\]

where \( f(x) \) is the function and \( f'(x) \) is its derivative.

\section*{1. Condition Number of \( (x - 1)^\alpha \)}

Let \( f(x) = (x - 1)^\alpha \). The derivative is:

\[
f'(x) = \alpha (x - 1)^{\alpha - 1}
\]

The condition number is:

\[
\kappa(f, x) = \left| \frac{x f'(x)}{f(x)} \right| = \left| \frac{x \cdot \alpha (x - 1)^{\alpha - 1}}{(x - 1)^\alpha} \right| = \left| \frac{\alpha x}{x - 1} \right|
\]

The condition number is large when \( x \) is near 1, as \( (x - 1) \) becomes small.

\section*{2. Condition Number of \( \ln(x) \)}

Let \( f(x) = \ln(x) \). The derivative is:

\[
f'(x) = \frac{1}{x}
\]

The condition number is:

\[
\kappa(f, x) = \left| \frac{x f'(x)}{f(x)} \right| = \left| \frac{x \cdot \frac{1}{x}}{\ln(x)} \right| = \frac{1}{|\ln(x)|}
\]

The condition number is large when \( \ln(x) \) is close to 0, which happens near \( x = 1 \).

\section*{3. Condition Number of \( e^x \)}

Let \( f(x) = e^x \). The derivative is:

\[
f'(x) = e^x
\]

The condition number is:

\[
\kappa(f, x) = \left| \frac{x f'(x)}{f(x)} \right| = \left| \frac{x e^x}{e^x} \right| = |x|
\]

The condition number is large when \( |x| \) is large.

\section*{4. Condition Number of \( \arccos(x) \)}

Let \( f(x) = \arccos(x) \). The derivative is:

\[
f'(x) = -\frac{1}{\sqrt{1 - x^2}}
\]

The condition number is:

\[
\kappa(f, x) = \left| \frac{x f'(x)}{f(x)} \right| = \left| \frac{x \cdot \left(-\frac{1}{\sqrt{1 - x^2}}\right)}{\arccos(x)} \right| = \frac{|x|}{\sqrt{1 - x^2} \cdot |\arccos(x)|}
\]

The condition number becomes large when \( x \) is near 1, as \( \sqrt{1 - x^2} \) becomes small and \( \arccos(x) \) approaches 0.

\section*{Summary of Condition Numbers}

\begin{itemize}
    \item \( (x - 1)^\alpha \): The condition number is large near \( x = 1 \).
    \item \( \ln(x) \): The condition number is large near \( x = 1 \).
    \item \( e^x \): The condition number is large when \( |x| \) is large.
    \item \( \arccos(x) \): The condition number is large near \( x = 1 \).
\end{itemize}

\subsection*{Question 4.4.1 IX}

\textbf{Answer:}\\

\subsection*{Part 1: Prove that \( \text{cond}_f(x) \leq 1 \) for \( x \in [0, 1] \)}

We are tasked with proving that the condition number \( \text{cond}_f(x) \) for the function \( f(x) = 1 - e^{-x} \) satisfies \( \text{cond}_f(x) \leq 1 \) for \( x \in [0, 1] \).

The condition number is defined as:

\[
\text{cond}_f(x) = \left| \frac{x f'(x)}{f(x)} \right|
\]

First, we compute the derivative of the function \( f(x) = 1 - e^{-x} \):

\[
f'(x) = \frac{d}{dx} \left( 1 - e^{-x} \right) = e^{-x}
\]

Thus, the condition number becomes:

\[
\text{cond}_f(x) = \left| \frac{x e^{-x}}{1 - e^{-x}} \right| = \frac{x}{e^x - 1} \text{ for } x \in [0, 1]
\]

Because \(x \leq e^x-1\), we get:

\[
\text{cond}_f(x) = \frac{x}{e^x - 1} \leq 1 \text{ for } x \in [0, 1]
\]

\subsection*{Part 2: Estimate the condition number \( \text{cond}_A(x) \) of the algorithm}

For \( x \in [0, 1] \), we get:

\[
\text{cond}_f(x) = \frac{x}{e^x - 1}
\]

\[
f_A(x) = \text{fl}(1-\text{fl}(e^{-x})) = (1-e^{-x}(1+\delta_1))(1+\delta_2) \approx (1-e^{-x})\left(1+\delta_2-\frac{\delta_1}{e^x-1}\right)
\]

where $|\delta_i| \leq \epsilon_u$ for $i=1,2$.

Hence we have $\varphi(x) = \frac{e^x}{e^x - 1}$ and

\[
\operatorname{cond}_A(x) \leq \frac{e^x}{x}.
\]

\subsection*{Part 3: Plot \( \text{cond}_f(x) \) and the upper bound of \( \text{cond}_A(x) \)}

\begin{figure}[htbp]  
   \centering
   \includegraphics[width=\textwidth]{cond_plot.png}  
   \caption{Condition numbers for $f(x)$ and $A(x)$.}  
   \label{fig:cond_plot}  
\end{figure}

This plot shows the behavior of \( \text{cond}_f(x) \) and the upper bound of \( \text{cond}_A(x) \) over the interval \( x \in [0, 1] \). 

\subsection*{Question 4.4.1 X}

\textbf{Answer:}\\

We are given that the condition number of a nonsingular square matrix \( A \) based on the 2-norm is defined as:

\[
\text{cond}_2(A) := \|A\|_2 \cdot \|A^{-1}\|_2 = \frac{\sigma_{\text{max}}}{\sigma_{\text{min}}}
\]
where \( \sigma_{\text{max}} \) and \( \sigma_{\text{min}} \) are the largest and smallest singular values of \( A \), respectively. We also need to prove that if \( A \) is normal, \( \text{cond}_2(A) \) is equal to \( \frac{|\lambda_{\text{max}}|}{|\lambda_{\text{min}}|} \), where \( \lambda_{\text{max}} \) and \( \lambda_{\text{min}} \) are the eigenvalues of \( A \) with the largest and smallest moduli. Finally, if \( A \) is unitary, we show that \( \text{cond}_2(A) = 1 \).

\section*{Part 1: Condition Number in Terms of Singular Values}

We show that \( \|A\|_2 = \sup_{\|v\|_2 = 1} \|Av\|_2 \).

\[
\|A\|_2^2 = \sup_{\|v\|_2 = 1} v^T A^T A v.
\]

By singular value decomposition, we get

\[
\sup_{\|v\|_2 = 1} v^T A^T A v = \sigma_{\text{max}}^2.
\]

Similarly, the 2-norm of \( A^{-1} \) is:

\[
\|A^{-1}\|_2 = \frac{1}{\sigma_{\text{min}}},
\]

where \( \sigma_{\text{min}} \) is the smallest singular value of \( A \), i.e., the smallest square root of the eigenvalues of \( A^T A \). Thus, the condition number of \( A \) in the 2-norm is:

\[
\text{cond}_2(A) = \|A\|_2 \cdot \|A^{-1}\|_2 = \frac{\sigma_{\text{max}}}{\sigma_{\text{min}}}.
\]

\section*{Part 2: Condition Number for Normal Matrices}

Now, suppose \( A \) is a normal matrix, meaning that \( A^H A = A A^H \). A key property of normal matrices is that they can be diagonalized by a unitary matrix. In other words, there exists a unitary matrix \( U \) such that:

\[
A = U \Lambda U^H,
\]

where \( \Lambda \) is a diagonal matrix whose entries are the eigenvalues \( \lambda_1, \lambda_2, \dots, \lambda_n \) of \( A \).

For a normal matrix, the singular values of \( A \) are the absolute values of the eigenvalues, i.e.,

\[
\sigma_i = |\lambda_i| \quad \text{for all} \quad i.
\]

Therefore, the largest singular value is \( \sigma_{\text{max}} = \max_i |\lambda_i| \), and the smallest singular value is \( \sigma_{\text{min}} = \min_i |\lambda_i| \).

Consequently, the condition number for normal matrices becomes:

\[
\text{cond}_2(A) = \frac{\sigma_{\text{max}}}{\sigma_{\text{min}}} = \frac{\max_i |\lambda_i|}{\min_i |\lambda_i|}.
\]

Thus, if \( A \) is normal, the condition number in terms of the eigenvalues is:

\[
\text{cond}_2(A) = \frac{|\lambda_{\text{max}}|}{|\lambda_{\text{min}}|},
\]

where \( \lambda_{\text{max}} \) and \( \lambda_{\text{min}} \) are the eigenvalues of \( A \) with the largest and smallest moduli, respectively.

\section*{Part 3: Condition Number for Unitary Matrices}

Finally, if \( A \) is unitary, then \( A^H A = I \), which implies that the singular values of \( A \) are all equal to 1. This is because the singular values of a matrix are the square roots of the eigenvalues of \( A^H A \), and for a unitary matrix, \( A^H A = I \), so the eigenvalues of \( A^H A \) are all 1.

Therefore, for a unitary matrix, the condition number becomes:

\[
\text{cond}_2(A) = \frac{\sigma_{\text{max}}}{\sigma_{\text{min}}} = \frac{1}{1} = 1.
\]

Thus, for unitary matrices:

\[
\text{cond}_2(A) = 1.
\]

\subsection*{Question 4.4.1 XI}

\textbf{Answer:}\\

We know that \( a_0 + a_1 r + \cdots + a_n r^n = 0 \). Differentiating both sides with respect to \( a_i \) gives

\[
\sum_{k=0}^{n} k a_k r^{k-1} \frac{\partial r}{\partial a_i} + r^i = 0
\]

So we get the componentwise condition number as 

\begin{align*}
\|A\|_1 &= \sum_{k=0}^{n-1} \left| \frac{a_k \frac{\partial r}{\partial a_k}}{r} \right| \\
&= \sum_{k=0}^{n-1} \left| \frac{a_k r^{k-1}}{\sum_{j=0}^{n} j a_j r^{j-1}} \right| 
\end{align*}
where $a_n=1$.

For the Wilkinson Example, we can compute 

\[
\text{cond} = \left| \frac{r^{n}}{\sum_{j=0}^{n} j a_j r^{j-1}} \right|
\]

So we observe:

\begin{align*}
\|A\|_1 &= \sum_{k=0}^{n-1} \left| \frac{a_k r^{k-1}}{\sum_{j=0}^{n} j a_j r^{j-1}} \right| \\
&\geq \left| \frac{\sum_{k=0}^{n-1} a_k r^{k-1}}{\sum_{j=0}^{n} j a_j r^{j-1}}  \right| \\
&= \left| \frac{r^{n}}{\sum_{j=0}^{n} j a_j r^{j-1}} \right| \\
&= \text{cond}
\end{align*}

\subsection*{Question 4.4.1 XII}

\textbf{Answer:}\\

No. 

\textbf{Example:}\\

For FPN system \(\mathcal{F}: (2, 3, -7, 8)\) calculated in a register of precision 6.

\[
x_1=1.00, x_2=1.11
\]

we get:

\[
x_1/x_2=0.100\overline{100}
\]
where \( \overline{100} \) denotes that "100" repeats indefinitely.

so \(\text{f}(x_1/x_2) = 0.100101\), where \(\text{f}(\cdot)\) denotes the result computed in the register of precision 6.

we get \(\text{fl}(x_1/x_2) = 1.01 \times 2^{-1}\)




\subsection*{Question 4.4.1 XIII}

\textbf{Answer:}\\

According to the IEEE 754 standard, single-precision floating-point numbers have 24 bits for the significand, which gives a precision of approximately 7 significant decimal digits (specifically, about \(5.96 \times 10^{-8}\) in relative accuracy for numbers near 1).

In the bisection method, to achieve an absolute accuracy of less than $10^{-6}$, we need to calculate the number of iterations n such that $(b - a)/2^n < 10^{-6}$, while $b=129, a=128, b - a = 1$. We need $n > \log_2(10^6) \approx 19.93$. Therefore, we need at least 20 iterations to achieve an absolute accuracy of less than $10^{-6}$.

However, if we consider the propagation of rounding errors, 
\[
\text{Total error after 20 iterations} \approx 20 \times 5.96 \times 10^{-8} \approx 1.2 \times 10^{-6} \geq 10^{-6}
\]

By this analysis, we conclude that we cannot compute the root with absolute accuracy $< 10^{-6}$.

\subsection*{Question 4.4.1 XIV}

\textbf{Answer:}\\

\[
A m := \begin{bmatrix}
2 & \mu_{1} & & &   \\
\lambda_{2} & 2 & \mu_{2} & &  \\
& & \ddots & &   \\
 & & \lambda_{N-1} & 2  & \mu_{N-1} \\
& & & \lambda_{N} & 2 
\end{bmatrix}
\begin{bmatrix}
m_{1} \\
m_{2} \\
\vdots \\
m_{N-1} \\
m_{N}
\end{bmatrix}
=
b
\]

When the distance between two adjacent points is much smaller than the others, $\lambda_i, \mu_i$ is large. It might cause \( \text{cond}_A = \|A\| \|A^{-1}\| = \frac{\sigma_{\text{max}}}{\sigma_{\text{min}}} \) to be large, which implies poorer numerical stability in solving the system of linear equations.

% ===============================================
\section*{Acknowledgement}
None.


\printbibliography

\end{document}