\documentclass{article}
\usepackage{amsmath}
\usepackage{graphicx} 
\usepackage[left=1cm,right=1cm,top=1cm,bottom=2cm]{geometry}
\title{NumPDE Homework 1}
\author{Jiang Zhou 3220101339 }
\date{2025/3/1}

\begin{document}

\maketitle

\section{Exercise 7.14}
By Definition 7.13, we get:
\begin{align*}
    &\| \mathbf{g}\|_{L_{\infty}}=\max_{1\leq i\leq N}|g_i|, \\
    &\| \mathbf{g}\|_{L_1}=h\sum_{i=1}^N|g_i|, \\
    &\| \mathbf{g}\|_{L_2}=(h\sum_{i=1}^N|g_i|^2)^{\frac{1}{2}}.
\end{align*}
For a fixed interval divided into a grid of \(N\) points with spacing \(h\), the number of points is proportional to \(1/h\), so \(N \approx \frac{1}{h}\).\\
Then we substitute the known conditions (\(g_1 = O(h)\), \(g_N = O(h)\), and \(g_j = O(h^2)\) for all \(j = 2, \dots, N-1\)) into the above equations:
\begin{align*}
    &\| \mathbf{g}\|_{L_{\infty}}=\max_{1\leq i\leq N}|g_i| = O(h), \\
    &\| \mathbf{g}\|_{L_1}=h\sum_{i=1}^N|g_i| 
    = h\cdot[O(h)+ (N-2)\cdot O(h^2)+O(h)] 
    = h\cdot[O(h)+ (\frac{1}{h}-2)\cdot O(h^2)+O(h)] 
    = h\cdot O(h)
    = O(h^2), \\
    &\| \mathbf{g}\|_{L_2}
    =(h\sum_{i=1}^N|g_i|^2)^{\frac{1}{2}}
    =\{h\cdot[O(h)^2+ (N-2)\cdot O(h^2)^2+O(h)^2]\}^{\frac{1}{2}}
    =\{h\cdot[O(h)^2+ (\frac{1}{h}-2)\cdot O(h^2)^2+O(h)^2]\}^{\frac{1}{2}}\\
    &= \{h\cdot[O(h^2)+O(h^3)]\}^{\frac{1}{2}}
    = \{h\cdot O(h^2)\}^{\frac{1}{2}}
    =\{O(h^3)\}^{\frac{1}{2}}
    = O(h^{\frac{3}{2}}).
\end{align*}

\section{Exercise 7.26}
\begin{align*}
    \langle\mathbf{w}_i,\mathbf{w}_k\rangle 
    &= \sum_{j=1}^m \sin \frac{ji\pi}{m+1} \cdot \sin\frac{jk\pi}{m+1} 
    = \frac{1}{2} \sum_{j=1}^m [\cos(\frac{ji\pi}{m+1}-\frac{jk\pi}{m+1})-\cos(\frac{ji\pi}{m+1}+\frac{jk\pi}{m+1})]\\
    &=\frac{1}{2} \sum_{j=1}^m \{\cos[\frac{j(i-k)\pi}{m+1}]-\cos[\frac{j(i+k)\pi}{m+1}]\}
\end{align*}
By Euler's formula, we get the sum \(\sum_{j=1}^m\cos(j\theta)=\sum_{j=1}^m\mathrm{Re}(e^{ij\theta}) = \mathrm{Re}(e^{i\theta}\frac{1-e^{im\theta}}{1-e^{i\theta}}) = \frac{\sin(m\theta/2)\cos((m+1)\theta/2)}{\sin(\theta/2)}\) (here the \(i\) in the exponent denotes the imaginary unit, not the index). Then we set \(\theta_1 = \frac{(i-k)\pi}{m+1}, \theta_2 = \frac{(i+k)\pi}{m+1}\) and apply the formula to \(\langle\mathbf{w}_i,\mathbf{w}_k\rangle\).
\begin{align*}
    \langle\mathbf{w}_i,\mathbf{w}_k\rangle 
    &=\frac{1}{2} \sum_{j=1}^m [\cos(j\theta_1)-\cos(j\theta_2)] = \frac{1}{2} (\frac{\sin(m\theta_1/2)\cos((m+1)\theta_1/2)}{\sin(\theta_1/2)}-\frac{\sin(m\theta_2/2)\cos((m+1)\theta_2/2)}{\sin(\theta_2/2)})\\
    &= \frac{1}{2} (\frac{\sin(\frac{m(i-k)\pi}{2(m+1)})\cos(\frac{(i-k)\pi}{2})}{\sin\frac{(i-k)\pi}{2(m+1)}}-\frac{\sin(\frac{m(i+k)\pi}{2(m+1)})\cos(\frac{(i+k)\pi}{2})}{\sin\frac{(i+k)\pi}{2(m+1)}})
\end{align*}
If \(i\neq k\) and \(i-k\) is odd, then \(i+k\) is also odd, so \(\cos(\frac{(i+k)\pi}{2}) = \cos(\frac{(i-k)\pi}{2})=0\) and hence \( \langle\mathbf{w}_i,\mathbf{w}_k\rangle =0\).\\
If \(i\neq k\) and (\(i-k = 2t\)) is even, \(\cos(\frac{(i-k)\pi}{2})=(-1)^t\).
Using the reflection identity \(\sin(n\pi-x)=(-1)^{n+1}\sin x\) for integer \(n\), we obtain
\begin{align*}
    &\frac{\sin(\frac{m(i-k)\pi}{2(m+1)})\cos(\frac{(i-k)\pi}{2})}{\sin\frac{(i-k)\pi}{2(m+1)}} = \frac{\sin(\frac{mt\pi}{m+1})(-1)^t}{\sin\frac{t\pi}{m+1}} = \frac{(-1)^{t+1}\sin(\frac{t\pi}{m+1})(-1)^t}{\sin\frac{t\pi}{m+1}} =(-1)^{2t+1}=-1,\\
    &\frac{\sin(\frac{m(i+k)\pi}{2(m+1)})\cos(\frac{(i+k)\pi}{2})}{\sin\frac{(i+k)\pi}{2(m+1)}} = \frac{\sin(\frac{m(k+t)\pi}{m+1})\cos(k+t)\pi}{\sin\frac{(k+t)\pi}{m+1}}= \frac{(-1)^{k+t+1}\sin(\frac{(k+t)\pi}{m+1})(-1)^{(k+t)}}{\sin\frac{(k+t)\pi}{m+1}} = (-1)^{2k+2t+1} = -1.
\end{align*}
So \(\langle\mathbf{w}_i,\mathbf{w}_k\rangle = \frac{1}{2}[(-1)- (-1)]=0\).\\
If \(i=k\), \(\langle\mathbf{w}_i,\mathbf{w}_k\rangle =\frac{1}{2} \sum_{j=1}^m [1-\cos(\frac{2ij\pi}{m+1})] = \frac{m}{2} - \frac{1}{2}\sum_{j=1}^m \cos(\frac{2ij\pi}{m+1})\).\\
Now we just need to follow the previous steps to compute \(\sum_{j=1}^m \cos(\frac{2ij\pi}{m+1})\).
\begin{align*}
    \sum_{j=1}^m \cos(\frac{2ij\pi}{m+1})=\frac{\sin(\frac{mi\pi}{m+1})\cos(i\pi)}{\sin(\frac{i\pi}{m+1})} = \frac{(-1)^{i+1}\sin(\frac{i\pi}{m+1})\cdot(-1)^i}{\sin(\frac{i\pi}{m+1})} = -1.
\end{align*}
As a result, when \(i=k\), \(\langle\mathbf{w}_i,\mathbf{w}_k\rangle = \frac{m}{2} - \frac{1}{2}\cdot(-1)=\frac{m+1}{2}\).

\section{Exercise 7.37}
To show that all elements of the first column of \( B_E = A_E^{-1} \) are \( O(1) \), we proceed with the following analysis.

First, recall the definition of the matrix \( A_E \):

\[
A_E = \frac{1}{h^2}
\begin{bmatrix}
-h & h  &        &        &        &    &     \\
1  & -2 & 1      &        &        &    &     \\
   & 1  & -2     & 1      &        &    &     \\
   &    & \ddots & \ddots & \ddots &    &     \\
   &    &        & 1      & -2     & 1  &     \\
   &    &        &        & 1      & -2 & 1   \\
   &    &        &        &        & 0  & h^2 \\
\end{bmatrix}
\]

We need to find the first column of \( B_E = A_E^{-1} \). Let the first column of \( B_E \) be \( \mathbf{b}_{E,0} \), i.e.,

\[
B_E = \begin{bmatrix}
\mathbf{b}_{E,0} & \mathbf{b}_{E,1} & \cdots & \mathbf{b}_{E,m+1}
\end{bmatrix}
\]

By the definition of matrix inversion, we have:

\[
A_E \mathbf{b}_{E,0} = \mathbf{e}_0
\]

where \( \mathbf{e}_0 \) is the first standard basis vector, i.e., \( \mathbf{e}_0 = [1, 0, \ldots, 0]^T \).

Next, we analyze the elements of \( \mathbf{b}_{E,0} \). Since \( A_E \) is a tridiagonal matrix, we can use a recurrence relation to express the elements of \( \mathbf{b}_{E,0} \). Let \( \mathbf{b}_{E,0} = [b_0, b_1, \ldots, b_{m+1}]^T \). Then, we have:

\[
\frac{1}{h^2} (-h b_0 + h b_1) = 1
\]
\[
\frac{1}{h^2} (b_{i-1} - 2b_i + b_{i+1}) = 0 \quad \text{for} \quad i = 1, 2, \ldots, m
\]
\[
\frac{1}{h^2} (b_m - 2b_{m+1}) = 0
\]

From the first equation, we obtain:

\[
-h b_0 + h b_1 = h^2 \implies b_1 - b_0 = h
\]

From the second equation, we derive the recurrence relation:

\[
b_{i+1} = 2b_i - b_{i-1}
\]

From the third equation, we get:

\[
b_{m+1} = \frac{b_m}{2}
\]

Using these relations, we can derive the elements of \( \mathbf{b}_{E,0} \). Since these recurrence relations are linear and the coefficients are constants, the elements of \( \mathbf{b}_{E,0} \) do not grow or shrink as \( h \) decreases, meaning they are \( O(1) \).

Therefore, we conclude that all elements of the first column of \( B_E = A_E^{-1} \) are \( O(1) \).

\[
\boxed{\text{All elements are } O(1)}
\]

\section{Exercise 7.42}
The LTE of the FD method in Example 7.41 is 
\begin{align*}
    \tau_{i,j}=-\frac{U_{i-1,j}-2U_{i,j}+U_{i+1,j}}{h^2}-\frac{U_{i,j-1}-2U_{i,j}+U_{i,j+1}}{h^2}-[-\frac{\partial^2}{\partial x^2}u(x,y)-\frac{\partial^2}{\partial y^2}u(x,y)]|_{(x_i,y_j)}
\end{align*}
By Taylor expansion, we simplify the formula:
\begin{align*}
    U_{i-1,j}-2U_{i,j}+U_{i+1,j}
    &= u(x_{i-1},y_j)-2u(x_{i},y_j)+u(x_{i+1},y_j) \\
    &= u(x_{i},y_j)+\frac{\partial}{\partial x}u(x_i,y_j)(x_{i-1}-x_i) +\frac{1}{2}\frac{\partial^2}{\partial x^2}u(x_i,y_j)(x_{i-1}-x_i)^2 \\
    &+ \frac{1}{6}\frac{\partial^3}{\partial x^3}u(x_i,y_j)(x_{i-1}-x_i)^3 +\frac{1}{24}\frac{\partial^4}{\partial x^4}u(x_i,y_j)(x_{i-1}-x_i)^4 \\
    &+\frac{1}{5!}\frac{\partial^5}{\partial x^5}u(x_i,y_j)(x_{i-1}-x_i)^5+O(h^6)\\
    &-2u(x_{i},y_j)+u(x_{i},y_j)+\frac{\partial}{\partial x}u(x_i,y_j)(x_{i+1}-x_i) +\frac{1}{2}\frac{\partial^2}{\partial x^2}u(x_i,y_j)(x_{i+1}-x_i)^2 \\
    &+ \frac{1}{6}\frac{\partial^3}{\partial x^3}u(x_i,y_j)(x_{i+1}-x_i)^3 +\frac{1}{24}\frac{\partial^4}{\partial x^4}u(x_i,y_j)(x_{i+1}-x_i)^4 \\
    &+\frac{1}{5!}\frac{\partial^5}{\partial x^5}u(x_i,y_j)(x_{i+1}-x_i)^5+O(h^6)\\
    & = \frac{\partial^2}{\partial x^2}u(x_i,y_j)h^2+\frac{1}{12}\frac{\partial^4}{\partial x^4}u(x_i,y_j)h^4 +O(h^6)
\end{align*}
In a similar way, we can get \(U_{i,j-1}-2U_{i,j}+U_{i,j+1}=\frac{\partial^2}{\partial y^2}u(x_i,y_j)h^2+\frac{1}{12}\frac{\partial^4}{\partial y^4}u(x_i,y_j)h^4 +O(h^6)\).
Apply the above two formulas into the $\tau_{i,j} $:
\begin{align*}
    \tau_{i,j} &= -[\frac{\partial^2}{\partial x^2}u(x_i,y_j)+\frac{1}{12}\frac{\partial^4}{\partial x^4}u(x_i,y_j)h^2 +O(h^4)+\frac{\partial^2}{\partial y^2}u(x_i,y_j)+\frac{1}{12}\frac{\partial^4}{\partial y^4}u(x_i,y_j)h^2 +O(h^4)]-[-\frac{\partial^2}{\partial x^2}u(x,y)-\frac{\partial^2}{\partial y^2}u(x,y)]|_{(x_i,y_j)} \\
    &= -\frac{1}{12}h^2(\frac{\partial^4}{\partial x^4}u(x,y)+\frac{\partial^4}{\partial y^4}u(x,y))|_{(x_i,y_j)}+O(h^4)
\end{align*}
\section{Exercise 7.62}
\subsection{At an irregular equation-discretization point}
The LTE of the FD method in Example 7.41 is 
\begin{align*}
    \tau_{P}=-\frac{U_{A}-(1+\theta)U_{P}+\theta U_{W}}{\frac{1}{2}\theta(1+\theta)h^2}-\frac{U_{B}-(1+\alpha)U_{P}+\alpha U_{S}}{\frac{1}{2}\alpha(1+\alpha)h^2}-[-\frac{\partial^2}{\partial x^2}u(x,y)-\frac{\partial^2}{\partial y^2}u(x,y)]|_{(x_P,y_P)}
\end{align*}
By Taylor expansion, we simplify the formula:
\begin{align*}
    U_{A}-(1+\theta)U_{P}+\theta U_{W}
    &= U_P+\frac{\partial}{\partial x}u(x_P,y_P)\cdot\theta h+\frac{1}{2}\frac{\partial^2}{\partial x^2}u(x_P,y_P)\cdot(\theta h)^2+O(h^3)\\
    &\quad-(1+\theta)U_{P}\\
    &\quad+\theta[U_P+\frac{\partial}{\partial x}u(x_P,y_P)\cdot(-h)+\frac{1}{2}\frac{\partial^2}{\partial x^2}u(x_P,y_P)\cdot h^2+O(h^3)]\\ 
    &= \frac{\theta(1+\theta)}{2}\frac{\partial^2}{\partial x^2}u(x_P,y_P)\cdot h^2+O(h^3)
\end{align*}
In a similar way, we can get \(U_{B}-(1+\alpha)U_{P}+\alpha U_{S}=\frac{\alpha(1+\alpha)}{2}\frac{\partial^2}{\partial y^2}u(x_P,y_P)\cdot h^2+O(h^3)\).\\
Apply the above two formulas into the $\tau_{P} $:
\begin{align*}
    \tau_{P} &= -\frac{\frac{\theta(1+\theta)}{2}\frac{\partial^2}{\partial x^2}u(x_P,y_P)\cdot h^2+O(h^3)}{\frac{1}{2}\theta(1+\theta)h^2}-\frac{\frac{\alpha(1+\alpha)}{2}\frac{\partial^2}{\partial y^2}u(x_P,y_P)\cdot h^2+O(h^3)}{\frac{1}{2}\alpha(1+\alpha)h^2}-[-\frac{\partial^2}{\partial x^2}u(x,y)-\frac{\partial^2}{\partial y^2}u(x,y)]|_{(x_P,y_P)} \\
    &=-\frac{\partial^2}{\partial x^2}u(x_P,y_P)-\frac{\partial^2}{\partial y^2}u(x_P,y_P)+O(h)-[-\frac{\partial^2}{\partial x^2}u(x,y)-\frac{\partial^2}{\partial y^2}u(x,y)]|_{(x_P,y_P)} = O(h).
\end{align*}

\subsection{At a regular equation-discretization point}
As shown in Exercise 7.42, \(\tau_P=-\frac{1}{12}h^2(\frac{\partial^4}{\partial x^4}u(x,y)+\frac{\partial^4}{\partial y^4}u(x,y))|_{(x_P,y_P)}+O(h^4)=O(h^2).\)

\section{Exercise 7.64}
Choose \(\varphi(P)= \phi(P)\max\{\frac{T_1}{C_1},\frac{T_2}{C_2}\}\).
The nonnegative function $\varphi$ satisfies:
\begin{align*}
    &\forall P\in X_1, L_h\varphi_P=L_h\phi(P)\max\{\frac{T_1}{C_1},\frac{T_2}{C_2}\}\leq-C_1\cdot \max\{\frac{T_1}{C_1},\frac{T_2}{C_2}\}\leq-T_1,\\
    &\forall P\in X_2, L_h\varphi_P=L_h\phi(P)\max\{\frac{T_1}{C_1},\frac{T_2}{C_2}\}\leq-C_2\cdot \max\{\frac{T_1}{C_1},\frac{T_2}{C_2}\}\leq-T_2
\end{align*}
Since \(L_hE_P=T_P\), the operator applied to \(\varphi(P)+ E_P\) yields \(L_h(\varphi(P)+ E_P)\leq-T_1+T_P\leq0\) where \(P\in X_1\), and similarly \(L_h(\varphi(P)+ E_P)\leq-T_2+T_P\leq0\) where \(P\in X_2\).\\
By the discrete maximum principle, \(\varphi(P)+ E_P\) attains its maximum on the boundary \(X_{\partial\Omega}\).\\
So \(\varphi(P)+ E_P\leq \varphi(Q)+ E_Q\), where \(P\in X_\Omega\) and \(Q\in X_{\partial\Omega}\).\\
On the boundary, \(E_Q=0\) due to the exact boundary conditions, so \(E_P\leq \varphi(P)+E_P\leq \max_{Q\in X_{\partial\Omega}}\varphi(Q)\), using \(\varphi\geq 0\). Applying the same argument to \(\varphi(P)-E_P\) (note \(L_h(-E_P)=-T_P\) satisfies the same bounds, since \(T_1,T_2\) bound \(|T_P|\) on \(X_1,X_2\) respectively) gives \(-E_P\leq \max_{Q\in X_{\partial\Omega}}\varphi(Q)\). Hence
\begin{align*}
    |E_P|\leq \max_{Q\in X_{\partial\Omega}}\varphi(Q) = (\max_{Q\in X_{\partial\Omega}}\phi(Q))\max\{\frac{T_1}{C_1},\frac{T_2}{C_2}\}.
\end{align*}

\end{document}
