\documentclass[a4paper]{article}
\usepackage[affil-it]{authblk}
\usepackage[backend=bibtex,style=numeric]{biblatex}

\usepackage{geometry}
\usepackage{amsmath, amssymb}
% Let geometry manage the whole page layout; mixing it with manual
% \topmargin/\paperheight/\textheight settings produces conflicting dimensions.
\geometry{a4paper, hmargin=1.5cm, vmargin={0pt,1cm}}

\addbibresource{citation.bib}

\begin{document}
% =================================================
\title{NumPDE homework \# 1}

\author{wangjie 3220100105
  \thanks{Electronic address: \texttt{3220100105@zju.edu.cn}}}
\affil{(math), Zhejiang University }


\date{Due time: \today}

\maketitle

\begin{abstract}
    Theoretical homework: solutions to the exercises of Chapter 7.
\end{abstract}





% ============================================
\section*{theoretical homework}

All exercises in Chapter 7.
\cite{zqh}

\subsection*{Exercise 7.14}  

We analyze the norms of the grid function \( g \) under the given conditions. The grid function \( g \) is defined on \( X = \{x_1, x_2, \dots, x_N\} \), with the following properties:
\begin{itemize}
    \item \( g_1 = O(h) \),
    \item \( g_N = O(h) \),
    \item \( g_j = O(h^2) \) for \( j = 2, \dots, N-1 \).
\end{itemize}

We need to show the following norms:
\begin{itemize}
    \item \( \|g\|_{L^\infty} = O(h) \),
    \item \( \|g\|_{L^1} = O(h^2) \),
    \item \( \|g\|_{L^2} = O(h^{3/2}) \).
\end{itemize}

\subsection*{1. \( \|g\|_{L^\infty} = O(h) \)}

The \( L^\infty \)-norm (max-norm) of \( g \) is defined as:
\[
\|g\|_{L^\infty} = \max_{1 \leq j \leq N} |g_j|.
\]

From the given conditions:
\begin{itemize}
    \item \( |g_1| = O(h) \),
    \item \( |g_N| = O(h) \),
    \item \( |g_j| = O(h^2) \) for \( j = 2, \dots, N-1 \).
\end{itemize}

Since \( O(h) \) dominates \( O(h^2) \) as \( h \to 0 \), the maximum value of \( |g_j| \) is \( O(h) \). Thus:
\[
\|g\|_{L^\infty} = O(h).
\]

\subsection*{2. \( \|g\|_{L^1} = O(h^2) \)}

The \( L^1 \)-norm of \( g \) is defined as:
\[
\|g\|_{L^1} = h \sum_{i=1}^N |g_i|.
\]

From the given conditions:
\begin{itemize}
    \item \( |g_1| = O(h) \),
    \item \( |g_N| = O(h) \),
    \item \( |g_j| = O(h^2) \) for \( j = 2, \dots, N-1 \).
\end{itemize}

The \( L^1 \)-norm becomes:
\[
\|g\|_{L^1} = h \left( |g_1| + |g_N| + \sum_{j=2}^{N-1} |g_j| \right).
\]

Substituting the orders of magnitude:
\[
\|g\|_{L^1} = h \left( O(h) + O(h) + (N-2) \cdot O(h^2) \right).
\]

Assuming \( N = O(1/h) \) (a typical grid size for discretization), we have \( N-2 = O(1/h) \). Thus:
\[
\|g\|_{L^1} = h \left( O(h) + O(h) + O(1/h) \cdot O(h^2) \right) = h \cdot O(h) = O(h^2).
\]

Therefore:
\[
\|g\|_{L^1} = O(h^2).
\]

\subsection*{3. \( \|g\|_{L^2} = O(h^{3/2}) \)}

The \( L^2 \)-norm of \( g \) is defined as:
\[
\|g\|_{L^2} = \left( h \sum_{i=1}^N |g_i|^2 \right)^{\frac{1}{2}}.
\]

From the given conditions:
\begin{itemize}
    \item \( |g_1|^2 = O(h^2) \),
    \item \( |g_N|^2 = O(h^2) \),
    \item \( |g_j|^2 = O(h^4) \) for \( j = 2, \dots, N-1 \).
\end{itemize}

The \( L^2 \)-norm becomes:
\[
\|g\|_{L^2} = \left( h \left( |g_1|^2 + |g_N|^2 + \sum_{j=2}^{N-1} |g_j|^2 \right) \right)^{\frac{1}{2}}.
\]

Substituting the orders of magnitude:
\[
\|g\|_{L^2} = \left( h \left( O(h^2) + O(h^2) + (N-2) \cdot O(h^4) \right) \right)^{\frac{1}{2}}.
\]

Assuming \( N = O(1/h) \), we have \( N-2 = O(1/h) \). Thus:
\[
\|g\|_{L^2} = \left( h \left( O(h^2) + O(h^2) + O(1/h) \cdot O(h^4) \right) \right)^{\frac{1}{2}}.
\]

Simplifying:
\[
\|g\|_{L^2} = \left( h \cdot O(h^2) \right)^{\frac{1}{2}} = \left( O(h^3) \right)^{\frac{1}{2}} = O(h^{3/2}).
\]

Therefore:
\[
\|g\|_{L^2} = O(h^{3/2}).
\]

\subsection*{Summary of Results}

\begin{itemize}
    \item \( \|g\|_{L^\infty} = O(h) \),
    \item \( \|g\|_{L^1} = O(h^2) \),
    \item \( \|g\|_{L^2} = O(h^{3/2}) \).
\end{itemize}
  
\subsection*{Exercise 7.26}  

We need to show that the set of eigenvectors \( \{w_k\} \) of the matrix \( A \) defined in (7.13) is orthogonal, i.e.,
\[
\langle w_i, w_k \rangle = 
\begin{cases} 
0 & \text{if } i \neq k, \\
\frac{m+1}{2} & \text{if } i = k,
\end{cases}
\]
where \( \langle \cdot, \cdot \rangle \) denotes the dot product.

From Lemma 7.25, the eigenvalues \( \lambda_k \) and eigenvectors \( w_k \) of the matrix \( A \) are given by:
\[
\lambda_k(A) = \frac{4}{h^2} \sin^2\left(\frac{k\pi}{2(m+1)}\right),
\]
\[
w_{k,j} = \sin\left(\frac{jk\pi}{m+1}\right),
\]
where \( j, k = 1, 2, \dots, m \).

\subsection*{Step 1: Definition of Eigenvectors}

The \( j \)-th component of the eigenvector \( w_k \) is:
\[
w_{k,j} = \sin\left(\frac{jk\pi}{m+1}\right).
\]

Thus, the dot product \( \langle w_i, w_k \rangle \) is:
\[
\langle w_i, w_k \rangle = \sum_{j=1}^m w_{i,j} w_{k,j} = \sum_{j=1}^m \sin\left(\frac{ji\pi}{m+1}\right) \sin\left(\frac{jk\pi}{m+1}\right).
\]

\subsection*{Step 2: Simplify Using Trigonometric Identity}

Using the trigonometric identity:
\[
\sin(a) \sin(b) = \frac{1}{2} \left[ \cos(a - b) - \cos(a + b) \right],
\]
we can rewrite the dot product as:
\[
\langle w_i, w_k \rangle = \frac{1}{2} \sum_{j=1}^m \left[ \cos\left(\frac{j(i-k)\pi}{m+1}\right) - \cos\left(\frac{j(i+k)\pi}{m+1}\right) \right].
\]

\subsection*{Step 3: Case Analysis}

\subsubsection*{Case 1: \( i \neq k \)}

When \( i \neq k \), we need to show:
\[
\langle w_i, w_k \rangle = 0.
\]

Consider the sums:
\[
\sum_{j=1}^m \cos\left(\frac{j(i-k)\pi}{m+1}\right) \quad \text{and} \quad \sum_{j=1}^m \cos\left(\frac{j(i+k)\pi}{m+1}\right).
\]

Since \( i-k \) and \( i+k \) are non-zero integers of the same parity with \( |i-k| \leq m-1 \) and \( i+k \leq 2m-1 \), neither is a multiple of \( 2(m+1) \). Hence the identity
\[
\sum_{j=1}^m \cos\left(\frac{jl\pi}{m+1}\right) = -\frac{1}{2} + \frac{(-1)^{l+1}}{2},
\qquad l \in \mathbb{Z},\ l \not\equiv 0 \pmod{2(m+1)},
\]
shows that each sum equals \( 0 \) when the common parity is odd and \( -1 \) when it is even. In either case the two sums coincide:
\[
\sum_{j=1}^m \cos\left(\frac{j(i-k)\pi}{m+1}\right) \quad = \quad \sum_{j=1}^m \cos\left(\frac{j(i+k)\pi}{m+1}\right).
\]

Thus:
\[
\langle w_i, w_k \rangle = 0.
\]

\subsubsection*{Case 2: \( i = k \)}

When \( i = k \), we need to show:
\[
\langle w_i, w_i \rangle = \frac{m+1}{2}.
\]

The dot product becomes:
\[
\langle w_i, w_i \rangle = \sum_{j=1}^m \sin^2\left(\frac{ji\pi}{m+1}\right).
\]

Using the trigonometric identity:
\[
\sin^2(x) = \frac{1 - \cos(2x)}{2},
\]
we rewrite the sum as:
\[
\langle w_i, w_i \rangle = \sum_{j=1}^m \frac{1 - \cos\left(\frac{2ji\pi}{m+1}\right)}{2} = \frac{1}{2} \sum_{j=1}^m 1 - \frac{1}{2} \sum_{j=1}^m \cos\left(\frac{2ji\pi}{m+1}\right).
\]

The first term is:
\[
\frac{1}{2} \sum_{j=1}^m 1 = \frac{m}{2}.
\]

The second term is:
\[
\frac{1}{2} \sum_{j=1}^m \cos\left(\frac{2ji\pi}{m+1}\right).
\]

Since \( 2i \) is a non-zero even integer with \( 2i \leq 2m < 2(m+1) \) for \( i = 1, 2, \dots, m \), we have:
\[
\sum_{j=1}^m \cos\left(\frac{2ji\pi}{m+1}\right) = -1.
\]

Therefore:
\[
\langle w_i, w_i \rangle = \frac{m}{2} - \frac{1}{2} (-1) = \frac{m}{2} + \frac{1}{2} = \frac{m+1}{2}.
\]

\section*{Conclusion}

We have shown that:
\begin{itemize}
    \item When \( i \neq k \), \( \langle w_i, w_k \rangle = 0 \);
    \item When \( i = k \), \( \langle w_i, w_i \rangle = \frac{m+1}{2} \).
\end{itemize}

Thus, the set of eigenvectors \( \{w_k\} \) is orthogonal.

\subsection*{Exercise 7.37}

We need to show that all elements of the first column of \( B_E = A_E^{-1} \) are \( O(1) \), where the matrix \( A_E \) is defined as:

\[
A_E = \frac{1}{h^2}
\begin{bmatrix}
-h & h  &        &        &        &     \\
1  & -2 & 1      &        &        &     \\
   & 1  & -2     & 1      &        &     \\
   &    & \ddots & \ddots & \ddots &     \\
   &    &        & 1      & -2     & 1   \\
   &    &        &        & 0      & h^2
\end{bmatrix}.
\]

\subsection*{Step 1: Understanding the Problem}

The first column of \( B_E = A_E^{-1} \) is the solution to the linear system:
\[
A_E \cdot \mathbf{x} = \mathbf{e}_1,
\]
where \( \mathbf{e}_1 = [1, 0, \dots, 0]^T \) is the first standard basis vector.

\subsection*{Step 2: Writing the Linear System}

Let \( \mathbf{x} = [x_1, x_2, \dots, x_N]^T \). The system \( A_E \cdot \mathbf{x} = \mathbf{e}_1 \) expands to the following equations:

1. First row:
   \[
   \frac{1}{h^2} (-h x_1 + h x_2) = 1 \implies -x_1 + x_2 = h.
   \]
2. Intermediate rows (\( i = 2, \dots, N-1 \)):
   \[
   \frac{1}{h^2} (x_{i-1} - 2x_i + x_{i+1}) = 0 \implies x_{i-1} - 2x_i + x_{i+1} = 0.
   \]
3. Last row:
   \[
   x_N = 0.
   \]

\subsection*{Step 3: Solving the Recurrence Relation}

The intermediate rows give a recurrence relation:
\[
x_{i+1} = 2x_i - x_{i-1},
\]
whose general solution is an arithmetic progression \( x_i = a + b i \). The first row gives \( x_2 - x_1 = h \), so the common difference is \( b = h \); the last row gives \( x_N = 0 \), so \( a = -hN \).

Thus, the solution is:
\[
x_i = h (i - N).
\]

\subsection*{Step 4: Analyzing the Order of the Solution}

For \( i = 1, 2, \dots, N \), we have:
\[
x_i = h (i - N).
\]

Since \( N = O(1/h) \) and \( 1 \leq i \leq N \), we have \( |i - N| \leq N - 1 = O(1/h) \), so:
\[
|x_i| = h \, |i - N| \leq h \cdot O(1/h) = O(1).
\]

\section*{Conclusion}

We have shown that all elements of the first column of \( B_E = A_E^{-1} \) are \( O(1) \).

\subsection*{Exercise 7.42}

We need to show that the Local Truncation Error (LTE) of the Finite Difference (FD) method in Example 7.41 is:

\[
\tau_{i,j} = -\frac{1}{12} h^2 \left( \frac{\partial^4 u}{\partial x^4} + \frac{\partial^4 u}{\partial y^4} \right)\bigg|_{(x_i, y_j)} + O(h^4).
\]

\subsection*{Step 1: Definition of Local Truncation Error}

The Local Truncation Error (LTE) is the difference between the exact differential equation and its finite difference approximation. For the Poisson equation, the LTE is given by:

\[
\tau_{i,j} = - \left( \frac{\hat{U}_{i-1,j} - 2\hat{U}_{ij} + \hat{U}_{i+1,j}}{h^2} + \frac{\hat{U}_{i,j-1} - 2\hat{U}_{ij} + \hat{U}_{i,j+1}}{h^2} \right) - \left( -\left( \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} \right)\bigg|_{(x_i, y_j)} \right).
\]

Here, \(\hat{U}_{i,j} = u(x_i, y_j)\) represents the exact solution at the grid point \((x_i, y_j)\).

\subsection*{Step 2: Taylor Series Expansion}

We perform Taylor series expansions of \(\hat{U}_{i-1,j}\), \(\hat{U}_{i+1,j}\), \(\hat{U}_{i,j-1}\), and \(\hat{U}_{i,j+1}\) around the point \((x_i, y_j)\), including an additional term for higher accuracy:

1. For \(\hat{U}_{i-1,j}\):
   \[
   \hat{U}_{i-1,j} = u(x_i - h, y_j) = u(x_i, y_j) - h \frac{\partial u}{\partial x} + \frac{h^2}{2} \frac{\partial^2 u}{\partial x^2} - \frac{h^3}{6} \frac{\partial^3 u}{\partial x^3} + \frac{h^4}{24} \frac{\partial^4 u}{\partial x^4} - \frac{h^5}{120} \frac{\partial^5 u}{\partial x^5} + O(h^6).
   \]

2. For \(\hat{U}_{i+1,j}\):
   \[
   \hat{U}_{i+1,j} = u(x_i + h, y_j) = u(x_i, y_j) + h \frac{\partial u}{\partial x} + \frac{h^2}{2} \frac{\partial^2 u}{\partial x^2} + \frac{h^3}{6} \frac{\partial^3 u}{\partial x^3} + \frac{h^4}{24} \frac{\partial^4 u}{\partial x^4} + \frac{h^5}{120} \frac{\partial^5 u}{\partial x^5} + O(h^6).
   \]

3. For \(\hat{U}_{i,j-1}\):
   \[
   \hat{U}_{i,j-1} = u(x_i, y_j - h) = u(x_i, y_j) - h \frac{\partial u}{\partial y} + \frac{h^2}{2} \frac{\partial^2 u}{\partial y^2} - \frac{h^3}{6} \frac{\partial^3 u}{\partial y^3} + \frac{h^4}{24} \frac{\partial^4 u}{\partial y^4} - \frac{h^5}{120} \frac{\partial^5 u}{\partial y^5} + O(h^6).
   \]

4. For \(\hat{U}_{i,j+1}\):
   \[
   \hat{U}_{i,j+1} = u(x_i, y_j + h) = u(x_i, y_j) + h \frac{\partial u}{\partial y} + \frac{h^2}{2} \frac{\partial^2 u}{\partial y^2} + \frac{h^3}{6} \frac{\partial^3 u}{\partial y^3} + \frac{h^4}{24} \frac{\partial^4 u}{\partial y^4} + \frac{h^5}{120} \frac{\partial^5 u}{\partial y^5} + O(h^6).
   \]

\subsection*{Step 3: Substituting into the Finite Difference Formula}

Substitute the Taylor series expansions into the finite difference formula:

\[
\frac{\hat{U}_{i-1,j} - 2\hat{U}_{ij} + \hat{U}_{i+1,j}}{h^2} = \frac{\partial^2 u}{\partial x^2} + \frac{h^2}{12} \frac{\partial^4 u}{\partial x^4} + O(h^4),
\]

\[
\frac{\hat{U}_{i,j-1} - 2\hat{U}_{ij} + \hat{U}_{i,j+1}}{h^2} = \frac{\partial^2 u}{\partial y^2} + \frac{h^2}{12} \frac{\partial^4 u}{\partial y^4} + O(h^4).
\]

Thus, the finite difference approximation becomes:

\[
-\left( \frac{\partial^2 u}{\partial x^2} + \frac{h^2}{12} \frac{\partial^4 u}{\partial x^4} + \frac{\partial^2 u}{\partial y^2} + \frac{h^2}{12} \frac{\partial^4 u}{\partial y^4} \right) + O(h^4).
\]

\subsection*{Step 4: Calculating the Local Truncation Error}

The Local Truncation Error is:

\[
\tau_{i,j} = - \left( \frac{\hat{U}_{i-1,j} - 2\hat{U}_{ij} + \hat{U}_{i+1,j}}{h^2} + \frac{\hat{U}_{i,j-1} - 2\hat{U}_{ij} + \hat{U}_{i,j+1}}{h^2} \right) - \left( -\left( \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} \right)\bigg|_{(x_i, y_j)} \right).
\]

Substituting the finite difference approximation, we get:

\[
\tau_{i,j} = -\left( \frac{\partial^2 u}{\partial x^2} + \frac{h^2}{12} \frac{\partial^4 u}{\partial x^4} + \frac{\partial^2 u}{\partial y^2} + \frac{h^2}{12} \frac{\partial^4 u}{\partial y^4} \right) + \left( \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} \right) + O(h^4).
\]

Simplifying, we obtain:

\[
\tau_{i,j} = -\frac{h^2}{12} \left( \frac{\partial^4 u}{\partial x^4} + \frac{\partial^4 u}{\partial y^4} \right) + O(h^4).
\]

\section*{Conclusion}

We have shown that the Local Truncation Error of the Finite Difference method is:

\[
\tau_{i,j} = -\frac{1}{12} h^2 \left( \frac{\partial^4 u}{\partial x^4} + \frac{\partial^4 u}{\partial y^4} \right)\bigg|_{(x_i, y_j)} + O(h^4).
\]

\subsection*{Exercise 7.62}

We need to show that in Example 7.61:
\begin{enumerate}
    \item The Local Truncation Error (LTE) at a regular equation-discretization point is \( O(h^2) \).
    \item The Local Truncation Error (LTE) at an irregular equation-discretization point is \( O(h) \).
\end{enumerate}

\subsection*{1. Local Truncation Error at a Regular Point}

According to Exercise 7.42, we have:

\[
\tau_{i,j} = -\frac{1}{12} h^2 \left( \frac{\partial^4 u}{\partial x^4} + \frac{\partial^4 u}{\partial y^4} \right)\bigg|_{(x_i, y_j)} + O(h^4).
\]

Therefore, the LTE at a regular equation-discretization point is \( O(h^2) \).

\subsection*{2. Local Truncation Error at an Irregular Point}

At an irregular equation-discretization point, the discrete operator \( L_h U_P \) is given by:

\[
L_h U_P := \frac{(1 + \theta) U_P - U_A - \theta U_W}{\frac{1}{2} \theta (1 + \theta) h^2} + \frac{(1 + \alpha) U_P - U_B - \alpha U_S}{\frac{1}{2} \alpha (1 + \alpha) h^2}.
\]

Let \(\hat{U}_{i,j}\) denote the exact solution at the grid point \((x_i, y_j)\), that is, \(\hat{U}_{i,j} = u(x_i, y_j)\).

Next, perform Taylor series expansions of \(\hat{U_A}\), \(\hat{U_W}\), \(\hat{U_B}\), and \(\hat{U_S}\) around the point \(P = (x_P, y_P)\), writing \(\hat{U_P} = \hat{U}_{i,j}\):

\[
\hat{U_A} = u(x_P + \theta h, y_P) = \hat{U}_{i,j} + \theta h \frac{\partial u}{\partial x} + \frac{\theta^2 h^2}{2} \frac{\partial^2 u}{\partial x^2} + \frac{\theta^3 h^3}{6} \frac{\partial^3 u}{\partial x^3} + O(h^4),
\]

\[
\hat{U_W} = u(x_P - h, y_P) = \hat{U}_{i,j} - h \frac{\partial u}{\partial x} + \frac{h^2}{2} \frac{\partial^2 u}{\partial x^2} - \frac{h^3}{6} \frac{\partial^3 u}{\partial x^3} + O(h^4),
\]

\[
\hat{U_B} = u(x_P, y_P + \alpha h) = \hat{U}_{i,j} + \alpha h \frac{\partial u}{\partial y} + \frac{\alpha^2 h^2}{2} \frac{\partial^2 u}{\partial y^2} + \frac{\alpha^3 h^3}{6} \frac{\partial^3 u}{\partial y^3} + O(h^4),
\]

\[
\hat{U_S} = u(x_P, y_P - h) = \hat{U}_{i,j} - h \frac{\partial u}{\partial y} + \frac{h^2}{2} \frac{\partial^2 u}{\partial y^2} - \frac{h^3}{6} \frac{\partial^3 u}{\partial y^3} + O(h^4).
\]

Now, we substitute these expansions into the discrete operator \( L_h U_P \). Since \( L_h \) approximates \( -\Delta \), the Local Truncation Error \( \tau_{i,j} \) is, consistently with the convention of Exercise 7.42,

\[
\tau_{i,j} = L_h \hat{U_P} + \left( \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} \right)\bigg|_{(x_i, y_j)}.
\]

Substituting the expansions, the first- and second-derivative terms cancel, and we get:

\[
\tau_{i,j} = \frac{(1-\theta)h}{3} \frac{\partial^3 u}{\partial x^3} + \frac{(1-\alpha)h}{3} \frac{\partial^3 u}{\partial y^3} + O(h^2).
\]

So we find that the leading term of the Local Truncation Error is \( O(h) \). Therefore, the LTE at an irregular equation-discretization point is \( O(h) \).

\subsection*{Conclusion}

We have shown that:
\begin{itemize}
    \item The Local Truncation Error at a regular equation-discretization point is \( O(h^2) \).
    \item The Local Truncation Error at an irregular equation-discretization point is \( O(h) \).
\end{itemize}

\subsection*{Exercise 7.64}

We prove Theorem 7.63 by constructing a function \( \psi \) and applying Lemma 7.58 (Discrete Maximum Principle).

\subsection*{Step 1: Define the Function \( \psi \)}

Define the function \( \psi : X \to \mathbb{R} \) as:

\[
\psi_P = \varphi_P \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} + E_P,
\]

where:
\begin{itemize}
    \item \( \varphi_P \) is the nonnegative function defined in Theorem 7.63.
    \item \( \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \) is a positive constant.
    \item \( E_P = U_P - u(P) \) is the error of the numerical solution at \( P \).
\end{itemize}

\subsection*{Step 2: Verify the Properties of \( \psi \)}

\begin{enumerate}
    \item \textbf{Nonnegativity}:
    Since \( \varphi_P \) is nonnegative and \( \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \) is a positive constant, at the boundary point \( Q \in X_{\partial \Omega} \), due to the Dirichlet condition, \( E_Q = 0 \). Therefore:
\[
\psi_Q = E_Q + \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \varphi_Q = \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \varphi_Q \geq 0.
\]

Thus, \( \max_{P \in X} \psi_P \geq 0 \), which verifies the first hypothesis of Lemma 7.58.

    \item \textbf{Action of the Discrete Operator}:
    \begin{itemize}
        \item For \( P \in X_1 \):
        \[
        L_h \psi_P = T_P + L_h \left( \varphi_P \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \right) \leq T_P - T_1 \leq 0.
        \]

        because \( T_P \leq |T_P| < T_1 \).

        \item For \( P \in X_2 \):
        \[
        L_h \psi_P = T_P + L_h \left( \varphi_P \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \right) \leq T_P - T_2 \leq 0.
        \]

        because \( T_P \leq |T_P| < T_2 \).
    \end{itemize}

    So we get:

    \[
     L_h \psi_P \leq 0 \text{ for all } P \in X_\Omega .
    \]
     
\end{enumerate}

\subsection*{Step 3: Apply Lemma 7.58 (Discrete Maximum Principle)}

By Lemma 7.58, since \( \psi \) satisfies:
\[
\begin{cases}
\max_{P \in X} \psi_P \geq 0, \\
\forall P \in X_\Omega, \quad L_h \psi_P \leq 0,
\end{cases}
\]
we have:
\[
\max_{P \in X_\Omega} \psi_P \leq \max_{Q \in X_{\partial \Omega}} \psi_Q.
\]

i.e.

\[
\max_{P \in X_\Omega}\left( \varphi_P \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} + E_P\right) \leq \max_{Q \in X_{\partial \Omega}} \left(\varphi_Q \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} + E_Q\right).
\]

Since at the boundary points \(Q \in X_{\partial \Omega}\), \(E_Q = 0\), it follows that:

\[
\max_{P \in X_\Omega}\left( \varphi_P \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} + E_P\right) \leq \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} \max_{Q \in X_{\partial \Omega}} \varphi_Q .
\]

\subsection*{Step 4: Bound on the Solution Error}

The solution error \( E_P = U_P - u(P) \) satisfies \( E_P \leq \psi_P \) (because \( \varphi_P \cdot \max\{T_1/C_1, T_2/C_2\} \geq 0 \)), so:
\[
E_P \leq \max_{P \in X_\Omega}\left( \varphi_P \cdot \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\} + E_P\right) \leq \left( \max_{Q \in X_{\partial \Omega}} \varphi(Q) \right) \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\}.
\]
Repeating the argument with \( E_P \) replaced by \( -E_P \) yields the same bound for \( -E_P \); the two bounds together control \( |E_P| \).

\section*{Conclusion}

We have proved Theorem 7.63 by constructing the function \( \psi \) and applying Lemma 7.58 (Discrete Maximum Principle). The solution error \( E_P \) is bounded by:
\[
|E_P| \leq \left( \max_{Q \in X_{\partial \Omega}} \varphi(Q) \right) \max \left\{ \frac{T_1}{C_1}, \frac{T_2}{C_2} \right\}.
\]



% ===============================================
\section*{Acknowledgement}
None.


\printbibliography

\end{document}