\documentclass{article}
\usepackage{xltxtra}
\usepackage{ctex}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{geometry}
\usepackage{booktabs}
\usepackage{graphicx}
\usepackage{float}
\usepackage{geometry}
\usepackage{booktabs}
\usepackage{tikz}
\usepackage{listings}

\makeatletter
\newcommand{\Rmnum}[1]{\expandafter\@slowromancap\romannumeral #1@}
\makeatother
\geometry{a4paper,top=1.5cm,bottom=1.5cm}
\title{Report for Chapter 5.}
\author{张皓祥 \\ 3200102536 强基数学2001}
\date{}

\begin{document}
\maketitle

% Theoretical questions
\section{Theoretical questions.}

% T1
\subsection*{\Rmnum{1}. Give a detailed proof of Theorem 5.7.}

\noindent \textbf{Sol.}

\noindent (1) Prove $\mathcal{C} [a,b]$ is an inner-product space over 
$\mathbb{C}.$

$\forall u,v,w,\rho \in\mathcal{C}[a,b],\rho(t)>0,\alpha\in\mathbb{C},$

\noindent $\bullet$ 
$\langle u,u \rangle=\int_a^b\rho(t)u(t)\overline{u(t)}\, dt = \int_{a}^{b} \rho(t)|u(t)|^2 \,dt \geq 0. $

\noindent $\bullet$ If $\langle u,u \rangle = \int_{a}^{b} \rho(t)|u(t)|^2 \,dt= 0,$ then since 
the integrand $\rho(t)|u(t)|^2$ is continuous and nonnegative on $[a,b],$ it must vanish identically, 
i.e. $\rho(t)|u(t)|^2=0\ \forall t\in[a,b].$ 
But $\rho(t)>0,$ hence $u(t)=0\ \forall t\in[a,b],$ which means $u\equiv 0.$

\noindent $\bullet$ 
$\langle u+v ,w\rangle=\int_{a}^{b} \rho(t)(u(t)+v(t))\overline{w(t)} \,dt = 
\int_{a}^{b} \rho(t)u(t)\overline{w(t)} \,dt + \int_{a}^{b} \rho(t)v(t)\overline{w(t)} \,dt$

$=\langle u,w \rangle + \langle v,w \rangle.$

\noindent $\bullet$ 
$\langle \alpha u,v \rangle = \int_{a}^{b} \rho(t)\alpha u(t)\overline{v(t)} \,dt
=\alpha \int_{a}^{b} \rho(t)u(t)\overline{v(t)} \,dt 
=\alpha \langle u,v \rangle.$

\noindent $\bullet$ 
$\langle v,u \rangle = \int_{a}^{b} \rho(t)v(t)\overline{u(t)} \,dt 
= \overline{\int_{a}^{b} \rho(t)u(t)\overline{v(t)} \,dt} 
= \overline{\langle u,v \rangle}.$

Hence $\mathcal{C} [a,b]$ is an inner-product space over $\mathbb{C}.$

\noindent (2) $\mathcal{C} [a,b]$ is a normed vector space over 
$\mathbb{R}.$

The norm:
$$
\left\lVert u\right\rVert _2 = \sqrt{\langle u,u \rangle} 
=\left(\int_{a}^{b} \rho(t)|u(t)|^2 \,dt\right)^{\frac{1}{2}}.
$$

Vector space:

\noindent $\bullet$ By the laws of addition of continuous functions from mathematical analysis, 
addition of functions in $\mathcal{C}[a,b]$ is commutative and associative.

\noindent $\bullet$ By the law of multiplication that $g=\alpha f\Leftrightarrow 
g(x) = \alpha f(x)\ \forall x$, the set satisfies compatibility.

\noindent $\bullet$ Pick zero function $f_0,f_0(x)=0\forall x,$ then 
$f+f_0=f\forall f \in \mathcal{C}[a,b].$

\noindent $\bullet$ $\forall f\in \mathcal{C}[a,b],$ pick $g$ as the function with 
$g(x) = -f(x),$ so that $g+f=f_0$. Since $f$ is continuous and $|g(x_1)-g(x_2)|=|f(x_1)-f(x_2)|\ \forall x_1,x_2\in[a,b],$ 
$g$ is also continuous, which means $g\in \mathcal{C}[a,b].$

\noindent $\bullet$ Pick $1\in\mathbb{R},\forall f \in \mathcal{C}[a,b],\forall x\in[a,b],
1f(x)=f(x).$ Thus $1f=f.$

\noindent $\bullet$ By the laws of multiplication and addition, $\mathcal{C}[a,b]$ 
satisfies the distributive laws.

Hence $\mathcal{C}[a,b]$ is a normed vector space over $\mathbb{R}.$

% T2
\subsection*{\Rmnum{2}. Consider the Chebyshev polynomials of the first kind.}

\noindent \textbf{Sol.}

% (a)
\noindent (a) 
Note that Chebyshev polynomials have the form $T_n(x)=\cos (n \arccos(x)).$ 
Pick $\theta = \arccos x \in [0,\pi]$ if $x\in[-1,1],$ then $T_n(\cos\theta) = \cos(n\theta)$.
$\forall m\neq n\in \mathbb{N},$ $\cos[(n+m)\theta]$ and $\cos[(n-m)\theta]$ are not constant. 
Then we have  

\begin{align*}
    \langle T_n, T_m \rangle & = 
    \int_{-1}^1 \frac{T_n(x)T_m(x)}{\sqrt{1-x^2}}\, dx \\
    & = \int_{-1}^1 \frac{\cos(n\arccos(x)) \cos(m\arccos(x))}{\sqrt{1-x^2}}\, dx \\
    & = \int_{\pi}^0 \frac{\cos(n \theta) \cos(m \theta)}{\sqrt{1-\cos^2(\theta)}} \, d(\cos \theta) \\
    & = \int_0^{\pi} \cos(n\theta) \cos(m\theta) \, d\theta \\
    & = \int_0^{\pi} \frac{1}{2}(\cos[(n+m)\theta] + \cos[(n-m)\theta]) \, d\theta \\
    & = \frac{1}{2}(\frac{\sin[(n+m)\theta]}{n+m} + \frac{\sin[(n-m)\theta]}{n-m}) |_0^{\pi} \\
    & = 0.
\end{align*}
i.e. Chebyshev polynomials of the first kind are orthogonal on $[-1,1]$ with respect to the weight $\rho(x)=\frac{1}{\sqrt{1-x^2}}.$

% (b)
\noindent (b) By definition,

\noindent $\bullet$ $T_0(x) = \cos(0) = 1,\int_{-1}^1 \frac{1}{\sqrt{1-x^2}}\, dx=\arcsin(x)|_{-1}^1=\pi.$

\noindent $\bullet$ $T_1(x)=\cos(\arccos(x)) = x,\int_{-1}^1\frac{x^2}{\sqrt{1-x^2}}\, dx
=\int_{\pi}^0\frac{\cos^2\theta}{\sqrt{1-\cos^2\theta}} d(\cos\theta) 
=\int_0^{\pi}\cos^2\theta d\theta = \frac{\pi}{2}.$

\noindent $\bullet$ $T_2(x)=\cos(2\arccos(x))=2\cos^2(\arccos(x))-1=2x^2-1.$

$\int_{-1}^1 \frac{(2x^2-1)^2}{\sqrt{1-x^2}}dx
=\int_{\pi}^0 \frac{(2\cos^2\theta - 1)^2}{\sqrt{1-\cos^2\theta}} d(\cos\theta)
= \int_0^{\pi} \cos^2(2\theta)d\theta=\frac{\pi}{2}.$

Hence we can pick 
$u_1^*(x) = \frac{1}{\sqrt{\pi}}, 
u_2^*(x) = \sqrt{\frac{2}{\pi}}x, u_3^*(x) = \sqrt{\frac{2}{\pi}}(2x^2-1).$ 
They form an orthonormal system.

% T3
\subsection*{\Rmnum{3}. Least-square approximation of a continuous function.}

\noindent \textbf{Sol.}

\noindent (a) By the conclusion of \Rmnum{2}, use the Chebyshev polynomials with weight 
function $\rho(x) = \frac{1}{\sqrt{1-x^2}}.$
$$
u_1^*(x) = \frac{1}{\sqrt{\pi}}, \quad
u_2^*(x) = \sqrt{\frac{2}{\pi}}x, \quad
u_3^*(x) = \sqrt{\frac{2}{\pi}}(2x^2-1).
$$
Calculate the Fourier coefficients of $y(x)=\sqrt{1-x^2},$ we have
\begin{align*}
    b_1 & = \int_{-1}^1 \frac{\sqrt{1-x^2}}{\sqrt{\pi(1-x^2)}} \, dx
    = \int_{-1}^1 \frac{1}{\sqrt{\pi}} \, dx = \frac{2}{\sqrt{\pi}}, \\
    b_2 & = \int_{-1}^1 \frac{\sqrt{1-x^2}}{\sqrt{1-x^2}} \sqrt{\frac{2}{\pi}}x \,dx
    = \int_{-1}^1 \sqrt{\frac{2}{\pi}}x \, dx = 0, \\
    b_3 & = \int_{-1}^1 \frac{\sqrt{1-x^2}}{\sqrt{1-x^2}} \sqrt{\frac{2}{\pi}}(2x^2-1) \, dx
    = \int_{-1}^1 \sqrt{\frac{2}{\pi}}(2x^2-1) \, dx = -\frac{2}{3}\sqrt{\frac{2}{\pi}}.
\end{align*}
Then the minimizing polynomial of $y(x)$ is 
$$
\hat{\varphi}(x)=\frac{2}{\sqrt{\pi}}\cdot \frac{1}{\sqrt{\pi}} - \frac{2}{3}\sqrt{\frac{2}{\pi}}\cdot 
\sqrt{\frac{2}{\pi}}(2x^2-1) = -\frac{8}{3\pi}x^2 + \frac{10}{3\pi}.
$$

\noindent (b) Denote the best approximation $\hat{\varphi}(x)=a_0+a_1x+a_2x^2.$ 
Get the Gram matrix of $\{1, x, x^2\}$,
\begin{equation*}
    G(1, x, x^2) = 
    \left[
    \begin{matrix}
        \langle 1,1 \rangle & \langle 1,x \rangle & \langle 1,x^2 \rangle \\
        \langle x,1 \rangle & \langle x,x \rangle & \langle x,x^2 \rangle \\
        \langle x^2,1 \rangle & \langle x^2,x \rangle & \langle x^2,x^2 \rangle
    \end{matrix}
    \right]
    = 
    \left[
    \begin{matrix}
        \pi & 0 & \frac{\pi}{2} \\
        0 & \frac{\pi}{2} & 0 \\
        \frac{\pi}{2} & 0 & \frac{3\pi}{8}
    \end{matrix}
    \right]
\end{equation*}
Then calculate the vector
\begin{equation*}
    \mathsf{c} =
    \left[
    \begin{matrix}
        \langle \sqrt{1-x^2},1 \rangle \\
        \langle \sqrt{1-x^2},x \rangle \\
        \langle \sqrt{1-x^2},x^2 \rangle
    \end{matrix}
    \right]
    =
    \left[
    \begin{matrix}
        2 \\ 0 \\ \frac{2}{3}
    \end{matrix}
    \right]
\end{equation*}
By Theorem 5.34, $G(1, x, x^2)^T\mathsf{a}=\mathsf{c},$ which means 
\begin{equation*}
    \left[
    \begin{matrix}
        \pi & 0 & \frac{\pi}{2} \\
        0 & \frac{\pi}{2} & 0 \\
        \frac{\pi}{2} & 0 & \frac{3\pi}{8}
    \end{matrix}
    \right]
    \left[
    \begin{matrix}
        a_0 \\ a_1 \\ a_2
    \end{matrix}
    \right]
    =
    \left[
    \begin{matrix}
        2 \\ 0 \\ \frac{2}{3}
    \end{matrix}
    \right]
\end{equation*}
Solve the normal equations, we can compute that 
$a_0=\frac{10}{3\pi},a_1=0,a_2=-\frac{8}{3\pi}.$ 

Hence $\hat{\varphi}(x)= -\frac{8}{3\pi}x^2 + \frac{10}{3\pi}.$

% T4
\subsection*{\Rmnum{4}. Discrete least square via orthonormal polynomials.}

\noindent \textbf{Sol.}

% (a)
\noindent (a)
By definition, $\langle u(t),v(t) \rangle = \sum\limits_{i=1}^{12}u(t_i)v(t_i).$
Hence the norm of the space is 
$\left\lVert u(t) \right\rVert = \sqrt{\sum\limits_{i=1}^{12}u^2(t_i)}.$
Apply the Gram-Schmidt process to $(1,x,x^2)$:

\noindent $\bullet$ $v_1(x)=1,\left\lVert v_1 \right\rVert=\sqrt{12}=2\sqrt{3},
u_1^*(x)=\frac{v_1(x)}{\left\lVert v_1 \right\rVert}=\frac{1}{2\sqrt{3}}.$

\noindent $\bullet$ $v_2(x)=x-\langle x, u_1^*(x) \rangle u_1^*(x) 
=x - \sum\limits_{i=1}^{12}\frac{x_i}{2\sqrt{3}} \cdot \frac{1}{2\sqrt{3}}=x-6.5,
\left\lVert v_2 \right\rVert = \sqrt{\sum\limits_{i=1}^{12}(x_i-6.5)^2} = \sqrt{143}.$ 
$u_2^*(x) = \frac{v_2(x)}{\left\lVert v_2 \right\rVert} = \frac{1}{\sqrt{143}}(x-6.5).$

\noindent $\bullet$ $v_3(x) = x^2 - \langle x^2, u_1^*(x) \rangle u_1^*(x) - \langle x^2, u_2^*(x) \rangle u_2^*(x)
=x^2 - \sum\limits_{i=1}^{12} \frac{x_i^2}{12} - \sum\limits_{i=1}^{12} \frac{x_i^2(x_i-6.5)}{143} 
\cdot (x-6.5) = x^2 - 13x + \frac{91}{3},$
$\left\lVert v_3 \right\rVert = \sqrt{\sum\limits_{i=1}^{12}(x_i^2 - 13x_i + \frac{91}{3})^2}
= 2 \sqrt{335}.$
\noindent Thus $u^*_3(x) = \frac{v_3(x)}{\left\lVert v_3 \right\rVert} 
= \frac{1}{2\sqrt{335}}(x^2 - 13x + \frac{91}{3}).$

Then the list $( \frac{1}{2\sqrt{3}} , \frac{1}{\sqrt{143}}(x-6.5), 
\frac{1}{2\sqrt{335}}(x^2 - 13x + \frac{91}{3}) )$ is orthonormal.

% (b)
\noindent (b)

Calculate the Fourier coefficients, we have 
\begin{align*}
    b_1 & = \langle y, \frac{1}{2\sqrt{3}} \rangle = \sum\limits_{i=1}^{12} \frac{y_i}{2\sqrt{3}}
    = 277\sqrt{3} \\
    b_2 & = \langle y, \frac{1}{\sqrt{143}}(x-6.5) \rangle = \sum\limits_{i=1}^{12} \frac{y_i(x_i-6.5)}{\sqrt{143}}
    = \frac{589}{\sqrt{143}} \\
    b_3 & = \langle y, \frac{1}{2\sqrt{335}}(x^2 - 13x + \frac{91}{3})\rangle
    = \sum\limits_{i=1}^{12} \frac{y_i (x_i^2 - 13x_i + \frac{91}{3})}{2\sqrt{335}}
    = \frac{6034}{\sqrt{335}}
\end{align*}
Thus 
\begin{align*}
    \hat{\varphi}(x) & = b_1u_1^*(x) + b_2 u^*_2(x) + b_3 u^*_3(x) \\
    & = \frac{277}{2} + \frac{589}{143}(x-6.5) + \frac{3017}{335}(x^2 - 13x + \frac{91}{3}) \\
    & = 384.908 - 112.959 x + 9.00597 x^2
\end{align*}
It is the same as that of the example on the table of sales records in the notes. The small 
differences come from floating-point rounding in the machine computation.

% (c)
\noindent (c)
The orthonormal list $u^*_1(x), u^*_2(x), u^*_3(x)$ can be reused since it only 
depends on $N$ and the $x_i$. But the Fourier coefficients cannot be reused since they 
also depend on the $y_i$. If we use the orthonormal polynomials, we can store them as 
known data, which simplifies the computation: only the Fourier coefficients need to be 
recalculated. Besides, the method avoids computing $G^{-1}$, which might have a 
terrible condition number and cause a catastrophic error.

% Programming assignments
\section{Programming assignments.}

% T1
\subsection*{\Rmnum{1}. Perform discrete least squares via normal equations.}

By normal equations, we can calculate the inner product among $(1,x,x^2),$ then 
get the Gram matrix. Then we calculate the inner product between $y$ and $(1,x,x^2)$ to 
get the vector $c$. Finally, calculate $a = G^{-1}c$ and we can get the coefficients. 
They are $a_0 = 2.17571993, a_1 = 2.67041339, a_2 = -0.23844394,$ which implies that 
$$
\hat{\varphi}(x) = -0.23844394x^2 + 2.67041339x + 2.17571993
$$
is the best fitting quadratic polynomial. Plot it as follows:

\begin{figure}[H]
    \centering
    \includegraphics[scale=0.7]{src/Figure.png} \label{Fig}
\end{figure}
It can represent the real function properly, as the figure shows.

% T2
\subsection*{\Rmnum{2}. Solve the problem via QR factorization.}

Use QR factorization to solve the problem. List all data $x$ and apply the functions $1, x, x^2$ 
to them; we then get a matrix $A\in \mathbb{R}^{21\times 3}.$ List all data $y$ as a 
column vector $b$. By QR factorization, $A = QR$, then the coefficient vector satisfies 
$x = R^{-1}(Q^Tb)$. The result we get this time is the same as with the normal equations, 
i.e. 
$$
\hat{\varphi}(x) = -0.23844394x^2 + 2.67041339x + 2.17571993
$$

Calculate the condition numbers based on the 2-norm of $G$ and $R$; we get 
$\operatorname{cond}(G) = 18980.894284142454,\ \operatorname{cond}(R) = 137.7711663743338,$ which implies that 
the former is much larger than the latter.

\end{document}