\documentclass[twoside,a4paper]{article}
\usepackage{geometry}
\geometry{margin=1.5cm, vmargin={0pt,1cm}}
\setlength{\topmargin}{-1cm}
\setlength{\paperheight}{29.7cm}
\setlength{\textheight}{25.3cm}

% useful packages.
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{enumerate}
\usepackage{graphicx}
\usepackage{multicol}
\usepackage{fancyhdr}
\usepackage{layout}
\usepackage{tikz}
\usepackage{pgf-umlcd}
\usepackage{subfigure}
\usepackage{multirow}

% some common command
\newcommand{\dif}{\mathrm{d}}
\newcommand{\avg}[1]{\left\langle #1 \right\rangle}
\newcommand{\difFrac}[2]{\frac{\dif #1}{\dif #2}}
\newcommand{\pdfFrac}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\OFL}{\mathrm{OFL}}
\newcommand{\UFL}{\mathrm{UFL}}
\newcommand{\fl}{\mathrm{fl}}
\newcommand{\op}{\odot}
\newcommand{\Eabs}{E_{\mathrm{abs}}}
\newcommand{\Erel}{E_{\mathrm{rel}}}

\begin{document}

\pagestyle{fancy}
\fancyhead{}
\lhead{NAME Jiatu Yan}
\chead{Numerical ODE/PDE project \#01}
\rhead{Date 2021.4.28}


\section{Data structure and explanation of the programs.}
\begin{tikzpicture}
	\tiny{	\begin{abstractclass}[text width=8cm]{TimeIntergrator<DIM>}{4, 0}
		\operation[0]{+one\_step\_method(std::vector<Point<DIM> >\& \_Pnts,\\
			\qquad double \_stepl, VectorFunction<DIM, DIM>\& \_f) const : void}
	\end{abstractclass}
	
	\begin{class}[text width = 4cm]{LMM Info}{8, -3.5}
		\attribute{\#\_\_Alpha : double*}
		\attribute{\#\_\_Beta : double*}
		\attribute{\#\_\_S : std::size\_t}
		\attribute{\#\_\_P : std::size\_t}
        \end{class}

	\begin{class}[text width = 4cm]{RK Info}{9, -5.5}
		\attribute{\#\_\_A : double*}
		\attribute{\#\_\_B : double*}
		\attribute{\#\_\_C : double*}
		\attribute{\#\_\_S : std::size\_t}
        \end{class}
	
	\begin{abstractclass}[text width= 8cm]{LMM<DIM>}{0, -2}
		\inherit{TimeIntergrator<DIM>}
		\attribute{\#\_\_LMM\_Info : LMM\_Info}
		\attribute{\#\_\_RK\_Info : RK\_Info}
		\attribute{\#\_\_num\_init : std::size\_t}
		\operation{+set\_cof(std::string \_file\_in, const std::size\_t \_p) : void}
		\operation{+get\_s() const : const std::size\_t}
		\operation{+get\_p() const : const std::size\_t}
		\operation{+get\_num\_init() const : const std::size\_t}
		\operation{+get\_alpha\_i(const std::size\_t \_i) const : const double}
		\operation{+get\_beta\_i(const std::size\_t \_i) const : const double}
		\operation{+starting\_method(std::vector<Point<DIM> >\& \_Pnt,\\
			\qquad const double \_stepl, const std::size\_t \_num,\\
			\qquad VectorFunction<DIM, DIM>\& \_f) const : void}
		\operation[0]{+one\_step\_method(
			std::vector<Point<DIM> >\& \_Pnts,\\ double \_stepl,
		\qquad VectorFunction<DIM, DIM>\& \_f) const : void}
	\end{abstractclass}
	
	\begin{class}[text width = 9cm]{Classical RK<DIM>}{10, -2} 
		 \implement{TimeIntergrator<DIM>}
                 \operation{+Classical\_RK()}
                 \operation{+$\sim$Classical\_RK()}
                 \operation{+one\_step\_method(std::vector<Point<DIM> >\& \_Pnts,\\
		 \qquad const double \_stepl, VectorFunction<DIM, DIM>\& \_f) const override : void}
	\end{class}

	\begin{class}[text width = 8cm]{Explicit LMM<DIM>}{0, -8}
		\implement{LMM<DIM>}
		 \operation{+one\_step\_method(std::vector<Point<DIM> >\& \_Pnts,\\
                        \qquad double \_stepl, VectorFunction<DIM, DIM>\& \_f) const : void}
	\end{class} 
	\begin{class}[text width = 8cm]{Implicit LMM<DIM>}{10, -8}
                \implement{LMM<DIM>}
                \operation{+one\_step\_method(std::vector<Point<DIM> >\& \_Pnts,\\
                        \qquad double \_stepl, VectorFunction<DIM, DIM>\& \_f) const : void}
        \end{class}     

	\begin{class}[text width = 4cm]{ABM<DIM>}{-1, -10}
		\inherit{Explicit LMM<DIM>}
		\operation{+ABM()}
		\operation{+ABM(const std::size\_t \_p)}
		\operation{+$\sim$ABM()}	
	\end{class}
	\begin{class}[text width = 4cm]{AMM<DIM>}{5, -10}
		\inherit{Implicit LMM<DIM>}
		\operation{+AMM()}
		\operation{+AMM(const std::size\_t  \_p)}
		\operation{+$\sim$AMM()}	
	\end{class}
	\begin{class}[text width = 4cm]{BDF<DIM>}{10, -10}
		\inherit{Implicit LMM<DIM>}
		\operation{+BDF()}
		\operation{+BDF(const std::size\_t \_p)}
		\operation{+$\sim$BDF()}	
	\end{class}

	\draw[umlcd style dashed line, ->] (LMM Info) -- node[above, sloped, black]{$<<$import$ >>$} (LMM<DIM>);

	\draw[umlcd style dashed line, ->] (RK Info) -- node[above, sloped, black]{$<<$import$ >>$} (LMM<DIM>);
}
\end{tikzpicture}

The UML diagram above shows the data structure and implementation of the abstract class TimeIntergrator.
The core function is the one in TimeIntergrator that enables the numerical method for the IVP to step forward by one time step.
Some trivial functions in some classes are omitted due to the limited space.
	For the classical Runge-Kutta method, the implementation is trivial: I just need to implement
the one\_step\_method using the classical Runge-Kutta formula.

For linear multistep methods, I created several classes. The abstract class LMM adds functions that
load and store the coefficients of the linear multistep method and the Runge-Kutta method when the object is constructed,
and that operate on these coefficients. For an LMM of order p, I used a Runge-Kutta method of the same order to generate the
other initial points from the given one. If we generated the initial points by a method of lower order,
we would introduce serious errors which would nullify all the advantages of the higher order of the chosen method.

Then I implemented the one step method function of LMM in two classes, an implicit LMM and an explicit one.
For the explicit one, the implementation is also trivial. It implements the ABM method.
The implicit one implements the AMM and BDF methods. In an implicit LMM method, we need to solve a nonlinear system of equations $F\left( x \right)=0$.
The system can be solved by Newton's method, which is
\begin{equation*}
	\begin{split}
		J\left( F\left( x_n \right)  \right)\Delta x_n &= -F\left( x_n \right)\\
		x_{n+1}&=x_{n} + \Delta x_n,\\
	\end{split}
\end{equation*}
where the Jacobian matrix $J\left( F\left( x_n \right) \right)$ is $\left( \partial F_i / \partial x_j \left( x_n \right) \right)_{n\times n}$.
By the property of Newton's method, the sequence $\{x_n\}$ converges to the solution quickly, at a rate of order 2.
To avoid calculating the exact form of Jacobi matrix, I used modified Newton's Method.
The algorithm is to choose a small parameter $h$ to approximate the partial derivatives of the function, which is
\[
	\frac{\partial F_i}{\partial x_j}\left( x_n \right) \approx 
	\frac{F_i\left( x_n + h\,e_j \right) - F_i\left( x_n \right)  }{h}
.\] 
In order to preserve the order of convergence of Newton's method, the parameter $h$ should be chosen to converge to zero through the iteration.
We may set $h_k=\gamma_k h$, where $\gamma_k\rightarrow0$ as $k\rightarrow \infty$.

The other problem of the implicit method is the starting point of each iteration.
If we choose it carelessly, the Newton iteration for the nonlinear system may get stuck near a local minimum and fail to converge.
This may introduce error into our programs.
To get a starting point near the solution, I used the ABM method of the same order to predict the next step of the implicit method. Since the number of initial data may differ between ABM and AMM of the same order,
I added a constant \_\_num\_init in LMM to store the number of initial data needed for an LMM.

To generate the different TimeIntergrator method easily in the main program, I used Factory pattern which is 

\begin{tikzpicture}
	\tiny{
	\begin{class}[text width = 8cm]{TimeIntergratorFactory<DIM>}{0, -4}
		\operation{+RegisterMethod(const std::string \_MethodId,\\
		\qquad CreateTimeIngergratorCallback \_createFn) : bool}
		\operation{+UnregisterMethod(const std::string \_MethodId) : bool}
		\operation{+CreateMethod(const std::string \_MethodId, const std::size\_t \_p)\\
		\qquad: TimeIntergrator<DIM>*}
		\operation{+CreateMethod(const std::string \_MethodId) : TimeIntergrator<DIM>*}
		\operation{+$\sim$TimeIntergratorFactory()}
		\operation{+Instance() : static TimeIntergratorFactory<DIM>*}
		\attribute{\#\_\_obj : static TimeIntergratorFactory<DIM>*}
		\attribute{\#\_\_callbacks CallbackMp}
		\attribute{\#TimeIntergratorFactory()}
	\end{class}

	\begin{class}[text width = 4cm]{Classical RK<DIM>}{10, -2}
                 \operation{+Classical\_RK()}
                 \operation{+$\sim$Classical\_RK()}
        \end{class}


	\begin{class}[text width = 4cm]{ABM<DIM>}{10, -4}
                \operation{+ABM()}
                \operation{+ABM(const std::size\_t \_p)}
                \operation{+$\sim$ABM()}
        \end{class}


	 \begin{class}[text width = 4cm]{AMM<DIM>}{10, -6}
                \operation{+AMM()}
                \operation{+AMM(const std::size\_t \_p)}
                \operation{+$\sim$AMM()}
        \end{class}


	 \begin{class}[text width = 4cm]{BDF<DIM>}{10, -8}
                \operation{+BDF()}
                \operation{+BDF(const std::size\_t \_p)}
                \operation{+$\sim$BDF()}
        \end{class}

	\draw[umlcd style dashed line, ->] (ABM<DIM>) -- node[above, sloped, black]{$<<$import$ >>$} (TimeIntergratorFactory<DIM>);
	\draw[umlcd style dashed line, ->] (AMM<DIM>) -- node[above, sloped, black]{$<<$import$ >>$} (TimeIntergratorFactory<DIM>);
	\draw[umlcd style dashed line, ->] (BDF<DIM>) -- node[above, sloped, black]{$<<$import$ >>$} (TimeIntergratorFactory<DIM>);
	\draw[umlcd style dashed line, ->] (Classical RK<DIM>) -- node[above, sloped, black]{$<<$import$ >>$} (TimeIntergratorFactory<DIM>);
}
\end{tikzpicture}

The factory is a singleton, which means there is only one factory instance in the program.
All the methods are registered to the factory when the main program is compiled.
By using the factory pattern, we can easily generate a method by looking up its methodId.
It is also very convenient to add and delete methods from the factory.

\section{error and convergence rates tests.}

I chose proper step numbers to test the solution error, convergence rates and CPU time.
For each method and each problem, I took a step number $N$ and a growth rate $\gamma$,
and ran the program to $T_i$ with $\gamma^{n}N$ steps to calculate the error pattern of each method.
The results are output to the .txt files in the folder named acctest. I will list part of them to show the convergence rates of each method. They are the results produced by the program on my own computer, so the data output by the program may differ slightly on another machine.
For BDF1 in IVP1, the running time is so long that I had to choose a smaller number of time steps and could not get the proper solution.
\subsection*{IVP 1}
For initial value problem 1, the solution is periodic, so we can take the initial condition as the exact solution. 
For each method of order $p$, we expect to get the error pattern
\[
	\left\| U\left( T_1, \gamma^{n}N \right)  -  u\left( T_1 \right) \right\|_{\infty} /
	\left\| U\left( T_1, \gamma^{n + 1}N \right) - u\left( T_1 \right) \right\|_\infty  \approx \gamma^{p}
,\]
where $N$ is the number of steps.

Part of the error tests are listed in the following tables.
\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
	\hline
	\multicolumn{5}{|c|}{ABM for IVP 1}\\
\hline
p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
1&400000000&0.102219&0.927524&97.8768\\
\hline
2&3200000&0.0741694&2.09103&5.65885\\
\hline
3&270000&0.0536765&3.35498&0.694462\\
\hline
4&405000&0.00488826&3.98394&1.12888\\
\hline
\end{tabular}
\end{equation*}

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
	\hline
	\multicolumn{5}{|c|}{AMM for IVP 1}              \\
\hline
	p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
	2&1600000&0.0564681&1.88987&115.779\\
\hline
3&640000&0.0221315&2.93457&46.8938\\
\hline
4&180000&  0.00871299&      3.64892& 17.9078\\
\hline
5&160000&  0.00315905&      5.13549& 16.6668\\
\hline
\end{tabular}
\end{equation*}

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
	\hline
	\multicolumn{5}{|c|}{BDF for IVP 1}           \\
\hline
	p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
	1&&&&\\
	\hline
	2&3200000& 0.0559776&       1.87818& 232.173\\
\hline
	3&675000&  0.0124969&       2.98318& 65.9975\\
\hline
	4&320000&  0.0225622&       3.91525&25.1243\\
\hline
\end{tabular}
\end{equation*}

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
        \hline
        \multicolumn{5}{|c|}{CRK for IVP 1}           \\
\hline
        p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
	 &160000&  0.00149189 &     3.94802&0.3173\\
\hline
	
\end{tabular}
\end{equation*}

\subsection*{IVP 2}
For initial value problem 2, we cannot get the exact solution. So we use Richardson extrapolation to compute the error pattern, which measures how the error changes when refining the grid (enlarging the number of steps):
\[
	\tilde{E} = \left\| U\left( T_2, \gamma^{n}N \right) - U\left( T_2, \gamma^{n + 1}N \right) \right\|_\infty
	/ \left\| U\left( T_2, \gamma^{n+1}N \right) - U\left( T_2, \gamma^{n + 2}N \right) \right\|_\infty \approx \gamma^{p}
.\]

Part of the error tests are listed in the following tables.

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
        \hline
\multicolumn{5}{|c|}{ABM for IVP 2}                  \\
\hline
p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
1&320000 & 0.199069 &       0.835513&        0.079209\\
\hline
2&320000&  2.01784e-05 &    1.95667& 0.566666\\
\hline
3&20000  & 0.00360838 &     2.88998& 0.040046\\
\hline
4&5625  &  0.000427615  &   5.19662& 0.017148\\
\hline
\end{tabular}
\end{equation*}

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
        \hline
\multicolumn{5}{|c|}{AMM for IVP 2}                  \\
\hline
p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
2&20000&0.00106553&2.00654&1.42758\\
\hline
3&20000&   0.000408449&     3.01331& 1.42968\\
\hline
4&16875 &  3.06178e-08&4.50896&1.67984\\
\hline
5&3375 &   0.000246215 &    4.89971 &0.5717\\
\hline
\end{tabular}
\end{equation*}

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
        \hline
\multicolumn{5}{|c|}{BDF for IVP 2}                  \\
\hline
p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
\hline
1&160000&  0.105593&        1.6162&  11.1506\\
	\hline
2&200000 & 4.02418e-05&     1.90328& 14.2084\\
\hline
3&20000&   0.00245527 &     3.07568& 1.4305\\
\hline
4&11250&   5.9861e-06&      5.59663& 1.10229\\
\hline
	\end{tabular}
\end{equation*}

\begin{equation*}
\begin{tabular}{|c| c| c|c|c|}
       \hline
        \multicolumn{5}{|c|}{CRK for IVP 2}           \\
\hline
        p&number of steps& error& order of convergence speed&CPU time/sec\\
\hline
	 &1600&	0.00430047&	4.56776&	0.003351\\
\hline  

\end{tabular}
\end{equation*}

Thus, we can conclude that the orders of accuracy of most of the time integrator methods I wrote meet the theoretical ones.
But when using the BDF method of order 1, I could not observe the order of accuracy 1 when solving IVP1 and had to use a larger number of steps to get it.
This situation may be caused by the initial data for each step:
the solution of each step may get stuck near a local minimum when using ABM to get the starting point and the modified Newton method to solve the nonlinear system.
When solving IVP2, however, the order of BDF\_1 can be reached.

For some methods, if the number of steps is taken too large, the order of accuracy may behave badly.
This may occur because when the number of steps is large, the error becomes very small, around $10^{-9}$, and
the rounding error may become dominant. For example, in LMM, I approximate the partial derivatives by choosing a small
$h$. This may introduce error when $h$ is small since I calculate a quotient of two small numbers.

\section{Plots}
The main program builds a script for gnuplot to plot pictures of the initial value problems for each method with a proper number of steps.
The pictures are stored in the folder called ``figures''. Some of the pictures drawn from the points calculated by the methods are shown at the end of the report.

From the plots we can find that for IVP1, methods of lower order retain a large error,
which shows up as an open gap in the picture (the closed curve fails to close) when the number of steps is not large enough.
For IVP2, the problem itself is perhaps not so hard, so all the solutions look good.
To get plots that are visually indistinguishable from the plot in the assignment,
I chose the proper time-step size with which the error of each method is small.

\begin{equation*}
\begin{tabular}{|c|c|c|c|c|}
\hline
\multicolumn{5}{|c|}{time steps to get good plots for IVP1.}                  \\
\hline
p&ABM&AMM&BDF&CRK\\
\hline
1&16000000&&2000000&\multirow{5}{*}{20000}\\
\cline{1-4}
2&300000&200000&100000&\\
\cline{1-4}
3&80000&80000&120000&\\
\cline{1-4}
4&80000&40000&50000&\\
\cline{1-4}
5&&20000&&\\
\hline
\end{tabular}
\begin{tabular}{|c|c|c|c|c|}
\hline
\multicolumn{5}{|c|}{time steps to get good plots for IVP2.}                  \\
\hline
p&ABM&AMM&BDF&CRK\\
\hline
1&10000000&&3200000&\multirow{5}{*}{800}\\
\cline{1-4}
2&5000&4000&6000&\\
\cline{1-4}
3&7000&4000&8000&\\
\cline{1-4}
4&2500&2000&2000&\\
\cline{1-4}
5&&1500&&\\
\hline
\end{tabular}
\end{equation*}
\section{Comparison of Euler's method and the Runge-Kutta method}
The plots of the solutions of IVP1 obtained by Euler's method and the Runge-Kutta method are shown below.

\begin{figure*}[ht]
	\begin{minipage}[t]{0.45\linewidth}
                \centering
		\includegraphics[width=5cm, height = 7.5cm, angle=-90]{../figures/ABM_24000_1_IVP1_compair.eps}
		\end{minipage}
	\begin{minipage}[t]{0.45\linewidth}
                \centering
		\includegraphics[width=5cm, height=7.5cm, angle=-90]{../figures/CRK_6000_IVP1_compair.eps}
	\end{minipage}
\end{figure*}

The Euler figures are bad. As shown in the table in Section 2,
I spent 97.8768 seconds of CPU time to get an error of only $10^{-1}$, and it needs 400000000 steps.
When using the classical Runge-Kutta method, I only need to run it for 320000 steps to get an error of $10^{-3}$ in 0.647982
seconds.
Obviously, Runge-Kutta is the better one according to not only the number of steps but also the CPU time.
This conclusion also follows from the fact that the order of accuracy of the classical Runge-Kutta method is 4 while that of Euler's method is only 1.

\section{Conclusion}
After solving the two initial value problems by different methods,
we can conclude that the ABM method of order one converges really slowly.
I think this bad property is what causes the BDF method of order 1 to converge badly as well, because I used the ABM method of order 1 to predict the solution of each step.
When we use LMM methods of higher orders, we can find a distinct difference in the quality of the plots.
We can draw a good plot with quite a small number of steps.
From the tables of Section 2, we can find that implicit LMMs have better performance in accuracy;
they are more likely to reach an accuracy near $10^{-5}$, which is the error tolerance I set in the Newton iteration.

\section{Some plots drawn by gnuplot.}
\begin{figure*}[ht]
                \centering
                \includegraphics[width=10cm, height=15cm, angle=-90]{../figures/ABM_405000_3_IVP1.eps}
\end{figure*}
\begin{figure*}[ht]
        \centering
        \includegraphics[width=10cm, height=15cm, angle=-90]{../figures/AMM_1280000_3_IVP1.eps}
\end{figure*}
\begin{figure*}[ht]
        \centering
        \includegraphics[width=10cm, height=15cm, angle=-90]{../figures/BDF_80000_3_IVP2.eps}
\end{figure*}

\begin{figure*}[ht]
                \centering
                \includegraphics[width=10cm, height=15cm, angle=-90]{../figures/CRK_6400_IVP2.eps}
\end{figure*}

\end{document}

%%% Local Variables: 
%%% mode: latex
%%% TeX-master: t
%%% End:
