\documentclass[a4paper,10pt]{article}
\usepackage{amsmath,amsfonts,amsthm,amssymb,graphicx, url}
\usepackage{listings}
\lstset{language=C}
\lstset{basicstyle=\small}
\newcommand{\bvec}[1]{\boldsymbol{#1}} % vectors in bold instead of with an arrow on top

% Title Page
\title{Assignment series 3 -- Computational Finance}
\author{Florian Speelman \& Jannis Teunissen}


\begin{document}
\maketitle
\section{Introduction}
In this report we present results from option valuation simulations. We integrate
the Black--Scholes PDE with two different schemes, the FTCS (Forward--Time Central--Space)
and Crank--Nicolson scheme. Our implementation of these schemes in a \emph{C} program
can value European and digital options, and computes the Black--Scholes analytical values
for the hedge and premium of these options.

One thing to mention is that these schemes work with discrete steps, and although
these steps represent something like time or a price we will work with them as if
they are simply numbers (for example stating $\Delta X = \Delta \tau$). This makes sense
because floating point values in a computer carry no unit.

\section{The Black--Scholes PDE}
The assumptions of the Black--Scholes model lead to the 
Black--Scholes PDE\footnote{See \url{https://en.wikipedia.org/wiki/Black-Scholes_model} for a short review.}
\begin{equation}
 \frac{\partial V}{\partial t} + r S \frac{\partial V}{\partial S} 
  + \tfrac{1}{2}\sigma^2 S^2\frac{\partial^2 V}{\partial S^2} -r V = 0,
\end{equation}
where $V$ is the value of a vanilla option at the current stockprice $S$ and time $t$,
$r$ is the risk-free interest rate and $\sigma$ is the volatility of $S$.

Typically the payoff of such an option is only known at the time of maturity $T$, so that
one has to integrate back in time to find the current value. Therefore it is natural
to introduce $\tau = T - t$. Combined with the transformation $X = \ln S$ this leads to the
following equation
\begin{equation}
  -\frac{\partial V}{\partial \tau} + (r - \tfrac{1}{2}\sigma^2)\frac{\partial V}{\partial X}
  + \tfrac{1}{2}\sigma^2 \frac{\partial^2 V}{\partial X^2} -r V = 0.
\label{pde}
\end{equation}
Assuming $r$ and $\sigma$ are constant we now have a PDE with constant coefficients, ready to
be discretized. 
\subsection{FTCS \& BTCS schemes}
A well-known and simple way to discretize equation \eqref{pde} is the explicit FTCS (Forward--Time Central--Space)
scheme. In this scheme time derivatives are replaced by the first-order Euler forward difference
\[
 \frac{\partial V}{\partial \tau} \approx \frac{V_{i}^{n+1} - V_{i}^{n}}{\Delta \tau},
\]
where $V_{i}^{n} = V(i\Delta X, n \Delta \tau)$. Non-time derivatives are replaced by centered difference
approximations, which are second order:
\begin{align*}
 \frac{\partial V}{\partial X} &\approx \frac{V_{i+1}^{n} - V_{i-1}^{n}}{2\Delta X}\\
 \frac{\partial^2 V}{\partial X^2} &\approx \frac{V_{i+1}^{n} -2 V_{i}^{n} + V_{i-1}^{n}}{\Delta X^2}.
\end{align*}
In this way we get the FTCS scheme for equation \eqref{pde}
\begin{equation}
 V_{i}^{n+1} = k_1 V_{i+1}^{n} + k_0 V_{i}^{n} + k_{-1} V_{i-1}^{n},
\label{ftcs}
\end{equation}
with the $k_i$ given by
\begin{align*}
 k_1 &= (r - \tfrac{1}{2}\sigma^2)\frac{\Delta \tau}{2 \Delta X} + \sigma^2\frac{\Delta \tau}{2\Delta X^2}\\
 k_0 &= 1 - \sigma^2\frac{\Delta \tau}{\Delta X^2} -r \Delta \tau\\
 k_{-1} &= (\tfrac{1}{2}\sigma^2 - r)\frac{\Delta \tau}{2 \Delta X} + \sigma^2\frac{\Delta \tau}{2\Delta X^2}.
\end{align*}

If we instead use the Euler backward difference method for the time derivative we get the
BTCS scheme, here evaluated at $\tau = (n+1)\Delta \tau$:
\begin{equation}
 -k_1 V_{i+1}^{n+1} + (-k_0 + 2) V_{i}^{n+1} - k_{-1} V_{i-1}^{n+1} = V_i^{n}.
\label{btcs}
\end{equation}
This scheme is implicit, so at every step one has to solve a system of equations.
\subsection{Crank--Nicolson scheme}
Thus far we have constructed two schemes that have an error that is first order in the timestep $\Delta \tau$ and
second order in the `spatial' timestep $\Delta X$.
If we can make the first order errors cancel, we will have a scheme that is second order
in both time and space.

In general, if $\frac{\partial f(x,t)}{\partial t} = G[f(x,t), x, t]$, then a simple Taylor expansion
shows that the forward Euler method leads to an error
\[
 \frac{f(x,t+h) - f(x,t)}{h} = G[f(x,t), x, t] + h/2 \frac{\partial^2 f(x,t)}{\partial t^2} + O(h^2).
\]
Similarly the backward Euler method, if evaluated at $t+h$, leads to the following error term
\[
 \frac{f(x,t+h) - f(x,t)}{h} = G[f(x,t+h), x, t+h] - h/2 \frac{\partial^2 f(x,t+h)}{\partial t^2} + O(h^2).
\]
Now since
\[
\frac{\partial^2 f(x,t)}{\partial t^2} - \frac{\partial^2 f(x,t+h)}{\partial t^2} =
-h \frac{\partial^3 f(x,t)}{\partial t^3} + O(h^2),
\]
we see that adding the FTCS and BTCS schemes leads to a new scheme that is second order
in both space and time.
Adding equations \eqref{ftcs} and \eqref{btcs} gives\footnote{Of course the schemes can be written
in multiple ways so one has to keep track of the first order time error and its sign.}
\begin{equation}
 -k_1 V_{i+1}^{n+1} + (-k_0 + 3) V_{i}^{n+1} - k_{-1} V_{i-1}^{n+1} =
 k_1 V_{i+1}^{n} + (k_0 + 1) V_{i}^{n} + k_{-1} V_{i-1}^{n}.
\label{cranknicolson}
\end{equation}
This implicit scheme is better known as the Crank--Nicolson scheme.
\section{Implementation}
We implemented both the FTCS and Crank--Nicolson scheme in a \emph{C} program. Equation \eqref{cranknicolson}
can be rewritten as a matrix equation $\bvec{A} \cdot \bvec{V}^{n+1} = \bvec{C}(\bvec{V}^{n})$, where $\bvec{A}$ is tridiagonal.
For solving this system we used the GNU Scientific Library, which comes with a \verb|gsl_linalg_solve_tridiag()| function.
The FTCS scheme can be implemented straightforward, since it is explicit.

For European style options the value is known at the time of maturity $T$, or equivalently at $\tau = 0$. But
besides an initial value $V_i^0$ we also need boundary conditions at some minimal and maximal $X$.
There are several choices here, one could for example set the lower and upper boundary to the Black--Scholes
value (if it is known) and then make the interval between them arbitrarily small. However we chose
the following boundary conditions:
\begin{itemize}
 \item European call option: The boundary for small $X$, and thus small $S$, is set to zero.
This is accurate if $S_\text{lower} \ll K$, because the expected payoff will then be negligible.
The upper boundary is set to $S_\text{upper} - K e^{-r\tau}$ (where $\tau = T - t$).
This is accurate if $S_\text{upper}\gg K$,
because then the expected payoff will be $e^{-r\tau} E[(S_T-K)^{+}]
\approx e^{-r\tau} [S_\text{upper} e^{r\tau} - K] = S_\text{upper} - K e^{-r\tau}$,
see figure \ref{fig:asymptote}.
 \item Digital option: The lower boundary is set to zero as for the European call option.
The upper boundary is set to $e^{-r\tau}$, the discounted value of an expected payoff of one.
\end{itemize}

\begin{figure}
\begin{center}
\includegraphics[width = 9cm]{asymptote.png}
\caption{Value of a European call option at $t=0$ versus the stockprice at $t = 0$.
Here $r = 0.04$, $\sigma = 0.3$, $K = 100$, $T = 1$, $X_\text{lower} = -10$ and $X_\text{upper} = 10$. 
Clearly $V$ goes quite fast to zero for $S < K$ and to $S - K e^{-r\tau}$
for $S > K$.
\label{fig:asymptote}}
\end{center}
\end{figure}
With these boundary conditions we can integrate $V(X, \tau)$ from $\tau = 0$ to $\tau = T$. The value
of the option at $t=0$ (so $\tau = T$) is approximated by $V$ at the $X$-value corresponding to $S_0$.
When the $X$ and $\tau$ intervals are discretized it is thus important that this point coincides with
a grid point, or else we have to interpolate which leads to an additional error.

To achieve this we scaled $X$ as $X = \ln\frac{S}{S_0}$. The user can specify an upper and lower
bound for this scaled $X$ and a stepsize, after which the index $z$ corresponding to $X = 0$ is calculated.
Then $X_i = (i - z)\Delta X$, so that the grid point $X_z = 0$ corresponds exactly to $S_0$. Note that this scaling
does not alter the PDE or the stepsize, it is just adding a constant to $X$.

The user can also specify the desired timestep. However, the program adjusts this timestep slightly by
the following procedure:
\begin{align*}
M &= \operatorname{round}(T / \Delta \tau) + 1\\
\Delta \tau &= T / (M - 1).
\end{align*}
So if the first timepoint is at $\tau = 0$ then the $M^\text{th}$ one will (except for roundoff error) equal $\tau = T$.
The hedge is calculated by using a centered difference approximation of
\[
\frac{\partial V}{\partial S} = \frac{1}{S}\frac{\partial V}{\partial X}.
\]

Our program stores all the grid points, so later on they can be used to generate a 3D-figure. See figure
\ref{fig:3d} for an example for a European call and digital option.

\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{eucall3d.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{digital3d.png}
\caption{
A nice example from our program, showing the value of a European call and digital option
against the time and the current stockprice. Parameters are as given in section \ref{optionspecs}.
The value smoothes out for later times (the time axis holds $\tau = T - t$), most clearly seen for the digital option.
\label{fig:3d}}
\end{center}
\end{figure}

\subsection{Von Neumann stability analysis of the FTCS scheme}
Letting $V_{i}^{n}$ be the computed numerical solution and
$V(i\Delta X, n \Delta \tau)$ be the exact solution, we
define the error $\epsilon^n_i$ on the same point as
\[
\epsilon^n_i = V_{i}^{n} - V(i\Delta X, n \Delta \tau) \text{.}
\]
The error also needs to satisfy equation \eqref{ftcs}, so we have
\[
 \epsilon_{i}^{n+1} = k_1 \epsilon_{i+1}^{n} + k_0 \epsilon_{i}^{n} + k_{-1} \epsilon_{i-1}^{n} \text{.}
\]
Write the error in Fourier representation
\[
\epsilon_{i}^{n} = A^n(k) e^{J k i \Delta X},
\]
where $k$ is a wavenumber and $J$ is the imaginary unit. Define
the amplification factor $G \equiv \frac{A^{n+1}(k)}{A^n(k)}$. For
stability we need $|G| \leq 1$ for any possible $k$.

\begin{align*}
\epsilon_{i}^{n+1} =\,& k_1 \epsilon_{i+1}^{n} + k_0 \epsilon_{i}^{n} + k_{-1} \epsilon_{i-1}^{n} \\
A^{n+1}(k) e^{J k i \Delta X} =\,&k_1 A^{n}(k) e^{J k (i+1) \Delta X} + k_0 A^{n}(k) e^{J k i \Delta X}\\
															  &+k_{-1} A^{n}(k) e^{J k (i-1) \Delta X}\\
\frac{A^{n+1}(k)}{A^n(k)} = G =\,& k_1 e^{J k \Delta X} + k_0 + k_{-1} e^{-J k \Delta X}
\end{align*}
Filling in the constants and grouping like terms gives
\begin{align*}
G &= 1 - r \Delta \tau 
		+ \frac{\sigma^2 \Delta \tau}{2 \Delta X^2}(e^{J k \Delta X} + e^{-J k \Delta X} - 2)
		+ (r - \frac{1}{2} \sigma^2) \frac{\Delta \tau}{2 \Delta X} (e^{J k \Delta X} - e^{-J k \Delta X})\\
	&= 1 - r \Delta \tau  - \frac{2 \sigma^2 \Delta \tau}{\Delta X^2} \sin^2 \left(\frac{k \Delta X}{2}\right)
		+ J (r - \frac{1}{2} \sigma^2) \frac{\Delta \tau}{\Delta X} \sin(k \Delta X)
\end{align*}
then from the stability condition
\begin{align*}
|G| &\leq 1\\
|G|^2 &\leq 1
\end{align*}
\[
\left[1 - r \Delta \tau  - \frac{2 \sigma^2 \Delta \tau}{\Delta X^2} \sin^2 \left(\frac{k \Delta X}{2}\right)\right]^2
		+ \left[(r - \frac{1}{2} \sigma^2) \frac{\Delta \tau}{\Delta X} \sin(k \Delta X)\right]^2 \leq 1\text{.}
\]

In our test-cases both $r - \frac{1}{2} \sigma^2$ and $\Delta X$ were small.
Then the maximum of the second term will be negligible compared
to that of the first one (over the range of $k$-values),
so that the stability condition becomes approximately
\[
1 - r \Delta \tau  - \frac{2 \sigma^2 \Delta \tau}{\Delta X^2} \geq -1,
\]
or
\begin{equation}
\Delta \tau \leq \frac{1}{\frac{\sigma^2}{\Delta X^2}+\frac{r}{2}}.
\label{taueq}
\end{equation}

\subsection{Stability of the Crank--Nicolson scheme}
One can also derive a stability condition for the Crank--Nicolson scheme,
but this is significantly more cumbersome than for the FTCS scheme.
Because we know from the literature that Crank--Nicolson is unconditionally stable
we therefore leave this derivation out of our report.

\section{Results}
\subsection{Determining optimal stepsizes for FTCS scheme}
To study the behaviour of our FTCS implementation we ran quite a few tests for different stepsizes,
to see how the error in the valuation of an option depends on $\Delta \tau$ and $\Delta X$.
For the tests we used a European call option with the following properties:
\begin{itemize}
 \item Initial stockprice $S_0 = 100$
 \item Strikeprice $K = 100$
 \item Risk free rate $r = 0.04$
 \item Volatility of stockprice $\sigma = 0.3$
\label{optionspecs}
\end{itemize}
Furthermore we set the upper bound for $X$ at one, and the lower bound at minus one. Thus,
the maximum stockprice represented in our discrete scheme is $e \cdot S_0$, and the minimum
is $S_0 / e$. The error we report is the absolute error in the valuation of the option at $t = 0$.

In figure \ref{fig:ftcsdt} $\Delta X$ is kept constant and the timestep
is varied. From equation \eqref{taueq} we estimate the maximum timestep as $\Delta \tau \approx 10 \cdot \Delta X^2$,
which clearly agrees with the plots. Furthermore we see that the error increases with a smaller timestep.
Obviously a smaller timestep leads to more steps, so this might be because the spatial discretization errors are added
more often to the global error. If the optimal $\Delta \tau$ is chosen the error scales roughly as $\Delta X^2$,
which is what we expect because the scheme is second order in the spatial step.

\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{ftcs-conv-dt.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{ftcs-conv-dt2.png}
\caption{
Absolute error in the valuation of the option specified in section \ref{optionspecs} with the FTCS scheme. Here $\Delta X = 0.1$
in the left figure and $\Delta X = 10^{-2}$ in the right one. 
When we vary $\Delta \tau$ the optimum lies at the maximal $\Delta \tau$ that is stable.
Unstable results are left out of the graph.
\label{fig:ftcsdt}}
\end{center}
\end{figure}

Then in figure \ref{fig:ftcsdx} $\Delta \tau$ is kept constant and the spatial stepsize
$\Delta X$ is varied. Again we see our estimate from equation \eqref{taueq} confirmed. At the
optimal $\Delta X$ the error scales roughly as $\Delta \tau$, which is what we expect because the scheme is first
order in the timestep. We conclude that for the FTCS scheme the optimal relation between $\Delta \tau$ and $\Delta X$ is given by
\[
 \Delta \tau \approx 10 \cdot \Delta X^2.
\]

\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{ftcs-conv-dx.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{ftcs-conv-dx2.png}
\caption{
Now $\Delta \tau = 10^{-3}$ in the left figure and $\Delta \tau = 10^{-4}$ in
the right one. When we vary $\Delta X$ the optimum lies at the minimal $\Delta X$ that is stable.
Unstable results are left out of the graph.
\label{fig:ftcsdx}}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\includegraphics[width = 9cm]{ftcs-conv-optimal.png}
\caption{
Using the found optimal relation the convergence is
stable and quadratic in $\Delta X$, linear in $\Delta \tau$.
\label{fig:ftcsoptimal}}
\end{center}
\end{figure}

\subsection{Determining optimal stepsizes for Crank--Nicolson}
For the Crank--Nicolson scheme we used the same strategy as for the FTCS scheme to determine
the optimal stepsizes. 

In figure \ref{fig:cndt} $\Delta X$ is kept constant and the timestep
is varied. Here we see that decreasing the timestep initially decreases the error, but for small
timesteps the error becomes constant. It is clear that at the optimal timestep the error scales
with $\Delta X^2$, which is what we expect because the scheme is second order in the spatial stepsize.

\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{cn-conv-dt.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{cn-conv-dt2.png}
\caption{
Absolute error in the valuation of the option specified in section \ref{optionspecs} with the Crank--Nicolson
scheme. Here $\Delta X = 10^{-2}$ in the left figure and $\Delta X = 10^{-3}$ in the right one.
When we vary $\Delta \tau$ the optimal timestep seems to be about $2 \cdot\Delta X$.
\label{fig:cndt}}
\end{center}
\end{figure}

In figures \ref{fig:cndx} -- \ref{fig:cndx3} $\Delta \tau$ is kept constant and the spatial stepsize
is varied. Here we see that decreasing $\Delta X$ initially decreases the error, but after some point the
error starts to increase. This is not really what we expected, but it might be because of the
$\Delta\tau / (\Delta X^2)$ term in the scheme that becomes large. From these figures and some more
simulations we estimated
the optimal relation between the stepsizes to be
\[
 \Delta \tau \approx 1.7\cdot\Delta X.
\]

\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{cn-conv-dx.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{cn-conv-dx2.png}
\caption{
% NOTE(review): both panels list the same value of $\Delta \tau$; one of them is
% most likely a typo -- confirm the actual stepsizes against the simulation data.
Now $\Delta \tau = 10^{-3}$ in the left figure and $\Delta \tau = 10^{-3}$ in the right one.
When we vary $\Delta X$ the optimum lies at about $0.5\cdot\Delta \tau$.
\label{fig:cndx}}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\includegraphics[width = 9cm]{cn-conv-dx3.png}
\caption{
Here $\Delta \tau = 10^{-4}$.
When we vary $\Delta X$ the optimum now lies out of the range displayed here,
but for a smaller $\Delta X$ the program used too much memory. (This
is because we store all the gridpoints in memory to possibly generate 3D figures,
so this memory bottleneck can easily be removed).
\label{fig:cndx3}}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\includegraphics[width = 0.8 \linewidth]{cn-conv-optimal.png}
\caption{
A plot using the estimated optimal relation between the stepsizes for the Crank--Nicolson scheme,
$\Delta \tau \approx 1.7\cdot\Delta X$.
The behavior is quadratic for higher values of $\Delta X$, but
has some sort of branching when $\Delta X$ becomes small.
We do not understand why this happens.
\label{fig:cnoptimal}}
\end{center}
\end{figure}

\subsection{Correctness}
Here we compare some of the results we got with the Black--Scholes analytic values, see
tables 1 and 2. All runs were done with the parameters as in section \ref{optionspecs}
but now with a strikeprice of $110$.

When we value a digital option with our program we see that the relative error is a few orders of magnitude larger than
for a European option. This is probably due to the large discontinuity in the payoff of a digital option.

\begin{table}[h!]
\begin{tabular}{|c|c|c|c|c|}
\hline
$S_0$ & V & $\Delta$ & Diff with analytical V & Diff with analytical $\Delta$\\ \hline
$100$ & $9.626885$ & $0.486393$ & $1.527450\cdot 10^{-3}$ & $1.012750\cdot 10^{-4}$ \\ \hline
$110$ & $15.128655$ & $0.611582$ & $6.347635\cdot 10^{-5}$ & $4.227773\cdot 10^{-5}$ \\ \hline
$120$ & $21.790301$ & $0.716799$ & $1.492965\cdot 10^{-3}$ & $-3.848622\cdot 10^{-6}$ \\ \hline
\end{tabular}
\caption{FTCS results. We used stepsizes $\Delta X = 10^{-2}$ and $\Delta \tau = 10^{-3}$.}
\end{table}

\begin{table}[h!]
\begin{tabular}{|c|c|c|c|c|}
\hline
$S_0$ & V & $\Delta$ & Diff with analytical V & Diff with analytical $\Delta$\\ \hline
$100$ & $9.625624$ & $0.486341$ & $2.666290\cdot 10^{-4}$ & $4.905980\cdot 10^{-5}$ \\ \hline
$110$ & $15.126812$ & $0.611568$ & $-1.779052\cdot 10^{-3}$ & $2.901425\cdot 10^{-5}$ \\ \hline
$120$ & $21.788755$ & $0.716819$ & $-5.333101\cdot 10^{-5}$ & $1.606690\cdot 10^{-5}$ \\ \hline
\end{tabular}
\caption{Crank--Nicolson results. We used stepsizes $\Delta X = 10^{-2}$ and $\Delta \tau = 2 \cdot 10^{-2}$.}
\end{table}

\subsection{Option value and hedge versus $S_0$}
In figure \ref{fig:valhedge} we show the option value and hedge for a European call option versus the initial stockprice.
Option parameters are as in section \ref{optionspecs}. In figure \ref{fig:dvalhedge} we do the same for a digital option.
These figures were made with the Crank--Nicolson scheme using $\Delta \tau = 1.7\cdot 10^{-3}$ and $\Delta X = 10^{-3}$.


\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{optionvalue.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{hedge.png}
\caption{
The option value and hedge versus the initial stockprice for a European call option.
Parameters are as in section \ref{optionspecs}.
\label{fig:valhedge}}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\includegraphics[width = 0.49 \linewidth]{doptionvalue.png}
\hfill
\includegraphics[width = 0.49 \linewidth]{dhedge.png}
\caption{
The option value and hedge versus the initial stockprice for a digital option.
In the hedge there is some noise visible around $S = 100$, probably a remnant from
the discontinuity in the payoff.
\label{fig:dvalhedge}}
\end{center}
\end{figure}
\section{Conclusion}
Our experiments confirmed our expectations regarding the stability and convergence of the FTCS
and Crank--Nicolson schemes. The Crank--Nicolson scheme is superior due to its convergence (quadratic
in both the temporal and spatial stepsize) and its unconditional stability. That the scheme is implicit
does not really hurt the performance because the system of equations is tridiagonal, so
that it can be solved in linear time.

For the valuation of simple options integration of the Black--Scholes PDE is much more
efficient and accurate than for example a Monte Carlo method. It also gives an accurate
estimation of the hedge. However for high dimensional problems or options with complex payoff
structures this advantage will disappear.
\end{document}
