\documentclass[11pt]{article}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{subcaption}
\usepackage[margin=1in]{geometry}
\usepackage{url}
\usepackage[title,titletoc]{appendix}
\usepackage[table]{xcolor}
\usepackage[tikz]{bclogo}
\usepackage[round]{natbib}
%\usepackage{cleveref}
%\usepackage{subfigure}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{eqparbox}
\renewcommand{\algorithmiccomment}[1]{\hfill\eqparbox{COMMENT}{\# #1}}
\algnewcommand{\LineComment}[1]{\State \# #1}
%\algrenewcomment[1]{\State \(\triangleright\) #1}
\usepackage{amsmath}
\usepackage{comment}
\usepackage{mhchem}
\usepackage{array,etoolbox}
\preto\tabular{\setcounter{magicrownumbers}{0}}
\newcounter{magicrownumbers}
\newcommand\rownumber{\stepcounter{magicrownumbers}\arabic{magicrownumbers}}


%\usepackage{times}
%\usepackage{wrapfig}
%\usepackage{float}
%\usepackage[colorlinks,plainpages=false]{hyperref}
%\usepackage[nonumberlist]{glossaries}
\captionsetup{font=small,labelfont=bf} % caption is already loaded above; \captionsetup avoids an option clash
%\usepackage{standalone}
\usepackage{tikz}
\usetikzlibrary{circuits, circuits.ee.IEC,backgrounds}
\tikzset{circuit declare symbol = vmeter}
\tikzset{set vmeter graphic ={draw,generic circle IEC, minimum size=5mm,info=center:V}}

%\usepackage{circuitikz}
\usetikzlibrary{arrows,calc}
\usepackage{standalone}
\usepackage{multirow}
%\usetikzlibrary{external}
%\tikzexternalize[prefix=figures/]
%\usepackage{gantt}
%\usepackage[none]{hyphenat}
%\makeglossaries
%\loadglsentries{glossary}
\renewcommand{\appendixname}{Appendix}
%\providecommand{\e}[1]{\ensuremath{\times 10^{#1}}}
\renewcommand{\arraystretch}{1.5}
\begin{document}

% \begin{abstract}
%   Multiscale modeling and simulations have become an important trend
%   in neuroscience. However little has been done to provide a platform
%   to perform multiscale simulations in an efficient and reliable
%   way. A lack of interoperability between existing models, an
%   application of different physical and mathematical formalisms on
%   different levels of neural organization and an absence of
%   methodology narrows existing frontiers of possibilities. I will
%   present an efficient solution for simulating models that span
%   multiple temporal and spatial scales. In particular an efficient and
%   a reliable method for coupling separate numerical engines will be
%   considered. Analysis of the proposed method will be conducted. The
%   process of builing, verification and simulation of a test multiscale
%   system will be discussed.
% \end{abstract}

\section{Introduction}

% The field of numerical analysis has been existing already since some
% centuries before the invention of modern computers. The fast
% development of a computational power boosted the development of more
% sophisticated and efficient numerical methods that found
% applications in many fields of engineering, physical sciences as
% well as the life sciences such as biology and neuroscience. In turn,
% this allowed to build more complex models and to simulate more
% complex phenomena.
% \begin{comment}
%   the development of numerical analysis and methods
% \end{comment}

% In different areas of science problems require mathematical
% modeling.  If the problem can not be solved analytically
% approximation of the solution is found using numerical approximation
% methods.  In neuroscience computational modeling and simulations
% provide an important tool to study phenomena at different levels of
% abstraction: from molecules on a sub-cellular level to a single
% neuron and population of those on a network and system
% levels~\citep{djurfeldt2007workshop}.
\begin{comment}
  mathematical modeling and simulations
\end{comment}
The concept of multiscale modeling is used in many fields such as
meteorology~\citep{shukla2009seamless, kurowski2013toward}, cardiac
physiology~\citep{hernandez2011integration} and
neuroscience~\citep{bhalla2014multiscale}.  It implies interaction of
different levels of physical organization across different temporal
and spatial scales. For instance, in neuroscience, the phenomena of
synaptic plasticity % , is one of the major
% questions to answer. The phenomena
span different physical and chemical processes: physiological, cell
electrodynamics and molecular biology. Each process acts on its own
temporal and spatial scale and interacts bidirectionally and
continuously with the other processes.
\begin{comment}
  the concept of multiscale modeling in neuroscience
\end{comment}

An idea of data integration between different levels of neural
organization has become an important trend in
neuroscience. \cite{bhalla2011multiscale} explores cross-scale
interactions between cellular and subcellular levels in the context of
homeostasis and synaptic plasticity. The multiscale model proposes the
pruning mechanism of the weak synapses during cellular excitability.
Another study by \cite{mattioni2013integration} presents an
integration between electrical and biochemical processes in the model
of a Medium Spiny Neuron (MSN). In particular, the influence of
different input patterns on membrane excitability and the mechanism of
inter-spine synaptic plasticity is studied.

In order to obtain the global multiscale system behavior the coupling
of different components can be done on three different model
description levels~\citep{kubler2000two}. On a physical level the
system is represented by a set of physical parameters, for instance,
by morphological, electrical or biochemical parameters of a cell. Then
the mathematical model description gives an abstract representation of
the system by means of mathematical equations. Finally, results of the
simulation of a mathematical model are presented on a behavioral model
description level.

On the model behavioral description level the coupling implies an
integration of different simulators or modules. One of the main
advantages of the coupling on this level is the possibility to use
already existing software packages specialized for certain domains of
interest. Besides, the distributed numerical computations make this
approach especially attractive for performing simulations in a
parallel environment.

There are quite a few considerations that have to be taken while
coupling different simulators. First is the practical aspect, for
instance a synchronization algorithm between the components and a data
scaling strategy. \cite{mattioni2013integration} proposed an
event-driven algorithm where the synchronization of the scaled
exchanged variables of interest have to be communicated each time the
event happens. The algorithm showed better performance results in
comparison with the time-driven algorithm where the synchronization of
the variables has to be performed at the regular time
intervals. Second is the theoretical aspect of the simulator coupling
that has not been taken into considerations
yet. % This paper will address the question of coupling
% numerical engines of different simulation packages. In particular,
% we focus on an efficiency of distributed numerical integration with
% respect to a numerical accuracy.  while solving multiscale
% mathematical model in a decoupled fashion.  ~\footnote{The decoupled
% multiscale mathematical model is used as a synonym to the coupling
% of different components}.

Different coupling techniques may suffer from numerical phenomena
such as instability~\citep{arnold2001preconditioned}. One of the
main challenges in coupling numerical engines is to ensure convergence
of a discrete system. If a coupled integration is convergent, a proper
choice of the step size still has to be made in order to guarantee
numerical stability. It is important to keep in mind that the
numerical stability of a coupled integration is not guaranteed by the
stability of a separate integration of the system components. Then, in
addition to the choice of numerical integration methods, the order of
approximation of the exchanged variables has to be taken into
consideration. At the same
time, it is crucial to provide an integration in an efficient way
keeping the accuracy within desired bounds. Finally, the problem is
becoming complicated by simulating hybrid multiscale systems. For
instance, chemical interactions can be described either in a
deterministic or in a stochastic way possibly accompanied by diffusion
processes.

% An absence of reliable and efficient methods to perform multiscale
% simulations makes it non-trivial to integrate the data across
% multiple temporal and spatial scales.  We see several possible
% approaches to simulate multiscale models: either to have a
% monolithic simulator that spans domains of interest or to have a
% methodology for coupling together separate domains.

% The development of the "all-in-one" framework can become of a great
% importance once a new theoretical foundation of the emergence of
% different levels of organization will be established. Otherwise an
% attempt to build such a framework can become a very sophisticated
% task. Another disadvantage can be considered is an absence of
% interoperability with already existing models. Since the largest
% portion of the existing models were formulated by incompatible model
% definition formats of different simulation packages. The
% reimplementation of the models is an error prone and time consuming
% task. % To overcome this problem most of the developers have agreed to
% use simulator-independent standards such as
% SBML~\citep{hucka2003systems}. Also an active work towards the
% development of multiscale model standard is going
% on~\citep{gleeson2010neuroml}.

The paper aims to focus on the numerical aspects of different coupling
strategies. We present the method that allowed us to bridge
subcellular and cellular level models described by Ordinary
Differential Equations (ODEs) in an accurate and efficient way.

% We present the first steps towards the development of a methodology
% for coupling neural models that encompass multiple physical and
% chemical processes and act on different temporal and spatial
% scales. In particular, the proposed method allowed us to bridge
% subcellular and cellular level models described by Ordinary
% Differential Equations (ODEs) in an accurate and efficient way.

% consider the problem of coupling separate domains in
% neuroscience. Coupling together existing
% domains~\citep{bhalla2011multiscale} or simulation
% packages~\citep{djurfeldt2010run} implies a development of the
% methodology to couple different numerical engines.


We begin with an overview of numerical engines widely used in the
scope of interest (Section~\ref{sec:numerical_methods}). In
Section~\ref{sec:adaptive_solver} we introduce an algorithm for an
adaptive control of the integration step size.  We present possible
organization strategies in the system composed of multiple components
in Section~\ref{sec:organizations}.  Section~\ref{sec:test_model}
describes the multiscale test model we build: its dynamics,
communication signals between the components, mathematical
formulation, details of implementation and verification
processes. Then, in Section~\ref{sec:results} we show the results and
analysis of the proposed coupling technique. Finally, in
Section~\ref{sec:discussion} we discuss the insights we gained from
this work, recommendations and possible future directions of research.
\section{Methods}
\subsection{Numerical methods}\label{sec:numerical_methods}
We focus on cellular and subcellular (molecular) levels of neuronal
organization. On the cellular level electrical properties of cell
membranes are typically studied. The cell with its complex
arborizations is usually represented by a cable split into a
number of compartments. Then, we employ compartmental
modeling~\citep{rall1964theoretical} to describe the neural processes
where the dynamics of each compartment are defined by a coupled system
of ordinary differential equations (ODEs). Finally, by applying the
Hodgkin and Huxley formalism to define the
currents~\citep{hodgkin1952quantitative} a nonlinear system of ODEs
has to be solved. This approach is essentially the basis of all
simulators that take neural morphology into account
(e.g. NEURON~\citep{hines1989program},
GENESIS~\citep{wilson1990genesis},
MOOSE~\citep{dudani2009multiscale}).

On a molecular level the interaction of biochemical signaling pathways
is of particular interest. A signaling pathway is usually referred to
as a set of reactions between the molecules that operate on a
subcellular level~\citep{bhalla1998network}. To define a signaling
pathway chemical kinetics or chemical rate theory is usually applied.
One of the traditional ways to model the kinetics is by viewing the
system of reactions as deterministic. Then the chemical species are
usually considered in concentration units that evolve over time. These
models are usually described by a system of nonlinear ODEs.

The complexity of the systems requires numerical
computations. Simulation packages offer the user a choice between
different numerical integration methods. The choice is usually made by
running the simulation with different step sizes of the discretization
in time and comparing how fast the problem can be solved with the
desired accuracy. However it is crucial to consider the stability
properties of the method as well. If a system of ODEs is
\textit{stiff} and is solved with explicit numerical methods then the
step size of the discretization is limited by stability and not by
accuracy. Then, an efficient numerical integration requires
\textit{implicit} methods. These methods allow the simulation to be
discretized with the larger time steps due to the good stability
properties.

The Crank--Nicolson (CN) method with the staggered time step approach
is widely used for solving branched nerve equations in
neuroscience~\citep{hines1984efficient}. The proposed approach allows
the user to obtain a solution in an accurate and efficient way
(Appendix~\ref{appx:cn_staggered}). The Crank--Nicolson method is an
implicit method and can be used for stiff systems. However, little is
known about stability properties while working on a staggered grid
(see Appendix~\ref{appx:stability_cn_staggered} for details).  We have
implemented and applied this method to approximate the solution of the
electrical model. We will refer to it as modified CN (mCN) further on.

We have implemented the Classical Runge-Kutta (RK4) method to solve
our biochemical model~\citep{bhalla2011multiscale}. This method is
known to be an explicit numerical method with a bounded stability
domain and therefore it is not suitable for stiff problems. A
mathematical formulation of the method can be found in
Appendix~\ref{appx:crk}.

While both the mCN and the RK4 methods provide an efficient means for
simulating the models on the appropriate levels, the coupling of these
methods poses additional problems.

\begin{enumerate}
\item Multiscale systems are usually composed of components acting
  on different timescales. For example, the timescale of a single
  spike is of the order of few milliseconds. However, simulations
  usually run for many seconds in order to observe the effects on a
  biochemical level. The gap in the timescales demands an efficient
  integration strategy. We suspect adaptive integrators can be more
  efficient or require less time for a given degree of accuracy due to
  the alternate activity of fast and slow components in multiscale
  systems. Both the mCN and the RK4 methods are normally applied on a
  fixed step size grid. Neither provides an error estimation mechanism
  that could be used together with a step size control mechanism.
\item Little is known about the error propagation for the mCN--RK4
  coupled integration. The question of whether the second order of
  accuracy of the mCN method is preserved remains open.
\item Numerical stability of the system components solved separately
  does not guarantee the stability of the coupled system in general as
  shown in Appendix~\ref{appx:instability}. Our multiscale system has
  to be stiff due to the rapid changes of the electrical
  component. To be on the safe side we aim to avoid using methods
  not suitable for stiff problems.
\end{enumerate}

Considering the points above we implement another approximation
method, the Backward Differentiation Formula (BDF). In particular, we
are interested in a second-order BDF (BDF2) widely used for solving
stiff differential equations and Differential Algebraic Equations
(DAEs). The BDF2 method was analyzed with respect to its stability
properties and error estimates were provided within a
decoupled\footnote{ In \cite{skelboe2000accuracy} a given system has
  to be partitioned into loosely coupled subsystems first and then
  decoupled formulas are applied to integrate the system. Here, the
  subsystems to be coupled are given. Thus we apply a term "coupled"
  hereinafter.}  numerical integration
strategy~\citep{skelboe2000accuracy}. The mathematical formulation of
the method is given in Appendix~\ref{appx:bdf2}. We apply this method
to approximate both solutions on cellular and subcellular levels. We
have implemented the method on a fixed step size grid in order to be
able to perform straightforward comparison with the mCN and the RK4
methods. However for most of the results we use the method
implementation with an adaptive step size controller as described in
Section~\ref{sec:adaptive_solver}.

% We expect our multiscale system to be stiff due to the rapid changes
% in the electrical component.


\subsection{Adaptive step size
  controller}\label{sec:adaptive_solver} % should this be separated in subsections?
The aim of the adaptive step size controller is to reduce the
computational cost of the simulation while keeping the local error
within acceptable
bounds. % Especially it can become beneficial and even
% necessary to guarantee numerical stability when the problem is
% stiff.
The reduced computational cost can mainly be achieved by a reduced
number of the required time steps at which a solution is approximated.

We implement an adaptive controller of the step size described in
\cite{deuflhard2002scientific}. It calculates a local discretization
error (to be exact an approximation of it) $|\left[\epsilon_j\right]|$
of the taken step $h_j$ to calculate an optimal step size $h$
(Algorithm~\ref{alg:controller}, line \#\ref{alg:controller-line9}) and
then uses it as a predictor for the next step size. In other words, if
the calculated quality of the current step $h_j$ is good enough
(Algorithm~\ref{alg:controller}, line \#\ref{alg:controller-line10}),
the algorithm continues with the just calculated optimal step size
$h$ as the \textit{next} step size. Otherwise, the just calculated
optimal step size $h$ is used as the \textit{current} step size
(Algorithm~\ref{alg:controller}, line \#\ref{alg:controller-line17})
and the current step has to be redone. A description of the algorithm
is shown in Algorithm \ref{alg:controller}.

% To ensure the required precision of the system, one would like to
% have a control over the global error. This is not achievable, but
% instead we can control the local discretization error,
% $||\epsilon_{j+1}||$. Unfortunately, we can not calculate this error
% exactly, and have to settle with an approximation
% $|\left[\epsilon_j\right]|$, and thus require
% \begin{equation}
%   |\left[\epsilon_j\right]| < TOL
% \end{equation}
% Here, $TOL$ is a tolerance defined by the user. The error estimate
% $|\left[\epsilon_j\right]|$ is a measure of the quality of the taken
% step. Using this, we want to calculate the optimal value for the
% next step. This is not doable, since it requires knowledge of the
% future that we do not have. Instead, we calculate the optimal
% current step size $h^*_j$. If the calculated quality of the
% current step is good enough, we continue to the next step with
% $h^*_j$ as a prediction of the \textit{next} step size. If the
% quality is not good enough, we use the now calculated optimal step
% size $h^*_j$ as the \textit{current} step size and redo this
% step. This leads to the algorithm shown in Algorithm
% \ref{alg:controller}. Of special interest is line number
% \ref{alg:controller-line9}. This statement consists of two important
% parts. First, it gives an approximation of $h^*_j$ as
% \begin{equation}
%   h^*_j = \sqrt[p+1]{\frac{\rho\cdot TOL}{|\left[\epsilon_{j+1}\right]|}}h_j \label{eq:tau star}
% \end{equation}
% This is based on the following;

% For the ''optimal'' step $h^*_j$, we require that the
% corresponding error, $|\epsilon^*_j|$ is close to the tolerance
% level $TOL$. If it is to far below, the algorithm won't be efficient
% and if it is much larger, the results won't be dependable. Thus, we
% want
% \begin{equation}
%   |\epsilon^*_j| \approx TOL
% \end{equation}
% Since
% \begin{equation}
%   \left|\left[ \epsilon_{j+1}\right] \right|\approx \left| \epsilon_{j+1} \right| = c(t_j)h^{p+1}_j + \Omega(h^{p+2}_j) \approx c(t_j)h^{p+1}_j
% \end{equation} 
% and
% \begin{equation}
%   TOL \approx |\epsilon^*_j| \approx c(t_j)(h^*_j)^{p+1}
% \end{equation}
% we get, after reorganization that
% \begin{equation}
%   \frac{TOL}{(h^*_j)^{p+1}} = c(t_j) = \frac{\left|\left[ \epsilon_{j+1}\right] \right|}{h^{p+1}_j}
% \end{equation}
% Rearranging this expression gives
% \begin{equation}
%   \left( \frac{h^*_j}{h_j} \right)^{p+1}  = \frac{TOL}{\left|\left[ \epsilon_{j+1}\right] \right|}
% \end{equation}
% Rearranging this equation and including a safety factor $\rho$ for
% the tolerance gives Equation \ref{eq:tau star}. The second part of
% the statement at line number \ref{alg:controller-line9} is also a
% precaution. It both gives an absolute maximum time step,
% $h_{max}$ as well as an maximal increase defined by $q$. This is
% to ensure that the step size don't accelerate to fast.

% \begin{table}[h]
%\centering
%\begin{tabular}{cl}
%$\left| \epsilon_{j+1} \right|$  & Local error contribution \\
%$\left|\left[ \epsilon_{j+1}\right] \right|$ & Computable estimate of $\left| \epsilon_{j+1} \right|$ \\
%$\left| \epsilon_{j+1}^* \right|$ & Local error contribution of step of ''optimal'' length.
%\end{tabular}
%\caption{Explanations of the different $\epsilon$}
%\end{table}

\begin{algorithm}
  \caption{Step size
    controller~\citep{deuflhard2002scientific}} \label{alg:controller}
  \begin{algorithmic}[1]
    \State $h_0=\bar{h}_0$\Comment{Choose initial step size
      prediction $\bar{h}_0$}
    % \State Choose initial value $x_0$ and first time step $h_0$
    \State $j = 0$ \Comment{Initiate the iteration index} \State
    $\Delta_t = \{t_0\}$ \Comment{Initiate the time set} \State
    $x_{\Delta}(t_0) = x_0$ \Comment{Initiate the solution set}
    \While{$t_j < T$} \Comment{Within the simulation time $T$ do:}
    \State $t = t_j + h_j$ \State $x = \Psi^{t,t_j}x_{\Delta}(t_j)$
    \Comment{Advance the solution from $t_j$ to $t$} \State compute
    $|\left[\epsilon_j\right]|$ \Comment {See
      Section~\ref{sec:error_estimation}}\State $h
    =\min\left(qh_j,h_{max}, \sqrt[p+1]{\frac{\rho TOL}
        {|\left[\epsilon_j\right]|}}h_j\right)$ \label{alg:controller-line9}
    \Comment{Calculate an optimal time step $h$}
    \If{$|\left[\epsilon_j\right]|\leq
      TOL$} \label{alg:controller-line10} \Comment{Step is accepted}
    \State $t_{j+1} = t$ \State $\Delta_t = \Delta_t \cup \{t_{j+1}\}$
    \Comment{Update the time set} \State $x_{\Delta}(t_{j+1}) = x$
    \Comment{Update the solution set}\State $h_{j+1}$ =
    min$(h,T-t_{j+1})$ \Comment{Set the next time step}\State
    $j=j+1$ \Comment{Advance integration}\Else \Comment{Step is
      rejected} \State $h_j = h$ \label{alg:controller-line17}
    \Comment{Set the current time step}
    \EndIf
    \EndWhile
  \end{algorithmic}
\end{algorithm}

The optimal time step $h$ is calculated according to the following
formula:
\begin{equation}\label{eq:opt_tau}
  h =\min\left(qh_j,h_{max},\sqrt[p+1]{\frac{\rho TOL}
      {|\left[\epsilon_j\right]|}}h_j\right),
\end{equation}
where $\rho$ ($\rho < 1$) is a safety factor; $p$ is the order of the
discretization method specified in the discrete evolution
$\Psi^{t+h,t}$; $qh_j~(q>1)$ and $h_{max}$ are additional
bounds on the step size increase for the situations when the error
estimation value ($|\left[\epsilon_j\right]|$) becomes or is close to
zero.

The last term of~\eqref{eq:opt_tau} represents an
\textit{I-controller} mechanism. According to
\cite{deuflhard2002scientific} there can be situations where a
\textit{PID-controller} is preferable:
\begin{equation}\label{eq:PIDcontroller}
  h_{k+1}=\left(\frac{\rho\cdot Tol}{|\left[\epsilon_{k+1}\right]|}\right)^{\beta_I+\beta_P+\beta_D} 
  \left(\frac{|\left[\epsilon_{k}\right]|}{\rho\cdot Tol}\right)^{\beta_P+2\beta_D}
  \left(\frac{\rho\cdot Tol}{|\left[\epsilon_{k-1}\right]|}\right)^{\beta_D}h_k
\end{equation}
Note that if the parameters ($\beta_P,\beta_D,\beta_I$)
of~\eqref{eq:PIDcontroller} are chosen such that $\beta_P=\beta_D=0$
and $\beta_I=1$, then \eqref{eq:PIDcontroller} reduces to the
I-controller.

In order to implement Algorithm~\ref{alg:controller} an error
estimation mechanism should be provided.

\subsubsection*{Estimate of the local discretization
  error} \label{sec:error_estimation} The error estimation is a method
dependent technique. Below we describe the error estimation for the
BDF2 method based on a predictor-corrector algorithm. The difference
of the discrete evolutions given by~\eqref{eq:error control}
represents an estimate of the local error $|\left[\epsilon\right]|$.
\begin{equation}\label{eq:error control}
  \left|\left| \Psi^{t+h,t}x - \hat{\Psi}^{t+h,t}x\right|\right| \leq TOL,
\end{equation}
where the prediction step taken by the discrete evolution
$\hat{\Psi}^{t+h,t}$ calculates a rough approximation of the solution
and the corrector step taken by $\Psi^{t+h,t}$ refines the
initial approximation.

% bound the norm of the difference between the second order polynomial
% predictor $Y^{p2}_n$ and the approximated solution $Y_n$ to control
% the local error (Equation~\ref{eq:error control}) at each time
% step. We use the same polynomial function $Y^{p2}_n$ as a predictor
% in error estimation as the one required to approximate exchanged
% variables (Section~\ref{sec:extrapolation}). To approximate the
% solution $Y_n$ we use BDF2 method according to
% Section~\ref{sec:numerical_methods}.
Often, the tolerance $TOL$ is set as a combination of a relative
tolerance, $relTOL$, and an absolute tolerance, $absTOL$. This can be
expressed as
\begin{equation}
  \left|\left| \Psi^{t+h,t}x - \hat{\Psi}^{t+h,t}x\right|\right| \leq relTOL \cdot \left|\left|\Psi^{t+h,t}x\right|\right| + absTOL
  % \left|\left| Y_n - Y^{p2}_n\right|\right| \leq Tol = relTol \cdot
  % \left|\left|Y_n\right|\right| + absTol
\end{equation}
From a practical point of view, it is more appropriate to use a
similar requirement component-wise. Rearranging it gives the error
control quantity $\left|\left[ \epsilon\right] \right|$ :
\begin{equation}\label{eq:error_estimation}
  \left|\left[ \epsilon\right] \right| = \max_i\frac{\left|\Psi^{t+h,t}x_i - \hat{\Psi}^{t+h,t}x_i\right|}{relTol\cdot \left|\Psi^{t+h,t}x_i\right| + absTol_i}\leq 1
  % \left|\left[ \epsilon_{j+1}\right] \right| =
  % \max_i\frac{\left|Y_{n,i}-Y^{p2}_{n,i}\right|}{relTol\cdot
  % \left|Y_{n,i}\right| + absTol_i}\leq 1 = TOL
\end{equation}
% Moreover, it is common to express the absolute tolerance as the
% relative tolerance multiplied with a typical solution as
% $\mathrm{absTol_i} = \mathrm{relTol} \cdot |Y_{typical,i}|.$
Therefore, $TOL$ in Algorithm~\ref{alg:controller} becomes simply
$TOL=1$.

The best efficiency is usually achieved if both the predictor and the
corrector are of the same order. Furthermore, polynomial interpolation
formulas are preferred as predictors in connection with stiff
problems~\citep{skelboe2000accuracy}. We have chosen the second order
polynomial formula described later on in
Section~\ref{sec:extrapolation} as a predictor
in~\eqref{eq:error_estimation}.


\subsection{Organization of computations}\label{sec:organizations}

In the distributed numerical integration the components of a system
are solved separately on time windows $[t_n,t_{n+1}]$. Then
information is exchanged at synchronization points $0=t_0<\dots < t_n
< t_{n+1}<\dots$ . Different aspects of integration are usually
considered: the use of different discretization step sizes in the
components, the coupling of different numerical methods, various
organizations of computations between system components. Here, we
study two organizations: \textit{Jacobi} and
\textit{Gauss-Seidel}. The choice may have a crucial impact on both
numerical stability and accuracy.

We introduce the notion of \textit{macro time step} and \textit{micro
  time step}. The macro time step determines the communication points:
how long the components can run independently from each other without
losing accuracy. Micro time step determines the discretization points
of each component between two communication points. The latter is
usually determined by accuracy, stability and the numerical method
used. In all our simulations we choose the micro-step equal to the
macro-step, unless stated otherwise.

For simplicity we take an abstract system composed of two components:
Component~\textit{1} and Component~\textit{2}, then we can define the
system using continuous representation as:

\begin{equation}\label{eq:System}
  \begin{aligned}
    &\frac{d}{dt}x_1 = f_1(t,x_1,x_2)\\
    &\frac{d}{dt}x_2 = f_2(t,x_1,x_2),
  \end{aligned}
\end{equation}
where $x_{\mathrm{1}},x_{\mathrm{2}}$ are solution vectors of the
according component.


% An approximation of unknown variables should be considered.  The
% need occurs, for example, when one of the components requires an
% implicit method or when each component is solved with its own
% discretization time step.


\subsubsection*{Jacobi organization}

Jacobi organization in the system with two components leads to the
interaction shown in Figure~\ref{fig:jacobi}. In order to make a step
from time $t_n$ to $t_{n+1}$ each component gets exchanged variables
at time $t_n$ (white triangle arrows on
Figure~\ref{fig:jacobi}). Then, the components proceed to the time
point $t_{n+1}$.
\begin{figure}[h!]
  \centering
  \includestandalone{figures/jacobi}
  % \includegraphics[width=0.6\textwidth]{figures/jacobi.pdf}
  \caption{Discretization in time of System~\ref{eq:System} using
    Jacobi organization. White triangle arrows correspond to
    communication signals between Component~\textit{1} and
    Component~\textit{2}. $H_n$ is a macro time step in the system,
    $h_{x_1},h_{x_2}$ are micro time steps of Component~\textit{1}
    and~\textit{2} accordingly.}
  \label{fig:jacobi}
\end{figure}


Requiring the exchanged variables to be known at time $t_{n+1}$ and
considering Jacobi organization, \eqref{eq:System} can be rewritten in
the form:

\begin{equation}\label{eq:jacobi}
  \begin{aligned}
    &\frac{d}{dt}x_1 = f_{1}(t,x_{1,n+1},\tilde{x}_{2,n+1})\\
    &\frac{d}{dt}x_2 = f_{2}(t,\tilde{x}_{1,n+1},x_{2,n+1}),
  \end{aligned}
\end{equation}
where $\tilde{x}_{1,n+1}$ and $\tilde{x}_{2,n+1}$ are approximated
solutions of Component~\textit{1} and Component~\textit{2}
respectively. This organization works very well in parallel
computations since no component needs to wait for the other.

\subsubsection*{Gauss-Seidel organization}

The Gauss-Seidel organization updates each component in a sequential
order (Figure~\ref{fig:GS}). Let Component~\textit{1} be the leading
component in~\eqref{eq:System}. Then, after the solutions have been
communicated at time $t_n$ (white triangle arrows on
Figure~\ref{fig:GS}), Component~\textit{1} proceeds until
$t_{n+1}$. Then the calculated solution of Component~\textit{1} at
time $t_{n+1}$ can be used by Component~\textit{2}. This communication
principle has been used by~\cite{mattioni2013integration}.

\begin{figure}[h!]
  \centering
  \includestandalone{figures/gausseidel}
  % \includegraphics[width=0.6\textwidth]{figures/gausseidel.pdf}
  % \input{figures/gausseidel.tex}
  \caption{Discretization in time of System~\ref{eq:System} using
    Gauss-Seidel organization. White triangle arrows correspond to
    communication signals between Component~\textit{1} and
    Component~\textit{2}. $H_n$ is a macro time step in the system,
    $h_{x_1},h_{x_2}$ are micro time steps of Component~\textit{1}
    and~\textit{2} accordingly.}
  \label{fig:GS}
\end{figure}

The Gauss-Seidel organization makes it possible to eliminate the error introduced
by a solution approximation as shown in~\eqref{eq:GS}. However it is
more difficult to apply this strategy efficiently during parallel
simulations since at least one component has to wait for the other.

\begin{equation}\label{eq:GS}
  \begin{aligned}
    &\frac{d}{dt}x_1 = f_{1}(t,x_{1,n+1},\tilde{x}_{2,n+1})\\
    &\frac{d}{dt}x_2 = f_{2}(t,x_{1,n+1},x_{2,n+1}),
  \end{aligned}
\end{equation}
where $\tilde{x}_{2,n+1}$ is an approximated solution of
Component~\textit{2}.

% In comparison with Jacobi, the Gauss-Seidel organization is more
% difficult to apply in parallel simulations efficiently.

\subsubsection*{Exchanged variables
  approximation}\label{sec:extrapolation}
We aim to simulate two components independently where the solution of
one component depends on the solution of another component each
integration time step as shown in~\eqref{eq:System}. It can happen
that the information is not available at a certain time point. Then an
approximation of exchanged variables can be considered. For instance,
the application of an implicit numerical method requires the knowledge
at the time point $t_{n+1}$ as shown in \eqref{eq:jacobi} and in
\eqref{eq:GS}.

Here we will compare two approximation strategies, the so-called
\textit{Mode~1} and \textit{Mode~3} \citep{skelboe2000accuracy}.
Mode~1 implies a constant extrapolation $\tilde{x}_{n+1} = x_{n}$, so
that the solution at the previous time step $n$ is used when
required. % In the Mode~2 a linear
% interpolation formula is applied to calculate $ \tilde{y}_{n+1}$:
% \begin{equation}
%   \tilde{y}_{n+1} = y^{p}_{n+1} = y_{n}+\gamma_{n+1}(y_{n}-y_{n-1}),
% \end{equation}
% where $\gamma_{n+1}=h_{n+1}/h_{n}$.
Mode~3 uses a second order polynomial to approximate the value at time
$t_{n+1}$:
\begin{equation}\label{eq:predictor}
  \tilde{x}_{n+1}=x^{p2}_{n+1} = \bar{\alpha}_{1}x_{n}+\bar{\alpha}_{2}x_{n-1}+\bar{\alpha}_{3}x_{n-2}
\end{equation}

\begin{equation*}
  \bar{\alpha}_1 = 1-\bar{\alpha}_2-\bar{\alpha}_3,\quad
  \bar{\alpha}_2 = \frac{\gamma_{n+1}(\gamma_{n+1}+\delta_{n+1})}{1-\delta_{n+1}},\quad
  \bar{\alpha}_3 = \frac{\gamma_{n+1}(\gamma_{n+1}+1)}{\delta_{n+1}(\delta_{n+1}-1)},
\end{equation*}
where $\gamma_{n+1}=h_{n+1}/h_{n}$ and $\delta_{n+1}=1+h_{n-1}/h_{n}$.
% We have compared all there strategies on both fixed step size grid
% and with the adaptive solver. The Mode~3 showed the most accuare
% results in our simulations. Therefore Since the latter method
% requires the knowledge at the two previous time steps, we have
% choosen to use the constant extrapolation for the first first two
% steps since the knowledge at the two previous time steps and for the
% remaining steps we use the second order polynomial to approximate
% the exchanged variables.


\subsection{Test model}\label{sec:test_model}
In our test multiscale model we span two levels of neural
organizations: we model the electrical dynamics of a single neuron and
biochemical processes in one of its compartments named
\textit{spine}. The parameters of the models are given in
Appendix~\ref{appx:models}.

\subsubsection*{Dynamics}

The stimulating current is applied to the \textit{soma}, resulting in
depolarization of the membrane potential. The change in the membrane
potential activates the voltage dependent sodium and potassium
channels and a spike train propagates through the axial resistance to
the spine. Spine depolarization activates the voltage dependent
calcium channels and a calcium current flow into the spine. These
processes act on a time scale of a few milliseconds. In turn, Ca influx
triggers multiple signaling cascades on a sub-cellular level. We use
the mitogen-activated protein kinase (MAPK/ERK) signaling model taken
from~\cite{bhalla2011multiscale}. This model exhibits a bistable
chemical switch that is triggered by the calcium influx. This switch
then leads to the changes in synaptic conductance of the neuron. The
time scale of the chemical system is approximately 10~s, thus around
100 times larger than that of the electrical system. These dynamics
mimic those considered to play an important role in such phenomena
as synaptic plasticity~\citep{hayer2005molecular}.

% \begin{figure}[h!]
%   \centering
%   \includegraphics[width=0.75\textwidth]{figures/cell2.pdf}
%   \caption{The solution of the electric system for the first
%   200~ms. Oscillations correpond to the current injection period.}
%   \label{fig:cellsolu}
% \end{figure}

% \begin{figure}[h!]
%   \centering
%   \includegraphics[width=0.75\textwidth]{figures/erk.pdf}
%   \caption{The solution for the biochemical system with the calcium
%   clump at 1~uM for 5~s.}
%   \label{fig:erksolu}
% \end{figure}

% The parameters of the electrical model were taken from the squid
% giant axon model ( the folder \textit{Demos} in the
% MOOSE~\citep{ray2008pymoose} repository). Parameters tuning was
% necessary to induce a certain spike train under the stimulation of
% the soma that brings calcium concentration to a desired
% range. % Calcium
% elevation of 1~uM for about 5~s is sufficient to trigger a
% biochemical switch in the MAPK/ERK model.

The current of ${0.09\cdot 10^{-9}}$~A is injected for 5~s, which is
sufficient to elevate the ${Ca^{2+}}$ level up to the 1~$\mathrm{\mu M}$
required for the biochemical switch. When the injection has finished,
the simulation continues until it reaches 45~s of the total simulation
time. Then the biochemical system has settled at its second steady
state.

We choose three molecular concentrations to plot as our reference
solution. Figure~\ref{fig:refsolu} shows calcium $Ca^{2+}$,
phosphorylated form of MAPK (\textit{P\_MAPK}) and potassium $Ka$
concentration traces. When \textit{P\_MAPK} is activated, it
phosphorylates potassium and leads to its non-conductivity.  The thick
part of the calcium concentration trace corresponds to the oscillating
behavior of the $Ca^{2+}$ current during the stimulation period
between 1~s and 6~s of the simulation time.

\begin{figure}[h!]
  \centering
  \includegraphics[width=0.75\textwidth]{figures/correct_sol.eps}
  \caption{The solution of $Ca^{2+}$, \textit{P\_MAPK} and \textit{Ka}
    molecule concentrations obtained with
    \textit{ode15s}\protect\footnotemark~MATLAB function. To achieve
    high accuracy, \textit{Reltol} and \textit{Abstol} were set to
    $10^{-9}$ and $10^{-12}$ respectively.}
  \label{fig:refsolu}
\end{figure}

\footnotetext{\textit{ode15s} is a solver designed for stiff
  problems. It is a quasi-constant step size implementation of the
  backward differentiation methods~\citep{shampine1997matlab}.}


\subsubsection*{Communication signals}

% ${Ca^{2+}}$ signaling is known to play an important role in a wide
% range of phenomena in neuroscience.
We use ${Ca^{2+}}$ influx as a key signal in our multiscale model. The
${Ca^{2+}}$ current in the electrical model ($I_{Ca}$) is transformed
to the ${Ca^{2+}}$ injection rate to the biochemical model ($k_{inj}$)
as shown in Equation~\ref{eq:Kinj}.
\begin{equation}\label{eq:Kinj}
  k_{inj}=\frac{6.242\cdot 10^{18}}{2\cdot N_{A} \cdot vol}\cdot I_{Ca} \quad \left[\frac{M}{s}\right],
\end{equation}
where $N_{A}$ is the Avogadro constant, $vol$ is the volume of the spine
compartment.

In turn, the biochemical system provides calcium concentration and the
fraction of phosphorylated calcium dependent potassium channels in the
spine. The fraction then is used in the conductance evolution of the
$I_{K}$ current in the electrical system (Equation~\ref{eq:frac}).
\begin{equation}\label{eq:frac}
  g_{K}=\bar{g}_{K}\frac{[Ka]}{[Ka]^*} \quad \left[S\right],
\end{equation}
where ${[Ka]}$ is the concentration of non-phosphorylated calcium
dependent potassium channels, $[Ka]^*$ is the total concentration of
calcium dependent potassium channels, $\bar{g}_{K}$ is the maximum
potassium conductance.

\subsubsection*{Mathematical formulation}
We use compartmental modeling with the HH formalism to define 17
subsequent electrical circuits of the neuron that result in 24 ODEs
(Appendix~\ref{appx:electrical}). Chemical reactions in the spine are
defined by reaction-rate equations constituting a non-linear system of
18 ODEs (Appendix~\ref{appx:biochemical}). Considering the
communication signals in the system, \eqref{eq:System} can be
reformulated in the following form:
\begin{equation}\label{eq:sys_combined}
  \begin{aligned}
    &\frac{d}{dt}x_{chem} = f_{chem}(t,x_{chem},g_{1}(x_{elec}))\\
    &\frac{d}{dt}x_{{elec}} =
    f_{{elec}}(t,g_{2}(x_{{chem}}),x_{{elec}}),
  \end{aligned}
\end{equation}
where $g_1$ and $g_2$ are the output functions from the electrical and
the biochemical component respectively.
\begin{equation}\label{eq:comm_signals}
  \begin{aligned}
    &g_{1}(x_{{elec}},x_{chem}) = C_{1} (x_{{elec},i}-C_{2}
    \ln(x_{{chem},j})+C_{3}) x_{{elec},k}^2 x_{{elec},l}
    \\
    & g_{2}(x_{{chem}}) = \left\{ \begin{array}{l}
        x_{{chem},j} % & \quad \text{where}
        \\ C_{4} x_{{chem},m} % & \quad \text{where}
      \end{array},\right.
  \end{aligned}
\end{equation}

where $C_{1}\dots C_{4}$ are the constants; the indices $i\dots m$
correspond to the variables in the according solution vector $x$ at
time $t$:

${i}$ - potential in the spine [V]

${j}$ - calcium concentration in the spine [M]

${k}$ - probability for an s gate being opened (calcium channel
activation)

${l}$ - probability for an r gate being opened (calcium channel
inactivation)

${m}$ - concentration of active (non-phosphorylated) calcium dependent
potassium channels [M]


% Note, System~\ref{eq:comm_signals} considers the changes of calcium
% equilibrium potential due to the changes in the calcium
% concentration during the simulation. Though in this model it could
% be neglected.

\cite{kubler2000two} defines the coupled integration to be
zero-stable\footnote{Zero-stability is a property of a numerical
  method that guarantees the stability of the discrete system if the
  step size goes to zero.} if algebraic
loops % \footnote{It is said that
% there exists an algebraic loop if any interconnections in the system
% form a closed loop of components, i.e. the output of each component
% depends on the inputs. }
do not exist between the components. Since only the output function of
the electrical component $g_1$ depends on the solution vector of the
chemical component $x_{chem,j}$, we can expect a zero-stability of the
coupled integration\footnote{Among the assumptions made during the
  analysis of a zero-stability was an assumption of a linearity of the
  output functions. The latter does not hold in our system.
}. % \textit{can we?}  Otherwise iterative
% methods or a loop elimination technique can be applied to obtain a
% zero-stability for a coupled discrete system~\citep{kubler2000two}.
% The communication in the system can be simplified by neglecting the
% changes of calcium equilibrium potential in the electrical
% model. Then the functions $g_{1}$ and $g_{2}$ would look like:

% \begin{equation}\label{eq:comm_signals_simplified}
%   \begin{aligned}
%     &g_{1}(y_{\mathrm{elec}}) =
%     C_{1} (y_{\mathrm{elec},i}-C_{5}) y^{2}_{\mathrm{elec},k} y_{\mathrm{elec},l}  \\
%     &g_{2}(y_{\mathrm{chem}}) = C_{4} y_{\mathrm{chem},m}\\
%   \end{aligned}
% \end{equation}

% We decided to use a general case described by
% Equation~\ref{eq:comm_signals}.

\subsubsection*{Implementation}
Both models, electrical and biochemical, have been implemented in
MATLAB\textsuperscript{\textregistered}. The electrical component can
be solved either by the Crank--Nicolson method on a staggered grid or
by the BDF2 method. The solution of the biochemical component can be
obtained either by the RK4 method on a fixed step size grid or by the
BDF2 method. The adaptive step size controller has been used only when
both the electrical and biochemical models have been discretized with
the BDF2 method.

\subsubsection*{Validation and Verification }\label{sec:verification}
A straightforward way to understand whether the solution is valid
is to use a visual comparison technique. However, a rigorous way to
verify the solution is required.

The calcium communication signal during the stimulation has a crucial
impact on the biochemical component in the multiscale system. Calcium
concentration trace shows an oscillating behavior following calcium
current dynamics of the electrical component during the stimulation as
shown on Figure~\ref{fig:refsolu}. We choose the Discrete Fourier
Transform (DFT) of the calcium trace between 3~s and 5~s of the
simulation time to analyze the accuracy of our results. Of particular
interest is the zeroth-order Fourier coefficient that corresponds to a
summation of the function values. With an appropriate normalization
factor, $n(N)=\frac{1}{N}$, it corresponds to the mean value at the
chosen interval. Since we do not know the analytic solution, we use
our reference solution obtained with \textit{ode15s} (shown on
Figure~\ref{fig:refsolu}) and calculate an error relative to it as:

% During the analysis of the results we use a magnitude of the zeroth
% order Fourier coefficient to estimate an accuracy of the obtained
% solution. Since we do not know the analytic solution, we use our
% reference solution obtained with \textit{ode15s} (shown on
% Figure~\ref{fig:refsolu}) and calculate an error relative to it:
\begin{equation}\label{eq:e_rel}
  e_{Ca}=\frac{||\operatorname{fft0}(\check{x}_{\Delta,Ca})|-|\operatorname{fft0}(x_{\Delta,Ca})||}{|\operatorname{fft0}(\check{x}_{\Delta,Ca})|}\cdot n(N)\cdot 100\quad[\%],
\end{equation}
where $\operatorname{fft0}()$ is a function that returns the first
point of the discrete Fourier transform performed on the solution and
computed with a fast Fourier transform algorithm;
$\check{x}_{\Delta,Ca}$ is the $Ca^{2+}$ solution set on the interval
(3,5)~s obtained with \textit{ode15s}; $x_{\Delta,Ca}$ is the
$Ca^{2+}$ solution set on the same interval calculated with the
analyzed method.

The error at the end of the simulation interval also can be used for
accuracy estimates. We calculate a relative error of \textit{Ka} and
\textit{P\_MAPK} concentration values at time $T$ in the following
way:
% concentrations on our reference solution as well. We can also use
% the relative error of \textit{P\_MAPK} and \textit{Ka} concentration
% values at the end of the simulation interval for our accuracy
% estimates.
\begin{equation}
  e_{Ka}=\frac{|\check{x}_{T,Ka}-x_{T,Ka}|}{\check{x}_{T,Ka}}\cdot 100\quad[\%],
\end{equation}
where $\check{x}_{T,Ka}$ is the $Ka$ solution at time $T$ obtained
with \textit{ode15s}; $x_{T,Ka}$ is the $Ka$ solution at time $T$
calculated with the analyzed method. The same formula is applied to
calculate $e_{\text{\textit{P\_MAPK}}}$.

To look at the efficiency we plot the error versus the number of
function calls of the right hand side of the system of ODEs.


% \subsection{}
\section{Results}\label{sec:results}
% \subsection{An approximation strategy of exchanged variables has an
%   impact on accuracy }
% The RK4 numerical method is an explicit method, thus it does not
% require information from the future time points. The method calculates
% four increments $k_1$...$k_4$ based on the slope estimates at the time
% steps $t_n$,$t_n+h/2$,$t_n+h/2$,$t_n+h$ (Appendix \ref{appx:crk}). We
% used three different approximation strategies (Mode1, Mode2, Mode3) to
% extrapolate $k_{inj}$ in the time points $t_n+h/2$,$t_n+h$
% accordingly.

% The modification used in the CN method requires calculations of the
% variables in a staggered fashion. The membrane voltages are calculated
% at the whole time steps $t_n$, $t_n+h$... while the voltage-dependent
% membrane channel states are computed at the time point shifted by
% $h/2$ (Appendix \ref{appx:cn_staggered}). Thus the method requires
% information only from the half $t_{n}-h/2$ and full time points $t_n$
% in order to proceed to $t_{n}+h/2$ and $t_n+h$ accordingly. We have
% introduced an interpolation of the required variables $g_{K_{Ca}}$,
% $[Ca]$ at time $t_{n}-h/2$ in the Mode1, Mode2 or Mode3.

% Moreover we have observed that the accuracy is influenced rather by an
% accurate approximation of the variables of the fast component
% (electrical in this case). Indeed, slow change course of the
% biochemical component seems to have insignificant impact on the
% electrical dynamics while the fast changing variables of the
% electrical component have a cruicial impact on the biochemical system.

\subsection{ The BDF2-BDF2 coupling outperforms RK4-mCN on a fixed
  step size grid}
We have a stiff electrical component that is solved with the CN
numerical integration method on a staggered grid. The RK4 method is
applied to solve the biochemical
system~\citep{bhalla2011multiscale}. Both methods are applied on a
fixed step size grid. The BDF2 method can be also used on a fixed step
size grid however can be inefficient due to the additional cost of
iterations required for implicit methods. We want to compare two
approximation couplings, BDF2-BDF2 with RK4-mCN, on a fixed step size
grid in order to get a relative estimate for the BDF2 method
efficiency. The comparison was made on three different fixed step size
grids with $h_{elec}=h_{chem}=\{1.125\cdot 10^{-4};~5.625\cdot 10^{-5};~28.125\cdot 10^{-6}\}$,
%$4\cdot 10^5,8\cdot 10^5,1.6\cdot 10^6$ number of steps,
respectively. The first discretization corresponds approximately to
the one required in the fastest component when solved separately. The
results are shown on Figure~\ref{fig:fss-sol}.

\begin{figure}[!htb]
  \centering %\subfigure[Jacobi iteration] {
  \includegraphics[width=0.75\textwidth]{./figures/fixed-jacobi.eps}
  % \label{fig:jacobi}
  % } \\
  %   \subfigure[Gauss-Seidel iteration with chemical subsystem first]
  %   {
  %   \includegraphics[width=0.45\textwidth]{./figures/erkfirst-sol.pdf}
  %   \label{fig:erkfirt}
  % } \subfigure[Gauss-Seidel iteration with electric subsystem first]
  %   {
  %   \includegraphics[width=0.45\textwidth]{./figures/cellfirst-sol.pdf}
  %   \label{fig:cellfirst}
  % }
  \caption{Efficiency comparison between the RK4-mCN ('plus' markers)
    and the BDF2-BDF2 ('square' markers) discretization methods on
    fixed step size grids. The datapoints on each curve correspond to
$h_{elec}=h_{chem}=\{1.125\cdot 10^{-4};~5.625\cdot 10^{-5};~28.125\cdot 10^{-6}\}$, respectively.    
%the number of steps $(4\cdot 10^5,8\cdot 10^5,1.6\cdot 10^6)$.
 The
    dashed lines correspond to the first and second order
    declines. The simulations were performed with Jacobi organization
    between the electrical and the chemical component in the
    system. Second order polynomial was used to approximate exchanged
    variables (Mode 3).}
  \label{fig:fss-sol}
\end{figure}

First, we notice that the lowest order of accuracy in the RK4-mCN
coupling, that is, the second order of the mCN method, is not preserved
while the BDF2-BDF2 coupling follows the second-order decline. Then,
the solution obtained with BDF2-BDF2 coupling is more accurate
given equal number of the function
calls. % We also compared Mode~3 with
% Mode~1 in the current simulation setup (not shown). We observed the
% solution solved with RK4-mCN in Mode~3 was more accurate than in
% Mode~1 in average by 0.02\% and the solution solved with BDF2-BDF2 -
% by 0.04\%. However we did not observe any significant difference
% while comparing Jacobi with Gauss-Seidel (electrical system first)
% organizations in Mode~3. Accuracy measurements have been done based
% on the $e_{ca}$ value.

\subsection{PI-controller produces a smoother distribution of step
  sizes}\label{sec:pi-controller}
An optimal behavior of the step size controller is when the step sizes
that have to be taken do not have an extensive variation. Otherwise it
increases the number of times the step size controller has to redo the
step. A smooth distribution of the step sizes defines a good
performance of the controller.

We compare two types of controllers as described in
Section~\ref{sec:adaptive_solver}, I-controller and PI-controller on
Figure~\ref{fig:stepsizeController}. The PI-controller shows a much
smoother step size variation for our system. In our further
observations we are going to apply PI-controller with $a=0.7,b=0.4$.

Note, the stimulation period has to be resolved with very small time
steps. This is also the case for the solution obtained by
\textit{ode15s}. For the PI-controlled solution, 90\% of the taken
steps reside in this time interval.

\begin{figure}[!htb]
  \centering

  \begin{subfigure}[b]{0.45\textwidth}
    \includegraphics[width=\textwidth]{./figures/regular.pdf}
    \label{fig:stepsizeController Regular}
    \caption{The full simulation time interval}
  \end{subfigure}
  \begin{subfigure}[b]{0.45\textwidth}
    \includegraphics[width=\textwidth]{./figures/zoomed-regular.pdf}
    \label{fig:stepsizeController Zoomed}
    \caption{Simulation interval $\lbrack5.5\ s,9\ s\rbrack$}
  \end{subfigure}

  \caption{Comparing the step size taken by the I-controller and
    PI-controller, as a function of simulation time.}
  \label{fig:stepsizeController}
\end{figure}

\subsection{An appropriate approximation of exchanged variables should
  be considered}\label{sec:mode_comparison}
In this experiment we compare the constant extrapolation (Mode~1) with
the second order polynomial extrapolation of exchanged variables
(Mode~3) described in Section~\ref{sec:extrapolation}. We use Jacobi
organization of the components and adaptive step size controller with
the BDF2 approximation method to solve the system.  We present the
relative error of %\textit{P\_MAPK} ($e_{P\_MAPK}$)
% {\text{\textit{P\_MAPK}}}$)
\textit{Ka} ($e_{Ka}$) concentration at the end of the simulation
interval (Figure~\ref{fig:mode_ka}) and the relative error $e_{Ca}$
based on FFT analysis during the stimulation interval
(Figure~\ref{fig:mode_ca}) versus number of function calls to the
right hand side of the system of ODEs.

For the error estimates at the end of the simulation interval
$e_{Ka}$ we observe an asymptotic behavior in Mode~3 for $relTol$
values above $5\cdot 10^{-5}$ and only a first-order of coupling in
Mode~1. The loss of an asymptotic behavior suggests that Mode~1 with
Jacobi organization should be avoided in our system. This is valid
for both error estimates $e_{Ka}$ and $e_{P\_MAPK}$ (not
shown). Comparing the relative errors calculated during the
stimulation interval $e_{Ca}$ we observe a ``sensitive'' behavior of the
error on a given range of $relTol$. In both error estimates the
solution solved in Mode~3 appears to be more accurate than in Mode~1.


\begin{figure}[!htb]
  \centering
  % \begin{subfigure}[b]{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/adaptive_pmapk.eps}
  %   \label{fig:mode_pmapk}
  %   \caption{ The accuracy of the solution is based on the
  %   $e_{P\_MAPK}$ relative error measurements. }
  % \end{subfigure}
  \begin{subfigure}[b]{0.45\textwidth}
    \includegraphics[width=\textwidth]{./figures/adaptive_ka.eps}
    \caption{The accuracy of the solution is based on the $e_{Ka}$
      relative error measurements.}
    \label{fig:mode_ka}
  \end{subfigure}
  \begin{subfigure}[b]{0.45\textwidth}
    \includegraphics[width=\textwidth]{./figures/adaptive_fft.eps}
    \caption{The accuracy of the solution is based on the $e_{Ca}$
      relative error measurements.}
    \label{fig:mode_ca}
  \end{subfigure}
  \caption{Efficiency comparison between Mode~1 and Mode~3 with Jacobi
    organization between the components in the system. The datapoints
    on the figures correspond to the $relTol=\{10^{-3};~5\cdot
    10^{-4};~5\cdot 10^{-5};~5\cdot 10^{-6}\}$. The line with the 'plus'
    markers represent an error behavior while solving the system in
    Mode~1, the line with the 'square' markers - the system is solved
    in Mode~3. The dashed lines correspond to the first and second
    order declines. }
  \label{fig:mode_comparison}
\end{figure}

\subsection{Organization of system components can be considered}
\label{sec:organization_comparison}
We expect to have a strong influence of the fast component (electrical
system) on the slow component (biochemical system) during the
stimulation interval. In other words an accurate approximation of the
fast changing variables of the electrical system should be
considered. In this experiment we look at how different organizations
of the components can influence the accuracy of the simulation in our
system. We solve the system with adaptive step size controller and
measure an error corresponding to the $relTol=\{10^{-3};~5\cdot
10^{-4};~5\cdot 10^{-5};~5\cdot 10^{-6}\}$ values. In
Figure~\ref{fig:org_comparison} we present two subfigures based on
both relative error measurements, $e_{Ka}$ and $e_{Ca}$. The results
we observe are consistent with our expectations. Gauss-Seidel
organization in which the electrical component is the leading system
brings the coupled solution solved in Mode~1 to the more accurate
mode.

We also compared different organizations in Mode~3 (not shown). We
noticed a slight superiority of Gauss-Seidel organization with the
electrical component solved first at the datapoint $relTol=10^{-3}$ while
with the finest values of $relTol$ the difference was almost
indistinguishable. The observation requires further verifications.

% Figure~\ref{fig:changedrelTol} shows the relative error of the
% solution for different system organizations with respect to the
% specified accuracy. Allowing the computed state at each step be
% accurate to within 0.1\% (relTol=1e-3) the proposed method provides
% an accurate solution to 99\%.
% \begin{comment}
%   How to explain that we do not see any difference between
%   organization strategies
% \end{comment}

% The local error arises basically from two kinds of error, a
% discretization error and the error introduced by an approximation of
% the exchanged variables. The former error arises from the numerical
% approximation method, BDF2 in our case. The latter error is
% introduced by the second order polynomial predictor as described in
% the section~\ref{sec:extrapolation}. Since the Gauss-Seidel
% organization requires extrapolation only from one system component,
% we want to use this opportunity to perform as less approximations in
% the solution as possible.  Interestingly, only with the
% $relTol=7.5\cdot 10^{-4}$, Gauss-Seidel organization when the
% electrical system solved first gives us more accurate results.
% \textit{how to explain the influence of the controller choice on the
% extrapolation error?}

% Surprisigly, we do not observe an

% study how an extrapolation error shows up with different accuracy
% requirenment.s with different by comparing Jacobi with Gauss-Seidel
% organizations. % that directly affects
% % the local error control as described in the
% % section~\ref{sec:adaptive_solver}
% . Gauss-Seidel organization letting electrical system to be solved
% first shows ambigous results.  to We derive a conclusion that the
% local error is mosty defined by the discretization error rather than
% by the second order polynomial predictor used to approximate
% exchaged variables at each step.

\begin{figure}[!htbp]
  \centering

  % \begin{subfigure}{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/adaptive_pmapk_org.eps}
  %   \caption{The accuracy of the solution is based on the
  %   $e_{P\_MAPK}$ relative error measurements.}
  % \end{subfigure}
  \begin{subfigure}{0.45\textwidth}
    \includegraphics[width=\textwidth]{./figures/adaptive_ka_org.eps}
    \caption{The accuracy of the solution is based on the $e_{Ka}$
      relative error measurements.}
  \end{subfigure}
  \begin{subfigure}{0.45\textwidth}
    \includegraphics[width=\textwidth]{./figures/adaptive_fft_org.eps}
    \caption{The accuracy of the solution is based on the $e_{Ca}$
      relative error measurements.}
  \end{subfigure}

  \caption{Efficiency comparison between Jacobi ('plus' markers),
    Gauss-Seidel with electrical component solved first ('asterisk'
    markers) and Gauss-Seidel with biochemical component ('circle'
    markers) solved first organizations in Mode~1. The datapoints on
    the figures correspond to the $relTol=\{10^{-3};~5\cdot
    10^{-4};~5\cdot 10^{-5};~5\cdot 10^{-6}\}$. The dashed lines
    correspond to the first and second order declines. }
  \label{fig:org_comparison}
\end{figure}

\subsection{A fixed ratio of step sizes leads to a significant
  efficiency drop}\label{sec:fixed-ratio}
Multiscale problems usually span multiple time scales. The step sizes
required for numerical stability and desired accuracy usually differ
for different system components. One would think about separating
these components or letting them run independently in order to gain
efficiency by reducing the number of synchronization points. We
studied this idea by introducing a fixed ratio of steps between the
components in the system $h_{y_1}/h_{y_2}$ for each $H_n$.

The difference in the time scales between electrical and biochemical
components is a factor of 100. We forced the electrical component to take
100 steps per one step of a biochemical one before the synchronization
occurs. Unfortunately, this did not reduce the number of
synchronization points significantly, approximately by 2\%. However
the number of function calls was increased dramatically so that it
became inefficient to perform tests at this level.

Furthermore, the lower ratios were applied using the Jacobi and
Gauss-Seidel organizations. The results confidently show the desired
decrease of synchronization points paid for by a much more significant
increase in the computational cost. We discuss a possible explanation
for these results in Section~\ref{sec:discussion}.

\section{Discussion}\label{sec:discussion}
Several strategies for coupling multiple components on a behavioral
level have been proposed
lately~\citep{bhalla2011multiscale,mattioni2013integration}. One of
the main concerns about these strategies is the lack of a mathematical
justification. Here we bring up a problem of inefficiency and possible
numerical instability that may arise while coupling multiple
components comprising a multiscale system.

First, we introduced an implicit solver, two-step Backward
Differentiation formula (BDF2) as a possible alternative to the
conventional methods used in neuroscience. This numerical method has
appealed to our interest for several reasons. First, it was previously
introduced in a decoupled implicit integration form
by~\cite{skelboe2000accuracy}, its stability properties and error
propagation estimates were evaluated. Besides, suggested error
estimation mechanism for the method allowed us to use an adaptive
time-stepping algorithm. The power and applicability of the proposed
approach was demonstrated on a test multiscale model that was designed
as a prototype of the models used in the scope of interest.

Two components of the test multiscale system, electrical and
biochemical, were solved as a whole with very high $relTol$ and
$absTol$ values using the \textit{ode15s} numerical method in
MATLAB\textsuperscript{\textregistered}. Then the solutions of three
molecule concentrations ($Ca$, $Ka$, $P\_MAPK$) were taken as the
references. Two kinds of error measurements were used to evaluate the
proposed coupling methods: the one based on the FFT analysis during
the stimulation interval ($e_{Ca}$) and the one based on the final
value at the end of the simulation interval ($e_{Ka}$ and
$e_{P\_MAPK}$). Both of the approaches were based on individual
variable estimates. However, the former approach includes a part of
the solution in its calculations, whereas the latter one considers
only a point value. We evaluate the efficiency of the
obtained solution by presenting the relative error versus the number
of function calls to the right hand side of the system's ODEs. The
latter gave us a good estimate for the computational cost of the
methods.

% The error control is based on the *norm* of the solution. So the
% correct error measure would be the one including all 30+ variables,
% even those which are completely uninteresting. So we do not have
% control over the individual components.  In our estimation approach
% we used both of kinds of relative errors.

We started with comparing the proposed implicit solver with the
conventional methods used in neuroscience on a fixed step size
grid. We observed that the expected second order accuracy of the
modified Crank-Nicholson method was lost after its coupling with
the fourth order accurate Runge-Kutta method, while the accuracy of
the decoupled BDF2 formula was maintained.  We also achieved slightly
more accurate results than with the conventional methods on a given
range of fixed step size grids. These promising results indicated a
further direction of the research.

An adaptive step size algorithm was presented and implemented. We
analyzed different control solutions for an optimal step size
selection. We have focused on P- and PI-controller. PI-controller
showed less variation in the taken step sizes and thus allowed us to
obtain higher performance during the integration.

In Section~\ref{sec:mode_comparison} we tested whether the
approximation methods of the exchanged variables between the
components matter. We used two extrapolation strategies: a constant
extrapolation (Mode~1) and a second order polynomial (Mode~3) to
approximate the variables when required. We observed an advantage of
Mode~3 over Mode~1 with respect to the calculated relative error.

We investigated whether different organizations between the components
in our system have any effect on the accuracy of the solution. Given
the strong influence of the electrical component on the biochemical
one during the stimulation interval, we predicted that by letting the
electrical component lead the integration we could possibly avoid an
approximation error of the exchanged variables and improve the overall
performance. We observed the expected behavior while solving the
system in Mode~1 and did not notice any significant difference in
Mode~3. We conclude that the second order polynomial used in Mode~3
with the Jacobi organization can be as accurate as the Gauss-Seidel
organization with an appropriate ordering of the system components.

In Figure~\ref{fig:changedrelTol - With Fixed} we present our best
results obtained with an adaptive step size solver compared to the
solution obtained with the fixed step size methods coupling. First,
the proposed methods allowed us to obtain solution in a much more
efficient way under the minimum required parameters, that is the value
of $relTol$ in the adaptive step size solver and the size of the grid
in the fixed step solver (the first datapoint on each curve). The
value of the relative tolerance $relTol=10^{-3}$ was chosen according
to the order of the smallest solution component. The size of the fixed
step size grid was chosen according to the step size required to
follow the dynamics of the fast system, that is
$h_{elec}=h_{chem}=1.125\cdot 10^{-4}$.
% $10^{-4}$ or approximately $4\cdot 10^{5}$ number of steps
% accordingly.
Second, we have noticed that the coupling of the RK4 with the modified
Crank-Nicholson methods solved the system with at most first order
accuracy while the BDF2 coupled integration allowed us to obtain only
a second-order accuracy. Though this observation can be system dependent
or even may vary with the chosen method to compare the
errors. Finally, the results obtained with the proposed methods and
evaluated with the FFT error $e_{Ca}$ (the curves with the ``square''
and ``asterisk'' markers on Figure~\ref{fig:changedrelTol - With
  Fixed}) are not asymptotically valid on a given range of
$relTol$. One possible solution can be to replace a second order
polynomial predictor in the error estimation algorithm with the lower
order predictor, for instance a linear
predictor~\citep{skelboe2000accuracy}. This will introduce some
over-estimation of the error for each time step and lead to the
smaller step sizes with the cost of efficiency. We propose it to be a
personal choice between the efficiency and the nice monotonic behavior
in global error decay.

 

% organization results number of function calls morphology and other
% parameters

% We compared different organization strategies between the components
% in the multiscale system. We used Jacobi and Gauss-Seidel
% organizations. We did not notice any significant difference in
% respect with both accuracy and efficiency. \textit{what else can we
% talk here about?}.

% As a result, we gained a speed up of the simulations by at least an
% order of a magnitutde while keeping the solution relatively accurate
% (Figure~\ref{fig:changedrelTol - With Fixed}).

\begin{figure}
  \centering
  \includegraphics[width=0.95\textwidth]{./figures/final_fft_new.eps}
  \caption{Efficiency comparison between the proposed methods and the
    methods considered so far in neuroscience to solve multiscale
    problems. The curves with the ``square'' and ``asterisk'' markers
    correspond to the error estimates for the solutions obtained with
    the adaptive step size solver and the second order polynomial
    predictor, while the curve with the ``diamond'' markers corresponds
    to an adaptive step size solver and the linear predictor used
    during the error estimation step. The curve with the ``circle''
    markers represents the error estimates for the solutions solved on
    the fixed step size grids. The datapoints represent the solutions
    obtained with
    $relTol=\{10^{-3};~5\cdot 10^{-4};~5\cdot 10^{-5};~5\cdot
    10^{-6}\}$ and on the grid size with
    $h_{elec}=h_{chem}=\{1.125\cdot 10^{-4};~5.625\cdot
    10^{-5};~28.125\cdot 10^{-6}\}$, accordingly. An extra datapoint
    with $relTol=5\cdot 10^{-7}$ is represented for the solution obtained
    with Jacobi organization in Mode~3. 
    %with $relTol=10^{-3};5\cdot 10^{-4};5\cdot
    %10^{-5};5\cdot 10^{-6};5\cdot 10^{-7}\}$, respectively. % The
    % curve with the "diamonds" represent the error estimates The curve
    % with the "circle" markers - with the fixed step size solvers. 
    The dashed lines correspond to the first and second order
    declines.}
  \label{fig:changedrelTol - With Fixed}
\end{figure}

An implemented adaptive time stepping algorithm defines an optimal
integration step size equal for each component in the system so that
information is exchanged at each integration time step. Intuitively,
one would imagine using small steps for the fastest changing
components and larger steps for slow components, a so-called
\textit{multirate method}. The multirate approach can potentially
reduce the number of synchronization points between components while
keeping the discretization error within acceptable bounds. Trying to
mimic the multirate idea, we applied a fixed step size ratio, knowing
that the electrical and biochemical components have an approximate
time ratio of 1 to 100. The outcomes were rather pessimistic. The
larger amount of micro steps used in the electrical component causes
the macro step to be rejected very often. This leads to an almost
insignificant change in the number of synchronization points and an
enormous increase in function
evaluations. One of the possible explanations can be that the error
accumulated between the synchronization points started to dominate
over the discretization error. The latter causes the step size
controller to reduce the length of steps in order to keep the local
error within acceptable bounds.

In our test multiscale system we can determine which system component
sets the limit for the next step size. % The figure~\ref{fig:Limiting
% System} shows which component is more frequently and during which
% interval sets the limit on the next step size.
It is the chemical component that defines the step size during the
larger part of the simulation time. However it is the dynamics of the
electrical component that are computationally expensive due to the
required finer discretization during the stimulation interval. Thus
the electrical component is an active one during the stimulation and
the biochemical is an active component for the rest of the simulation.
Possibly% an altering activity of different components had
% to be taken into consideration in order to gain more
% efficiency. \cite{bartel2002multirate} shows that multirate can be
% achieved in stiff systems as well. Moreover
, considering an altering activity of different components, an
application of the dynamically adjusted ratio may speed up the
simulation.

% \begin{figure}[!htbp]
%   \centering

%   \begin{subfigure}[b]{0.45\textwidth}
%     \includegraphics[width=\textwidth]{./figures/time-limit.pdf}
%     \label{fig:Limiting system - Time}
%     \caption{Simulation time}
%   \end{subfigure}
%   \begin{subfigure}[b]{0.45\textwidth}
%     \includegraphics[width=\textwidth]{./figures/index-limit.pdf}
%     \label{fig:Limiting system - Index}
%     \caption{Time step}

%   \end{subfigure}
%   \caption{The limiting subsystem for a Jacobi iteration with
%   PI-controller.}
%   \label{fig:Limiting System}
% \end{figure}

% \begin{comment}
%   \textit{Any other explanation?}
% \end{comment}


% The results show both solutions, the one solving on a fixed step
% size grid using the RK4-Hines numerical approximation methods ( the
% curve on the Figure~\ref{fig:fss-sol} in red) and the BDF2-BDF2
% combination used with the adaptive step size controller (
% Figure~\ref{fig:changedrelTol} ).  The results show an obvious
% superiority of the proposed coupling method in respect to both
% efficiency and accuracy. \textit{generalization is required}


% \subsubsection*{Error estimation for the RK4 and the mCN methods}
% The RK4 and the mCN methods are numerical approximation methods
% normally used on a fixed step size grid. This class of methods do
% not require an error estimation
% mechanism. % To be able to use these methods
% % to solve multiscale problems efficiently and accurate an error
% % estimation mechanisms should be implemented.

% The usual way to provide an error estimation for a fixed step size
% methods is to use a step doubling
% technique~\citep{press2007numerical}. This technique implies the
% comparison of the two calculated solutions at a full and a half time
% steps. However, then for RK4 the number of function evaluations will
% be increased by a factor of 1.375 and for the mCN method - by a
% factor of 1.5.

% \textit{Runge-Kutta-Fehlberg} method is another approach used
% particular for Runge-Kutta methods~\citep{fehlberg1969low}. The idea
% is that the Runge-Kutta methods of orders four and five used
% together. Then the difference between these two approximations at
% each time step is used as an error estimation. The advantage of this
% method is in a reduced computation cost. The Runge-Kutta-Fehlberg
% method requires only six evaluations per time step, that is
% approximately 40\% decrease in the number of function evaluations in
% comparison with the pair of arbitrary fourth- and fifth-order
% methods.


% \textit{Multirate methods} are usually applied to the systems
% encompass different time scales.w Small steps are used for the
% fastest changing components and larger steps in slow
% components. % By introducing a finer grid in a fast system we could contribute to
% % the reduction of the discretization error. However i The local
% % error
% % seems to be bounded rather by the extrapolation error of the
% % exchanged variables, this approach should not reduce the number
% % of
% % synchronization points but rather increase computation cost. This
% % is
% % the results we observe. \textit{why then we do not see the
% % difference between the organization strategies?}
% Moreover, % More considerations are required
% % regarding this part of simulaion.  frequently set the limit on
% % the
% % step size.  Perhaps having an adaptive ratio between the
% % components
% % could benefit the results. Some of the "self-adjusting" multirate
% % time stepping strategies are described
% % in~\cite{savcenco2008multirate}.  Note that this that is
% % consistent
% % with the results presented in section~\ref{sec:pi-controller}.

% % Finally, the choice of an approximation strategy of the exhcanged
% % variables in the intermideate steps has a crucial impact on the
% % stability, the order of convergence and can lead to the effects
% % we
% % observe in section~\ref{sec:fixed-ratio}.

We have applied the proposed methods to a system in which the most
computationally expensive component is active only 1/9 of the total
simulation time. An application of the proposed methods to larger
biological systems should be considered in the future. The proposed
coupling methods and conclusions are valid for the systems given by
ODEs and DAEs. The paper did not address the numerical questions and
problems that can arise while coupling the models described by
different mathematical formalisms, for instance
stochastic-deterministic coupling.

\section*{Acknowledgments}
\begin{comment}
  1. Do any results look suspicious?  2. Do we want to add something
  more or redo some results?  3. Stronger motivation for BDF2 method,
  describe its stability region, adaptive step size controller?
  4. Instability signs, can we see it in P-controller? 5. How general
  our method can be? 6. Interpretation of the influence of the
  controller choice on the efficiency of different organizations.
\end{comment}
\clearpage
\begin{appendices}
  \section{Numerical methods}
  We focus on a class of numerical methods designed to solve ODEs with
  a given initial value (initial value problem, IVP). Given an IVP in
  the form:
  \begin{equation}
    y'=f(t,y),\qquad y(t_0) = y_0 \label{eq:IVP}
  \end{equation}

  then the easiest way to approximate a solution is to use a Forward
  Euler method (Explicit):
  \begin{equation}
    y_{n+1} \approx y_n + hf(t_n,y_n)
  \end{equation}
  Here, $y_n$ is the approximated solution at time point $n$; $h$ is
  the size of the time step and $t_n = hn$ is the $n$th time point. The
  Forward Euler method is a first order numerical method, which means
  that the global error is proportional to the step size of
  discretization $h$.
  \subsection{Crank-Nicholson on a staggered
    grid}\label{appx:cn_staggered}
  In 1983, Michael Hines formulated an efficient method for solving
  arbitrarily branched nerve equations with Hodgkin-Huxley (HH)
  kinetics~\citep{hines1984efficient}. The formulated idea uses the
  second-order implicit Crank-Nicholson (CN) finite difference
  numerical method to approximate membrane voltages at the whole time
  steps and HH membrane conductances at the midpoint of the time
  steps. A general form of a system and its discretization is
  represented by~\eqref{eq:nonlinear} and~\eqref{eq:cn_staggered}
  respectively.
  \begin{equation}\label{eq:nonlinear}
    \begin{aligned}
      &x'=A(y)x+b(y)\\
      &y'=c(x)+D(x)y,
    \end{aligned}
  \end{equation}

  \begin{equation}\label{eq:cn_staggered}
    \begin{aligned}
      &x_{n+1/2}=x_{n-1/2}+h[A(y_n)x_n+b(y_n)]\\
      &y_{n+1}=y_{n}+h[c(x_{n+1/2})+D(x_{n+1/2})y_{n+1/2}]\\
    \end{aligned}
  \end{equation}

  After replacing $x_n$ and $y_{n+1/2}$ using the midpoint rule and
  rearranging~\eqref{eq:cn_staggered} the following system is given:
  % \begin{equation}\label{eq:cn_staggered}
  %   \begin{aligned}
  %     &x_{n+1/2}=x_{n-1/2}+h[A(y_n)\frac{x_{n+1/2}+x_{n-1/2}}{2}+b(y_n)]\\
  %     &y_{n+1}=y_{n}+h[c(x_{n+1/2})+D(x_{n+1/2})\frac{y_{n+1}+y_{n-1}}{2}]\\
  %   \end{aligned}
  % \end{equation}

  \begin{equation}\label{eq:cn_staggered_mp}
    \begin{aligned}
      &(I-\frac{h}{2}A(y_n))x_{n+1/2}=(I+\frac{h}{2}A(y_n))x_{n-1/2}+hb(y_n)\\
      &(I-\frac{h}{2}D(x_{n+1/2}))y_{n+1}=(I+\frac{h}{2}D(x_{n+1/2}))y_{n}+hc(x_{n+1/2})\\
    \end{aligned}
  \end{equation}

  Let $x$ correspond to the membrane channel state variable and $y$ to
  the membrane voltage variable. Note that then only a second-order
  correct value of the membrane voltage at the midpoint time $t$ is
  required to integrate the voltage-dependent channel states from
  $t-1/2h$ to $t+1/2h$.

  This improvement allows the cable equation to be cast in a linear
  form% Then no interations that are normally
  % required to maintain the accuracy are no longer required and
  % second-order accuracy of the Crank-Nicholson method is preserved.
  , maintains second order accuracy of the method and eliminates the
  need of the expensive iterative approximation that an implicit
  method often requires.

  Crank-Nicholson method is a second-order accurate method in time,
  implicit and \textit{A-stable}, that is its stability domain
  includes the entire half-left plane in the complex plane.
  \subsubsection{Stability domain of the Crank-Nicholson method on a
    staggered grid}\label{appx:stability_cn_staggered}


  % For a system with only one compartment having a potential ${V}$
  % and one type of gate ${p}$, the Hines method would have the form:

  % \begin{equation}\label{eq:hines}
  %   \begin{aligned}
  %     p_{n+1/2}&=p_{n-1/2}+h\left(\alpha(V_n)-\beta(V_n)\frac{p_{n+1/2}+p_{n-1/2}}{2}\right)
  %     \\
  %     V_{n+1}&=V_n
  %     \frac{h}{C}\left(\frac{2E-V_n-V_{n+1}}{2}g(p_{n+1/2})\right)
  %   \end{aligned},
  % \end{equation}
  % where ${n+1/2}$ and ${n-1/2}$ are indices for half time steps.

  \subsection{Classical Runge-Kutta}\label{appx:crk}
  % If the right hand side of Equation \ref{eq:IVP} would be
  % independent of $y$, the IVP would have the exact solution
  % \begin{equation}
  %   y(X) = y_0+\int_{x_0}^Xf(x)\, dx
  % \end{equation}
  % The integral can for example be approximated by the midpoint rule
  % \begin{equation}
  %   y(X)\approx y_{n-1} +
  %   h_{n-1}f\left(x_{n-1}+\frac{h_{n-1}}{2}\right)
  % \end{equation}
  % which is a second order approximation. Here, $h_i = x_{i+1}-x_i$
  % and $x_0,x_1,...,x_n=X$ is a subdivision of the integrated
  % interval.

  % The increased order of accuracy inspired Runge (1895) to try to
  % extend this method to approximate the original problem in Equation
  % \ref{eq:IVP}.

  % The first step of this new method, would be expressed as
  % \begin{equation}
  %   y(x_0+h)\approx
  %   y_0+hf\left(x_0+\frac{h}{2},y\left(x_0+\frac{h}{2}\right)\right)
  % \end{equation}
  % The question now is how to approximate
  % $y\left(x_0+\frac{h}{2}\right)$. Again, the Explicit Euler is the
  % easiest approach, which results in the approximation
  % \begin{equation}\label{eq:RK2}
  %   \begin{aligned}
  %     &k_1 = f(x_0,y_0)\\
  %     &y_1 = y_0 + hf\left(x_0 + \frac{h}{2}, y_0
  %       +\frac{h}{2}k_1\right)
  %   \end{aligned}
  % \end{equation}
  % This can be shown to be a second order approximation, i.e. a
  % method of higher order than the Explicit Euler.

  In 1901, Kutta applied the midpoint rule and the explicit Euler
  approximation to the original problem in Equation~\ref{eq:IVP} and
  thus introduced a class of numerical methods that differ in the
  order of accuracy.

  One of the Runge-Kutta methods has been more popular than the others
  in neuroscience. It is the fourth order Runge-Kutta method shown in
  Equation \ref{eq:RK4}, and is often referred to as Classical
  Runge-Kutta method.
  \begin{equation} \label{eq:RK4} y_{n+1} \approx y_n +
    \frac{h}{6}\left( k_1 + 2k_2 + 2k_3 + k_4\right)
  \end{equation}
  where
  \begin{equation}\label{eq:RK coeffs}
    \begin{aligned}
      &k_1 = f(t_n,y_n)\\
      &k_2 = f\left(t_n+\frac{h}{2},y_n+\frac{h}{2}k_1 \right) \\
      &k_3 = f\left(t_n+\frac{h}{2},y_n+\frac{h}{2}k_2 \right) \\
      &k_4 = f\left(t_n+h,y_n+hk_3 \right)
    \end{aligned}
  \end{equation}

  The Classical Runge-Kutta method is an explicit numerical method
  known to have a bounded stability region. For a stiff problem this
  causes the solution to oscillate if a small enough discretization
  time step has not been chosen.

  \subsection{Backward Differentiation Formula}\label{appx:bdf2}
  Equation \ref{eq:IVP} can be solved either using integral approach
  or using methods based on differentiation, so called Backward
  differentiation Formula (BDF) methods. These methods were introduced
  by~\cite{curtiss1952integration}.

  % Backward differentiation formulas form a class of methods differ
  % by the $k$ number of approximated previous steps it considered in
  % the interpolation polynomial and by the stability properties.  are
  % based on the contructing an interpolation polynomial having $k$
  % approximations of the previous steps. The number following the
  % method name correspons to the number of interoplation points it
  % should consider.

  % Assume that there exist approximations to the ODE in Equation
  % \ref{eq:IVP}, $y_{n-k+1},...,y_n$ for previous time steps. Using
  % these, we can construct a interpolation polynomial $q(x)$ as
  % \begin{equation}
  %   q(x) = q(x_n + sh) =
  %   \sum_{j=0}^k(-1)^j\left(\begin{tabular}{c}$-s+1$ \\ $j$
  %     \end{tabular} \right)\nabla^jy_{n+1}
  % \end{equation}

  % The unknown value $y_{n+1}$ will be determined such that the
  % polynomial $q(x)$ satisfy the differential equation in at least
  % one grid point, i.e.
  % \begin{equation}
  %   q'(x_{n+1-r})=f(x_{n+1-r},y_{n+1-r})
  % \end{equation}
  % The choice $r=1$, gives after some manipulations an implicit
  % formulation on the form
  % \begin{equation}
  %   \sum_{j=1}^k\frac{1}{j}\nabla^jy_{n+1}=hf_{n+1}
  % \end{equation}
  % for a uniform distributed grid with step size $h$. The number $k$
  % determines how many points the interpolation should contain.

  % In this thesis, we are mainly interested in the two-step backward
  % differentiation formula, called BDF2. %


  Backward differentiation formulas with an order less than three are
  usually used for stiff systems due to their property of being
  A-stable. Therefore we are interested in the second-order backward
  differentiation formula (BDF2). On an equidistant grid the BDF2
  formula has the form:
  \begin{equation}
    \frac{3}{2}y_{n+1}-2y_{n} + \frac{1}{2}y_{n-1} = hf_{n+1}
  \end{equation}
  On a non-uniform grid the BDF2 method can be formulated as:
  \begin{equation}
    y_{n+1}=\alpha_1y_n+\alpha_2y_{n-1}+\beta h_{n+1}f(t_{n+1},y_{n+1})
  \end{equation}
  where
  \begin{align}
    &\gamma_{n+1}=h_{n+1}/h_n\\
    &\alpha_1=1-\alpha_2\\
    &\alpha_2 = -\gamma_{n+1}^2/(2\gamma_{n+1}+1)\\
    &\beta=(\gamma_{n+1}+1)/(2\gamma_{n+1}+1)
  \end{align}
  % Then Newton-type interations methods are usually applied to solve
  % nonlinearity.

  % Unfortunately, stability results of the BDF2 method are only known
  % for constant step sizes.

  % On an uniform grid, the method is A-stable and has a stability
  % domain shown in Figure \ref{fig:bdf-stab-domain}. It can
  % analytically be shown that the root locus curve $\mu$ has no roots
  % on the left hand part of the complex plane. The root locus curve
  % is given by
  % \begin{equation}
  %   \mu = (1-\exp(-i\theta))+\frac{1}{2}(1-\exp(-i\theta)^2)
  % \end{equation}
  % This has the real part of
  % \begin{equation}
  %   Re(\mu) = \frac{3}{2}-2\cos(\theta)+ \frac{1}{2}
  %   \cos(2\theta)\geq0
  % \end{equation}
  % and the A-stability is given.

  % \begin{figure}
  %   \centering
  %   \includegraphics[width=0.45\textwidth]{figures/stabilityRegionBDF.png}
  %   \caption{The stability domain of the BDF2 method lies outside
  %   the plotted curve.}
  %   \label{fig:bdf-stab-domain}
  % \end{figure}
  \section{An example of instability in the coupled system}
  \label{appx:instability}
  \section{Mathematical formulation of the system components}
\label{appx:models}
\subsection{Electrical component}\label{appx:electrical}
The electrical component of the modeled system can be described by the
electrical equivalent circuits of the neuron's compartments shown in
Figure~\ref{fig:eec_neuron} and the corresponding system of
equations~\eqref{eq:ec}.
\begin{figure}[htb!]
  % \begin{subfigure}[c]{0.8\textwidth}
  %   \includestandalone[width=\textwidth]{figures/cell_iec}
  %   \caption{Electrical circuit of a cell}
  % \end{subfigure}
  % \par\bigskip
  \begin {subfigure}[l]{0.95\textwidth}
    \centering
    \includestandalone[scale=0.6]{figures/soma_iec}
    \caption{Soma}
  \end{subfigure}
  \par\bigskip

  \begin {subfigure}[l]{0.95\textwidth}
    \centering
    \includestandalone[scale=0.6]{figures/dendrite_iec}
    \caption{Dendrite ($\mathrm{i_{th}}$ subcompartment)}
  \end{subfigure}
  \par\bigskip

  \begin {subfigure}[l]{0.95\textwidth}
    \centering
    \includestandalone[scale=0.6]{figures/spine_iec}
    \caption{Spine}
  \end{subfigure}
  \caption{Electrical equivalent circuits of the neuron's
    compartments.}
  \label{fig:eec_neuron}
\end{figure}


\begin{equation}
  \begin{aligned}
    &C_i\frac{dV_i}{dt}=\sum_{j\in\mathcal{N}_i}\frac{
      (V_j-V_i)}{R_{a_{ij}}}+\frac{(E_{m_i}-V_i)}{R_{m_i}}+\sum_{s\in
      \mathcal{M}_i}f_s(V_i,[S]_{in},[S]_{out})g_s(p_a,p_i)+I_{inj_i}\\
    &\frac{dp}{dt}=\alpha_p(V_i)-(\alpha_p(V_i)+\beta_p(V_i))p,
  \end{aligned}
  \label{eq:ec}
\end{equation}
where $p \in \{p_a,p_i\},\,p_a \in \{m,n,r\},\,p_i \in \{h,s\}$.

The functions $f_s$ and $g_s$ are ion channel type dependent. Their
definitions can be found in Table~\ref{tbl:fs_fun}.

\begin{table}[h!]
  {\scriptsize
    \noindent
    \begin{tabular}{| l | l | l | l |}
      \hline
      Compartment name & Ion Channel (s) & $f_s$ & $g_s$ \\ \hline
      \multirow{2}{*}{soma} & Sodium (Na)& $(E_{Na}-V_1)$& $\bar g_{Na}m^3h$\\
      & Potassium (K)& $(E_K-V_1)$ & $\bar g_{K}n^4$ \\ \hline
      \multirow{2}{*}{spine} & Calcium (Ca)& $(\frac{R K}{2F} \log\frac{[Ca]_{out}}{\mathbf{[Ca]_{in}}}-V_{3})$ & $\bar g_{Ca} r s^2$ \\
      & Calcium dependent
      potassium ($K_{Ca}$) & $(E_K-V_{3})$& $\bar g_{K_{Ca}}\mathbf{\frac{[Ka]}{[Ka]^*}}$\\
      \hline
    \end{tabular}
  }
  \caption{Description of the ion channel types in each compartment,
    if present, with the given functions $f_s$ and $g_s$ accordingly.}
  \label{tbl:fs_fun}
\end{table}



% \begin{equation}
%   \begin{aligned}
%     f_{Ca}&=(\frac{R K}{2F} log\frac{[Ca]_{out}}{[Ca]_{in}}-V_3)\\
%     g_{Ca}&= \bar g_{Ca} \cdot p_1 \cdot p_2^2\\
% %     \alpha_{p_1}&=\left\{
% %       \begin{array}{l l}
% %         0.005/\exp(0.05(V_3+0.07)) & \quad \text {if $V_3 >
% %         -0.07$} \\
% %         0.005 & \quad \text {otherwise}
% %       \end{array}
% %     \right. \\
% %     \beta_{p_1}&=0.005-\alpha_{p_1}(V_3)\\
% %     \alpha_{p_2}&=1.6/(1+\exp(-(0.005+V_3)/0.01389))\\
% %     \beta_{p_2}&=\left\{
% %       \begin{array}{l l}
% %         0.005(1-(V_3+0.0189)/0.005/2) & \quad \text {if
% %         $|(V_3+0.0189)/0.005| < 10^{-6}$} \\
% %         (V_3+0.0189)/(\exp((V_3+0.0189)/0.005)-1) & \quad \text
% %         {otherwise}
% %       \end{array}
% %     \right. \\\\
%   \end{aligned}
% \end{equation}
% \begin{equation}
%   \begin{aligned}
%     f_{K_{Ca}}&=(E_K-V_3)\\
%     g_{K_{Ca}}&=\bar g_{K_{Ca}}\mathbf{\frac{K_{Ca}}{K_{total}}}
%   \end{aligned}
% \end{equation}

The rate constants $\alpha_p$ and $\beta_p$, at which a closed gate $p$
transits to an open state and at which an open gate $p$ transits to
the closed state, respectively, are voltage dependent and are
calculated according to the formulas from Table~\ref{tbl:rates}.
\begin{table}[h!]
  {\scriptsize
    \noindent
    \begin{tabular}{|l|l|}
      \hline
      Rate constant, $\alpha_p(V_i)$ & Rate constant, $\beta_p(V_i)$\\
      \hline 
      $\alpha_h=\frac{70.0}{\exp((65.0+V_1)/20.0)}$ & $\beta_h=\frac{1000.0}{1+\exp((-35.0-V_1)/10.0)}$\\[20pt]
      $\alpha_m=
      \left\{ \begin{array}{l l}
          10.0(1+2(V_1+40.0)/10.0) & \quad \text{if $\left|\frac{-(V_1+40.0)}{10.0}\right| < 1e-6,$}\\ 
          \frac{100.0(-V_1-40.0)}{\exp((-V_1-40.0)/10.0)-1} & \quad \text{if $\left|\frac{-(V_1+40.0)}{10.0}\right| \geq 1e-6.$ }
        \end{array}\right.$
      & $\beta_m=\frac{4000.0}{\exp((65.0+V_1)/18.0)}$\\[20pt]
      $\alpha_n=
      \left\{ \begin{array}{l l}
          10.0(1+2(V_1+55.0)/10.0) & \quad \text{if $\left|\frac{-(V_1+55.0)}{10.0}\right| < 1e-6,$}\\ 
          \frac{10.0(-V_1-55.0)}{\exp((-V_1-55.0)/10.0)-1} & \quad \text{if $\left|\frac{-(V_1+55.0)}{10.0}\right| \geq 1e-6.$ }
        \end{array} \right.$
      & $\beta_n=\frac{125.0}{\exp((65.0+V_1)/80.0)}$\\[20pt]
      $\alpha_r=\left\{ \begin{array}{l l}
          5.0 & \quad \text{if $V_3 \leq -70.0,$}\\ 
          \frac{5.0}{\exp(0.05(V_3+70.0))} & \quad \text{if $V_3>-70.0.$}
        \end{array}\right.$ & $\beta_r=5-\alpha_r$\\[20pt]
      $\alpha_s=\frac{1600.0}{1+\exp((-5.0-V_3)/13.89)}$ & 
      $\beta_s=
      \left\{ \begin{array}{l l}
          5.0(1-2(V_3+18.9)/5.0) & \quad \text{if $\left|\frac{V_3+18.9}{5.0}\right| < 1e-6,$}\\ 
          \frac{20.0(V_3+18.9)}{\exp((V_3+18.9)/5.0)-1} & \quad \text{if $\left|\frac{V_3+18.9}{5.0}\right| \geq 1e-6.$ }
        \end{array}\right.$\\
      \hline
    \end{tabular}
  }
  \caption{Voltage dependent rate functions for different
    gates. Voltage is expected in [mV] units.}
  \label{tbl:rates}
\end{table}

The membrane capacitance $C$ of the compartment $i$ and the axial
resistance between the compartments $i$ and $j$ are given
by~\eqref{eq:ci} and \eqref{eq:ra}, respectively.
\begin{equation}
  C_i=C_M\cdot A_i
  \label{eq:ci}
\end{equation}
\begin{equation}
  R_{a_{ij}}=\frac{R_{a_i}+R_{a_j}}{2},
  \label{eq:ra}
\end{equation}
where $R_{a_i}=\frac{4.0l_iR_A}{\pi d_i^2}$. The definitions and the
values of the parameters can be found in Table~\ref{tbl:dims} and
Table~\ref{tbl:el_params}.

\begin{table}[h!]
  \centering
  {\scriptsize

  \begin{tabular}{|l|l|c|l|c|c|c|}
    \hline
    index & Compartment name& \# of subcompartments & Shape & Length ($l_i$) [$\mu$m] & Diameter ($d_i$) [$\mu$m] & Area ($A_i$)\\\hline
    1 &Soma &- &sphere&  -	&30& $\pi d_1^2$\\
    2 &Dendrite &15 &cylinder 	& 500&1& $\pi d_2l_{2}$	\\
    3 &Spine &-&sphere& -	& 1& $\pi d_3^2$\\
    \hline
  \end{tabular}
}
\caption{Geometric dimensions of the modeled neuron.} 
\label{tbl:dims}
\end{table}


\begin{table}[h!]
  {\scriptsize
    \noindent
    \begin{tabular}{|c|l|c|c|}
      \hline
      Parameter&Name&Value&Unit
      \\ \hline
      $C_M$&Specific membrane capacitance&$0.01$&$\frac{F}{\mathrm{m}^2}$\\
      % $C_2$&Dendrite membrane capacitance&$2.0\times10^{-11}$&F\\
      % $C_3$&Spine membrane capacitance&$0.004\times10^{-11}$&F\\
      $R_A$&Specific axial resistance&$0.354$&$\Omega\cdot$m\\
      $E_{m_1}=E_{m_2}=E_{m_3}$&Membrane leakage potential&-0.0594&V\\
      $R_{m_1}$&Soma membrane resistance&$8.333\times10^{-8}$&$\Omega$\\
      $R_{m_2}$&Dendrite membrane resistance&$1.5\times10^{-9}$&$\Omega$\\
      $R_{m_3}$&Spine membrane resistance&$7.5\times10^{-11}$&$\Omega$\\
      % $R_{a_2}$&Dendrite axial
      % resistance&$500.0\times10^{-6}$&\Omega\\
      % $R_{a_3}$&Spine axial resistance&$1.0\times10^{-6}$&\Omega\\
      $E_{Na}$&Nernst equilibrium sodium potential&0.05&V\\
      $E_{K}$&Nernst equilibrium potassium potential&-0.077&V\\
      % $E_{Ca}$&Nernst equilibrium calcium potential&0.07&V\\
      $\bar{g}_{Na}$&Maximum sodium conductance&$7.4\times10^{-7}$&S\\
      $\bar{g}_{K}$&Maximum potassium conductance&$7.4\times10^{-8}$&S\\
      $\bar{g}_{Ca}$&Maximum calcium conductance&$6.5\times10^{-12}$&S\\
      $\bar{g}_{K_{Ca}}$&Maximum conductance of $Ca^{2+}$-dependent K-channels&$3.2\times10^{-10}$&S\\
      $I_{inj_1}$&Current injected to soma&$0.09\times10^{-9}$&A\\
      $I_{inj_2}=I_{inj_3}$&Injected current to the other compartments&0.0&A\\
      $[Ca]_{out}$&Concentration of $Ca^{2+}$ in the extracellular fluid&$2.0\times10^{-3}$&M\\
      $F$& Faraday's constant &$9.6485309\times10^4$&$\mathrm{C}\cdot \mathrm{mol}^{-1}$\\
      $K$& Temperature in Kelvin &279.45&K\\
      $R$& Universal gas constant&8.31441&$\mathrm{J}\cdot \mathrm{K}^{-1}\cdot \mathrm{mol}^{-1}$\\
      \hline
    \end{tabular}
  }
  \caption{Electrical model parameter values and descriptions.}
  \label{tbl:el_params}
\end{table}

 
\subsection{Biochemical component}\label{appx:biochemical}
The biochemical model is described by the set of chemical reaction
rate equations represented in Table~\ref{tbl:rreq}. Each molecule
concentration can then be solved with the corresponding ODE
(32)--(49). The molecule index and its initial concentration value
can be found in Table~\ref{tbl:init_values}.

\begin{table}[h!]
{\scriptsize
  \begin{tabular}{|@{\makebox[1.5em][r]{\rownumber \space}} |l|c|c|c|}
\hline
 \multicolumn{1}{l}{Reaction} &  {$k_\alpha\mathrm{ [1/(M\cdot s)]}$} & {$k_\beta\mathrm{ [1/(M\cdot s)]}$} & {$k_\gamma\mathrm{ [1/(M\cdot s)]}$}\\
\hline
% 1. 2Ca+Raf<=>Active_Raf
% 2. Active_Raf+MAPK<=>AM=>P_MAPK+Active_Raf
% 3. P_MAPK+Phosphotase<=>PP=>MAPK+Phosphotase
% 4. P_MAPK+K_A<=>PK=>P_K_A+P_MAPK
% 5. P_K_A=>K_A
% 6. PKC+2AA<=>Act_PKC
% 7. AA<=>APC
% 8. P_MAPK+APC<=>PA=>AA+P_MAPK
% 9. MAPK+Act_PKC<=>MA=>P_MAPK+Act_PKC
% 10. Ca+pmca<=>pmcaCa=>pmca
\qquad
\ce{Ca^2+ + Raf
 <=>[k_\alpha][k_\beta]{\ce{Active\_Raf}}}
&4e12 & 8.0 &-\\
\qquad
\ce{Active\_Raf + MAPK
 <=>[k_\alpha][k_\beta]\ce{\text{Active\_Raf--MAPK}} ->[k_\gamma][]\ce{Active\_Raf + P\_MAPK  }}
&0.025090663e8 & 40.0 &10.0\\
\qquad
\ce{Phosphatase + P\_MAPK 
 <=>[k_\alpha][k_\beta]\ce{\text{Phosphatase--P\_MAPK}} ->[k_\gamma][]\ce{Phosphatase + MAPK}}
& 0.501831326e8 &0.4 &0.1\\
\qquad
\ce{P\_MAPK + Ka
 <=>[k_\alpha][k_\beta]\ce{\text{P\_MAPK--Ka}} ->[k_\gamma][]\ce{P\_MAPK + P\_Ka }}
&0.050184337e8 &40.0 &10.0\\
\qquad
\ce{P\_Ka ->[k_\alpha][]\ce{Ka}}
&0.05 & - &-\\
\qquad
\ce{PKC + 2AA
 <=>[k_\alpha][k_\beta]{\ce{Active\_PKC}}}
&1e12 &2.0 &-\\
\qquad
\ce{AA <=>[k_\alpha][k_\beta]\ce{APC}}
&0.2 &0.01 &-\\
\qquad
\ce{P\_MAPK + APC
 <=>[k_\alpha][k_\beta]\ce{\text{P\_MAPK--APC}} ->[k_\gamma][]\ce{P\_MAPK + AA}}
&0.250918674e8 &20.0 &5.0\\
\qquad
\ce{Active\_PKC + MAPK
 <=>[k_\alpha][k_\beta]\ce{\text{Active\_PKC--MAPK}} ->[k_\gamma][]\ce{Active\_PKC + P\_MAPK}}
& 0.050184337e8 &4.0 &1.0\\
\qquad
\ce{PMCA + Ca
 <=>[k_\alpha][k_\beta]\ce{\text{PMCA--Ca}} ->[k_\gamma][]\ce{PMCA}}
&0.06e9 &7.0 &5.0\\
\hline
\end{tabular}
}
\caption{The reaction scheme of the biochemical model and the
  corresponding rate values $k_{\alpha},~k_{\beta}$ and $k_{\gamma}$.}
\label{tbl:rreq}
\end{table}


{\scriptsize
  \begin{align}
    &\frac{d[S_1]}{dt}=-2k_{\alpha_1}[S_1]^2[S_2]+2k_{\beta_1}[S_3]-k_{\alpha_{10}}[S_1][S_{17}]+k_{\beta_{10}}[S_{18}]+\mathbf{k_{inj}}\\
    &\frac{d[S_2]}{dt}=-k_{\alpha_1}[S_1]^2[S_2]+k_{\beta_1}[S_3]\\
    &\frac{d[S_3]}{dt}=k_{\alpha_1}[S_1]^2[S_2]-k_{\beta_1}[S_3]-k_{\alpha_2}[S_3][S_4]+(k_{\beta_2}+k_{\gamma_2})[S_5]\\
    &\frac{d[S_4]}{dt}=-k_{\alpha_2}[S_3][S_4]+k_{\beta_2}[S_5]+k_{\gamma_3}[S_8]-k_{\alpha_9}[S_4][S_{13}]+k_{\beta_9}[S_{16}]\\
    &\frac{d[S_5]}{dt}=k_{\alpha_2}[S_3][S_4]-(k_{\beta_2}+k_{\gamma_2})[S_5]\\
    &\frac{d[S_6]}{dt}=k_{\gamma_2}[S_5]-k_{\alpha_3}[S_6][S_7]+k_{\beta_3}[S_8]-k_{\alpha_4}[S_6][S_9]+(k_{\beta_4}+k_{\gamma_4})[S_{10}]-k_{\alpha_8}[S_6][APC]+(k_{\beta_8}+k_{\gamma_8})[S_{15}]+k_{\gamma_9}[S_{16}]\\
    &\frac{d[S_7]}{dt}=-k_{\alpha_3}[S_6][S_7]+(k_{\beta_3}+k_{\gamma_3})[S_8]\\
    &\frac{d[S_8]}{dt}=k_{\alpha_3}[S_6][S_7]-(k_{\beta_3}+k_{\gamma_3})[S_8]\\
    &\frac{d[S_9]}{dt}=-k_{\alpha_4}[S_9][S_6]+k_{\beta_4}[S_{10}]+k_{\alpha_5}[S_{11}]\\
    &\frac{d[S_{10}]}{dt}=k_{\alpha_4}[S_9][S_6]-(k_{\beta_4}+k_{\gamma_4})[S_{10}]\\
    &\frac{d[S_{11}]}{dt}=k_{\gamma_4}[S_{10}]-k_{\alpha_{5}}[S_{11}]\\
    &\frac{d[S_{12}]}{dt}=-k_{\alpha_6}[S_{14}]^2[S_{12}]+k_{\beta_6}[S_{13}]\\
    &\frac{d[S_{13}]}{dt}=k_{\alpha_6}[S_{14}]^2[S_{12}]-k_{\beta_6}[S_{13}]-k_{\alpha_9}[S_4][S_{13}]+(k_{\beta_9}+k_{\gamma_9})[S_{16}]\\
    &\frac{d[S_{14}]}{dt}=-2k_{\alpha_6}[S_{14}]^2[S_{12}]+2k_{\beta_6}[S_{13}]-k_{\alpha_7}[S_{14}]+k_{\beta_7}[APC]+k_{\gamma_8}[S_{15}]\\
    &\frac{d[S_{15}]}{dt}=k_{\alpha_8}[S_6][APC]-(k_{\beta_8}+k_{\gamma_8})[S_{15}]\\
    &\frac{d[S_{16}]}{dt}=k_{\alpha_9}[S_4][S_{13}]-(k_{\beta_9}+k_{\gamma_9})[S_{16}]\\
    &\frac{d[S_{17}]}{dt}=-k_{\alpha_{10}}[S_1][S_{17}]+(k_{\beta_{10}}+k_{\gamma_{10}})[S_{18}]\\
    &\frac{d[S_{18}]}{dt}=k_{\alpha_{10}}[S_1][S_{17}]-(k_{\beta_{10}}+k_{\gamma_{10}})[S_{18}]
  \end{align}
  \label{tbl:chem_eqs}

}


\begin{table}[h!]
{\scriptsize
  \begin{tabular}{|@{\makebox[1.5em][r]{\rownumber\space}} | l | c|}
    \hline
    \multicolumn{1}{l}{Molecule (\(\text{S}_i \))} & {Initial concentration [M]} \\
    \hline
    Ca & $48/N_A/vol$ \\ %ca 1.
    Raf &  $600/N_A/vol$ \\ %raf      2.  
    Active\_Raf &  0.0 \\   %araf         3.
    MAPK &    1e-6 \\ %mapk        4.
    Active\_Raf--MAPK &    0.0 \\ %mapk_araf      5.
    P\_MAPK &    0.0 \\%pmapk          6.
    Phosphatase &    $300/N_A/vol$ \\ %phsph    7.
    Phosphatase--P\_MAPK &    0.0 \\ %pmapk_phsph    8.
    Ka &    $600/N_A/vol$ \\ %ka        9.
    P\_MAPK--Ka &   0.0 \\ %ka_pmapk       10.
    P-Ka &    0.0 \\ %pka            11.
    PKC   & 1e-6 \\ %pkc          12.
    Active\_PKC &    0.0 \\  %apkc          13.
    AA    &0.0 \\ %aa             14. 
    P\_MAPK--APC &    0.0 \\ %apc_pmapk      15.
    Active\_PKC--MAPK &   0.0 \\ %mapk_apkc         16.
    PMCA &    $1950/N_A/vol$ \\  %pmca    17.
    PMCA--Ca  & $375/N_A/vol$ \\ %pmcaCa     18.
    \hline
\end{tabular}
}
\caption{Initial values of the molecule concentrations.}
\label{tbl:init_values}
\end{table}

\begin{table}[h!]
{\scriptsize
  \begin{tabular}{|c|c|c|}
\hline
{Parameter} & {Value} & Unit \\
\hline
$APC$
& 1e-6 &M\\

$N_A$ & 6.02214e23 &$\mathrm{mol^{-1}}$\\

$vol$ & 1e-15 &L \\
\hline
\end{tabular}
}
\caption{The biochemical model parameter values and descriptions.}
\label{tbl:chem_params}
\end{table}



% $\underset{\ext{amphoteres Hydroxid}}{\ce{Zn(OH)2 v}}$
% <=>C[+2OH-][{+ 2H+}]
% $\underset{\text{Hydroxozikat}}{\cf{[Zn(OH)4]^2-}}$ }

% \begin{equation}
% \begin{aligned}
% %   2[Ca]+[Raf] \rightleftharpoons[]{k_f} [Active\ Raf]\\
%   \dot{[S]_i}=\sum_{r}(k_{r_i}\prod_{j}[S]_{j}), \quad
% \end{aligned}
% \end{equation}
\end{appendices}

\clearpage \bibliographystyle{plos2009} \bibliography{main}
\end{document}
