% Template for PLoS
% Version 3.0 December 2014
%
% To compile to pdf, run:
% latex plos.template
% bibtex plos.template
% latex plos.template
% latex plos.template
% dvipdf plos.template
%
% % % % % % % % % % % % % % % % % % % % % %
%
% -- IMPORTANT NOTE
%
% This template contains comments intended 
% to minimize problems and delays during our production 
% process. Please follow the template instructions
% whenever possible.
%
% % % % % % % % % % % % % % % % % % % % % % % 
%
% Once your paper is accepted for publication, 
% PLEASE REMOVE ALL TRACKED CHANGES in this file and leave only
% the final text of your manuscript.
%
% There are no restrictions on package use within the LaTeX files except that 
% no packages listed in the template may be deleted.
%
% Please do not include colors or graphics in the text.
%
% Please do not create a heading level below \subsection. For 3rd level headings, use \paragraph{}.
%
% % % % % % % % % % % % % % % % % % % % % % %
%
% -- FIGURES AND TABLES
%
% Please include tables/figure captions directly after the paragraph where they are first cited in the text.
%
% DO NOT INCLUDE GRAPHICS IN YOUR MANUSCRIPT
% - Figures should be uploaded separately from your manuscript file. 
% - Figures generated using LaTeX should be extracted and removed from the PDF before submission. 
% - Figures containing multiple panels/subfigures must be combined into one image file before submission.
% See http://www.plosone.org/static/figureGuidelines for PLOS figure guidelines.
%
% Tables should be cell-based and may not contain:
% - tabs/spacing/line breaks within cells to alter layout or alignment
% - vertically-merged cells (no tabular environments within tabular environments, do not use \multirow)
% - colors, shading, or graphic objects
% See http://www.plosone.org/static/figureGuidelines#tables for table guidelines.
%
% For tables that exceed the width of the text column, use the adjustwidth environment as illustrated in the example table in text below.
%
% % % % % % % % % % % % % % % % % % % % % % % %
%
% -- EQUATIONS, MATH SYMBOLS, SUBSCRIPTS, AND SUPERSCRIPTS
%
% IMPORTANT
% Below are a few tips to help format your equations and other special characters according to our specifications. For more tips to help reduce the possibility of formatting errors during conversion, please see our LaTeX guidelines at http://www.plosone.org/static/latexGuidelines
%
% Please be sure to include all portions of an equation in the math environment.
%
% Do not include text that is not math in the math environment. For example, CO2 will be CO\textsubscript{2}.
%
% Please add line breaks to long display equations when possible in order to fit size of the column. 
%
% For inline equations, please do not include punctuation (commas, etc) within the math environment unless this is part of the equation.
%
% % % % % % % % % % % % % % % % % % % % % % % % 
%
% Please contact latex@plos.org with any questions.
%
% % % % % % % % % % % % % % % % % % % % % % % %

\documentclass[10pt,letterpaper]{article}
\usepackage[top=0.85in,left=2.75in,footskip=0.75in]{geometry}

% Use adjustwidth environment to exceed column width (see example table in text)
\usepackage{changepage}

% Use Unicode characters when possible
\usepackage[utf8]{inputenc}

% textcomp package and marvosym package for additional characters
\usepackage{textcomp,marvosym}

% fixltx2e package for \textsubscript
\usepackage{fixltx2e}

% amsmath and amssymb packages, useful for mathematical formulas and symbols
\usepackage{amsmath,amssymb}

% cite package, to clean up citations in the main text. Do not remove.
\usepackage{cite}

% Use nameref to cite supporting information files (see Supporting Information section for more info)
\usepackage{nameref,hyperref}

% line numbers
\usepackage[right]{lineno}

% ligatures disabled
\usepackage{microtype}
\DisableLigatures[f]{encoding = *, family = * }

% rotating package for sideways tables
\usepackage{rotating}

% Remove comment for double spacing
%\usepackage{setspace} 
%\doublespacing

% Text layout
\raggedright
\setlength{\parindent}{0.5cm}
\textwidth 5.25in 
\textheight 8.75in

% Bold the 'Figure #' in the caption and separate it from the title/caption with a period
% Captions will be left justified
\usepackage[aboveskip=1pt,labelfont=bf,labelsep=period,justification=raggedright,singlelinecheck=off]{caption}

\usepackage{algorithm, algpseudocode}
\usepackage{eqparbox}
\renewcommand{\algorithmiccomment}[1]{\hfill\eqparbox{COMMENT}{\% #1}}
%\algnewcommand{\LineComment}[1]{\State \# #1}


% Use the PLoS provided BiBTeX style
\bibliographystyle{plos2009}

% Remove brackets from numbering in List of References
\makeatletter
\renewcommand{\@biblabel}[1]{\quad#1.}
\makeatother

% Leave date blank
\date{}

% Header and Footer with logo
\usepackage{lastpage,fancyhdr,graphicx}
\pagestyle{myheadings}
\pagestyle{fancy}
\fancyhf{}
\lhead{\includegraphics[natwidth=1.3in,natheight=0.4in]{PLOSlogo.png}}
\rfoot{\thepage/\pageref{LastPage}}
\renewcommand{\footrule}{\hrule height 2pt \vspace{2mm}}
\fancyheadoffset[L]{2.25in}
\fancyfootoffset[L]{2.25in}
\lfoot{\sf PLOS}

%% Include all macros below

\newcommand{\lorem}{{\bf LOREM}}
\newcommand{\ipsum}{{\bf IPSUM}}

%% END MACROS SECTION


\begin{document}
\vspace*{0.35in}

% Title must be 150 characters or less
\begin{flushleft} {\Large \textbf\newline{Numerical considerations for
      an efficient integration of coupled systems in multiscale
      modeling in neuroscience.  } }
  \newline
  % Insert Author names, affiliations and corresponding author email.
  \\
  Ekaterina Brocke\textsuperscript{1,2}, Name2
  Surname\textsuperscript{}, Name3 Surname\textsuperscript{}, Name4
  Surname\textsuperscript{}, Michael Hanke\textsuperscript{6}
  % Name6 Surname\textsuperscript{2,\Yinyang}, Name7
  % Surname\textsuperscript{3,*,\Yinyang}
  \\
  \bf{1} Department of Computational Biology, School of Computer
  Science and Communication, KTH Royal Institute of Technology,
  Stockholm, Sweden
  \\
  \bf{2} National Centre for Biological Sciences, Tata Institute of
  Fundamental Research, Bangalore, India
  \\
  \bf{3}
  \\
  \bf{4}
  \\
  \bf{5}
  \\
  \bf{6} Department of Mathematics, School of Engineering Sciences,
  KTH Royal Institute of Technology, Stockholm, Sweden
  \\

  % Insert additional author notes using the symbols described
  % below. Insert symbol callouts after author names as necessary.
% 
  % Remove or comment out the author notes below if they aren't used.
%
  % % Primary Equal Contribution Note
  % \Yinyang These authors contributed equally to this work.

  % % Additional Equal Contribution Note
  % \ddag These authors also contributed equally to this work.

  % % Current address notes
  % \textcurrency a Insert current address of first author with an
  % address update
  % % \textcurrency b Insert current address of second author with an
  % % address update
  % % \textcurrency c Insert current address of third author with an
  % % address update

  % % Deceased author note
  % \dag Deceased

  % % Group/Consortium Author Note
  % \textpilcrow Insert Collaborative Author line here

  * E-mail: brocke@kth.se
\end{flushleft}
% Please keep the abstract below 300 words
\section*{Abstract}
% Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur
% eget porta erat. Morbi consectetur est vel gravida
% pretium. Suspendisse ut dui eu ante cursus gravida non sed
% sem. Nullam sapien tellus, commodo id velit id, eleifend volutpat
% quam. Phasellus mauris velit, dapibus finibus elementum vel,
% pulvinar non tellus. Nunc pellentesque pretium diam, quis maximus
% dolor faucibus id. Nunc convallis sodales ante, ut ullamcorper est
% egestas vitae. Nam sit amet enim ultrices, ultrices elit pulvinar,
% volutpat risus.


% Please keep the Author Summary between 150 and 200 words Use first
% person. PLOS ONE authors please skip this step.  Author Summary not
% valid for PLOS ONE submissions.
\section*{Author Summary}
% Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur
% eget porta erat. Morbi consectetur est vel gravida
% pretium. Suspendisse ut dui eu ante cursus gravida non sed
% sem. Nullam sapien tellus, commodo id velit id, eleifend volutpat
% quam. Phasellus mauris velit, dapibus finibus elementum vel,
% pulvinar non tellus. Nunc pellentesque pretium diam, quis maximus
% dolor faucibus id. Nunc convallis sodales ante, ut ullamcorper est
% egestas vitae. Nam sit amet enim ultrices, ultrices elit pulvinar,
% volutpat risus.

\linenumbers Several strategies have been proposed lately for coupling
multiple components on a behavioral
level~\cite{bhalla2011multiscale,mattioni2013integration}. One of the
main concerns about these strategies is the lack of a mathematical
justification. Here we bring up a problem of inefficiency and possible
numerical instability that may arise while coupling multiple
components comprising a multiscale system.

First, we introduced an implicit solver, the two-step Backward
Differentiation Formula (BDF2), as a possible alternative to the
conventional methods used in neuroscience. This numerical method
attracted our interest for several reasons. It was previously
introduced in a decoupled implicit integration form by Skelboe
in~\cite{skelboe2000accuracy}, where its stability properties and
error propagation estimates were evaluated. Moreover, the suggested
error estimation mechanism for the method allowed us to use an
adaptive time-stepping algorithm. The power and applicability of the
proposed approach was demonstrated on a test multiscale model that was
designed as a prototype of the models used in the scope of interest.


\section*{Introduction}
The concept of multiscale modeling is used in many fields such as
meteorology~\cite{shukla2009seamless, kurowski2013toward}, cardiac
physiology~\cite{hernandez2011integration} and
neuroscience~\cite{bhalla2014multiscale}.  It refers to the style of
modeling in which different models, possibly described by different
physical formalisms and acting on different temporal and/or spatial
scales, are used simultaneously in order to study important features
of a complex phenomenon at multiple levels of
organization~\cite{djurfeldt2007workshop}. For instance, in
neuroscience, the phenomena of synaptic plasticity span different
physical and chemical processes: physiological, cell electrodynamics
and molecular biology. Each process acts on its own temporal and
spatial scale and interacts bidirectionally and continuously with the
other processes.

An idea of data integration between different levels of neural
organization has become an important trend in
neuroscience. Bhalla~\cite{bhalla2011multiscale} explores cross-scale
interactions between cellular and subcellular levels in the context of
homeostasis and synaptic plasticity. The multiscale model proposes the
pruning mechanism of the weak synapses during cellular excitability.
Another study by Mattioni and Le
Nov{\`e}re~\cite{mattioni2013integration} presents an integration
between electrical and biochemical processes in the model of a Medium
Spiny Neuron (MSN). In particular, the influence of different input
patterns on membrane excitability and the mechanism of inter-spine
synaptic plasticity is studied.


How should the numerical solution of a multiscale system be arranged
in practice? Different components of the system might need to be
solved using different numerical methods.  This is for example the
case if the components are described through different physical
formalisms where one employs ODEs and the other is stochastically
formulated. In the case of a multiscale system formulated using a
single formalism it might be advantageous to treat different
components separately, both with regard to their description and with
regard to computing the solution.  If different components of a system
can be treated as independently as possible, computations might be
arranged to take advantage of modularity and parallelism.

K{\"u}bler and Schiehlen~\cite{kubler2000two} propose that a complex
engineering system can be decomposed into modules at three levels of
description: the physical, mathematical and behavioral levels. The
components of a system can be integrated at the mathematical level and
then solved using a single numerical solver. Here, we rather focus on
coupling at the behavioral level---the level of signals in the
computed solution. In particular, we are interested in coupling
different numerical solvers in a theoretically appropriate and an
efficient way.

There are quite a few considerations that have to be taken into
account when coupling the components at the behavioral level. First is
the practical aspect: for instance, a minimal set of signals which
need to be communicated between system components and a corresponding
synchronization algorithm have to be defined. Mattioni and Le
Nov{\`e}re~\cite{mattioni2013integration} proposed an event-driven
algorithm where the scaled exchanged variables of interest have to be
communicated each time an event happens. The algorithm showed better
performance results in comparison with a time-driven algorithm where
the synchronization of exchanged variables has to be performed at
regular time intervals. Second is the theoretical aspect: for
instance, whether the coupling of different numerical methods has been
done in an accurate and efficient way. The latter question has not
been addressed yet.

Different coupling techniques may suffer from such a numerical
phenomenon as instability~\cite{arnold2001preconditioned}. One of the
main challenges in coupling numerical methods is to ensure convergence
of a discrete system. If a coupled integration is convergent, a proper
choice of the step size still has to be made in order to guarantee
numerical stability. It is important to keep in mind that the
numerical stability of a coupled integration is not guaranteed by the
stability of a separate integration of the system components. Then,
when coupling numerical integration methods, the order of
approximation of the exchanged variables has to be taken into
consideration. At the same time, it is crucial to perform the
integration in an efficient way keeping the accuracy within desired
bounds. Finally, the problem becomes complicated if system components
are described by different physical
formalisms~\cite{brandi2011connecting, rudiger2007hybrid,
  alfonsi2005adaptive}. For example, chemical interactions can be
described either in a deterministic or in a stochastic way possibly
accompanied by diffusion processes.

This paper focuses on the numerical aspects of different coupling
strategies. We present a method that allowed us to bridge subcellular
and cellular level models described by Ordinary Differential Equations
(ODEs) in an accurate and efficient way.

We begin with an overview of numerical engines widely used in the
scope of interest (Section~\ref{sec:numerical_methods}). In
Section~\ref{sec:adaptive_solver} we introduce an algorithm for an
adaptive control of the integration step size.  We present possible
organization strategies in the system composed of multiple components
in Section~\ref{sec:organizations}.  Section~\ref{sec:test_model}
describes the multiscale test model we use: its dynamics,
communication signals between the components, mathematical
formulation, details of implementation and verification
processes. Then, in Section~\ref{sec:results} we show the results and
analysis of the proposed coupling technique. Finally, in
Section~\ref{sec:discussion} we discuss the insights we gained from
this work, recommendations and possible future directions of research.


% You may title this section "Methods" or "Models".  "Models" is not a
% valid title for PLoS ONE authors. However, PLoS ONE authors may use
% "Analysis"
\section*{Materials and Methods}
\subsection*{Numerical methods}\label{sec:numerical_methods}
The object of study is typically cellular and subcellular (molecular)
levels of neuronal organization. On the cellular level electrical
properties of cell membranes are typically studied. The cell with its
complex arborizations is usually represented by a cable split into a
number of compartments. Then, we employ compartmental
modeling~\cite{rall1964theoretical} to describe the neural processes
where the dynamics of each compartment are defined by a coupled system
of ordinary differential equations (ODEs). Finally, by applying the
Hodgkin and Huxley formalism to define the
currents~\cite{hodgkin1952quantitative} a nonlinear system of ODEs has
to be solved. This approach is the basis of most simulators that take
neural morphology into account (e.g. NEURON~\cite{hines1989program},
GENESIS~\cite{wilson1990genesis}, MOOSE~\cite{dudani2009multiscale}).

On a molecular level the interaction of biochemical signaling pathways
is of particular interest. A signaling pathway is usually referred to
as a set of reactions between the molecules that operate on a
subcellular level~\cite{bhalla1998network}. To describe a signaling pathway
chemical kinetics or chemical rate theory is usually applied.  One of
the traditional ways to model the kinetics is by viewing the system of
reactions as deterministic. Then the chemical species are usually
considered in concentration units that evolve over time. These models
are usually described by a system of nonlinear ODEs.

The complexity of the systems requires numerical
computations. Simulation packages offer the user a choice between
different numerical integration methods. The choice of the method is
usually dependent on the properties of a simulating system. For
example, if the system of ODEs is \textit{stiff} and an explicit
numerical method has been chosen then the step size of the
discretization is limited by stability and not by accuracy. Then, an
efficient numerical integration requires \textit{implicit}
methods. These methods allow the simulation to be discretized with
larger time steps due to the good stability properties. Fixed step
size numerical methods, such as the implicit Euler, the
Crank-Nicholson, the Runge-Kutta methods, are typically applied to
solve systems of current study in neuroscience. The choice of the
discretization time step is then usually made by running the
simulation with different step sizes and comparing the computation
cost versus solution accuracy.

The Crank-Nicholson (CN) method with the staggered time step approach
is widely used for solving branched nerve equations in
neuroscience~\cite{hines1984efficient}. The proposed approach allows
the user to obtain a solution in an accurate and efficient way
(Appendix~\ref{appx:cn_staggered}). The Crank-Nicholson method is an
implicit method and can be used for stiff systems. However little is
known about stability properties while working on a staggered grid
(see Appendix~\ref{appx:stability_cn_staggered} for details).  We have
implemented and applied this method to approximate the solution of the
electrical model. We will refer to it as modified CN (mCN) further on.

We have implemented the Classical Runge-Kutta (RK4) method to solve
our biochemical model~\cite{bhalla2011multiscale}. This method is
known to be an explicit numerical method with a bounded stability
domain and therefore it is not suitable for stiff problems. A
mathematical formulation of the method can be found in
Appendix~\ref{appx:crk}.

While both the mCN and the RK4 methods provide efficient means for
simulating the models on the appropriate levels, the coupling of these
methods poses additional questions.

\begin{enumerate}
\item Multiscale systems are usually composed from components acting
  on different timescales. For example, the timescale of a single
  spike is of the order of a few milliseconds. However, simulations
  usually run for many seconds in order to observe the effects at a
  biochemical level. The gap in the timescales demands an efficient
  integration strategy. We suspect that adaptive integrators can be
  more efficient or require less time for a given degree of accuracy
  due to the alternate activity of fast and slow components in
  multiscale systems. Both the mCN and the RK4 methods are normally
  applied on a fixed step size grid. Neither provides an error
  estimation mechanism that could be used together with a step size
  control mechanism.
\item Little is known about the error propagation for the mCN - RK4
  coupled integration. The order of the numerical method quantifies
  the error behaviour in respect to the step size. We know that the
  mCN method is second order accurate and the RK4 method is of the
  order four while the order of the coupled integration still has to
  be studied.
\item Numerical stability of the system components solved separately
  does not guarantee the stability of the coupled system in general as
  shown in Appendix~\ref{appx:instability}. Our multiscale system is
  likely to be stiff due to the rapid changes of the electrical
  component. To be on the safe side we aim to avoid using methods not
  suitable for stiff problems.
\end{enumerate}

Considering the points above we implement another approximation
method, the Backward Differentiation Formula (BDF). In particular, we
are interested in a second-order BDF (BDF2) widely used for solving
stiff differential equations and Differential Algebraic Equations
(DAEs). The BDF2 method was analyzed with respect to its stability
properties and error estimates were provided within a
decoupled\footnote{ According to the algorithm presented by Skelboe
  in~\cite{skelboe2000accuracy} a given system has to be partitioned
  into loosely coupled subsystems first and then decoupled formulas
  are applied to integrate the system. Here, the subsystems to be
  coupled are given. Thus we apply the term "coupled" hereinafter.}
numerical integration strategy~\cite{skelboe2000accuracy}. The
mathematical formulation of the method is given in
Appendix~\ref{appx:bdf2}. We apply this method to approximate
solutions both on the cellular and subcellular levels. We have
implemented the method on a fixed step size grid in order to be able
to perform straightforward comparison with the mCN and the RK4
methods. However for most of the results we use the method with an
adaptive step size controller as described in
Section~\ref{sec:adaptive_solver}.

\subsection*{Adaptive step size controller}\label{sec:adaptive_solver}
The aim of the adaptive step size controller is to reduce the
computational cost of the simulation while keeping the local error
within acceptable bounds. The reduced computational cost is mainly
achieved by a reduced number of the required time steps at which a
solution is approximated.

We implement an adaptive controller of the step size as described by
Deuflhard and Bornemann~\cite{deuflhard2002scientific}. It calculates
a local discretization error $|\left[\epsilon_j\right]|$\footnote{In
  fact, since the quantity of the local discretization error
  $|e_{j+1}|$ cannot be determined \textit{exactly}, the notion of
  some \textit{computable estimate}
  $|e_{j+1}|\approx|\left[\epsilon_j\right]|$ was introduced by
  Deuflhard and Bornemann in~\cite{deuflhard2002scientific}. } of the
taken step $h_j$ to calculate an optimal step size $h$
(Algorithm~\ref{alg:controller}, line \#\ref{alg:controller-line9})
and then uses it as a predictor for the next step size. In other
words, if the calculated quality of the current step $h_j$ is good
enough (Algorithm~\ref{alg:controller}, line
\#\ref{alg:controller-line10}), the algorithm continues with the just
calculated optimal step size $h$ as the \textit{next} step
size. Otherwise, the just calculated optimal step size $h$ is used as
the \textit{current} step size (Algorithm~\ref{alg:controller}, line
\#\ref{alg:controller-line17}) and the current step has to be
redone. A description of the algorithm is shown in
Algorithm~\ref{alg:controller}.

\begin{algorithm}
  \caption{Step size controller (Deuflhard and
    Bornemann~\cite{deuflhard2002scientific})} \label{alg:controller}
  \begin{algorithmic}[1]
    \State $h_0=\bar{h}_0$\Comment{Choose initial step size prediction
      $\bar{h}_0$}
    % \State Choose initial value $x_0$ and first time step $h_0$
    \State $j = 0$ \Comment{Initiate the iteration index} \State
    $\Delta_t = \{t_0\}$ \Comment{Initiate the time set} \State
    $x_{\Delta}(t_0) = x_0$ \Comment{Initiate the solution set}
    \While{$t_j < T$} \Comment{Within the simulation time $T$ do:}
    \State $t = t_j + h_j$ \State $x = \Psi^{t,t_j}x_{\Delta}(t_j)$
    \Comment{Advance the solution from $t_j$ to $t$} \State compute
    $|\left[\epsilon_j\right]|$ \Comment {See
      Section~\ref{sec:error_estimation}}\State $h
    =\min\left(qh_j,h_{max}, \sqrt[p+1]{\frac{\rho TOL}
        {|\left[\epsilon_j\right]|}}h_j\right)$ \label{alg:controller-line9}
    \Comment{Calculate an optimal time step $h$}
    \If{$|\left[\epsilon_j\right]|\leq
      TOL$} \label{alg:controller-line10} \Comment{Step is accepted}
    \State $t_{j+1} = t$ \State $\Delta_t = \Delta_t \cup \{t_{j+1}\}$
    \Comment{Update the time set} \State $x_{\Delta}(t_{j+1}) = x$
    \Comment{Update the solution set}\State $h_{j+1}$ =
    min$(h,T-t_{j+1})$ \Comment{Set the next time step}\State $j=j+1$
    \Comment{Advance integration}\Else \Comment{Step is rejected}
    \State $h_j = h$ \label{alg:controller-line17} \Comment{Set the
      current time step}
    \EndIf
    \EndWhile
  \end{algorithmic}
\end{algorithm}

The optimal time step $h$ is calculated according to the following
formula:
\begin{equation}\label{eq:opt_tau}
  h =\min\left(qh_j,h_{max},\sqrt[p+1]{\frac{\rho TOL}
      {|\left[\epsilon_j\right]|}}h_j\right),
\end{equation}
where $\rho$ ($\rho < 1$) is a safety factor; $p$ is the order of the
discretization method specified in the discrete evolution
$\Psi^{t+h,t}$; $qh_j~(q>1)$ and $h_{max}$ are additional bounds on
the step size increase for the situations when the error estimation
value ($|\left[\epsilon_j\right]|$) becomes or is close to zero.

The last term of Eq.~\eqref{eq:opt_tau} represents an
\textit{I-controller} mechanism. According to Deuflhard and
Bornemann~\cite{deuflhard2002scientific} there can be situations where
a \textit{PID-controller} is preferable:
\begin{equation}\label{eq:PIDcontroller}
  h_{k+1}=\left(\frac{\rho\cdot Tol}{|\left[\epsilon_{k+1}\right]|}\right)^{\beta_I+\beta_P+\beta_D} 
  \left(\frac{|\left[\epsilon_{k}\right]|}{\rho\cdot Tol}\right)^{\beta_P+2\beta_D}
  \left(\frac{\rho\cdot Tol}{|\left[\epsilon_{k-1}\right]|}\right)^{\beta_D}h_k
\end{equation}
Note that, letting the parameters ($\beta_P,\beta_D,\beta_I$) of
Eq.~\eqref{eq:PIDcontroller} be chosen such that $\beta_P=\beta_D = 0$
and $\beta_I=1$, Eq.~\eqref{eq:PIDcontroller} represents the
I-controller.

In order to implement Algorithm~\ref{alg:controller} an error
estimation mechanism should be provided.

\paragraph{Estimate of the local discretization
  error.} \label{sec:error_estimation} The error estimation is
method-dependent. Below we describe the error estimation for the BDF2
method based on a predictor-corrector algorithm. The difference of the
discrete evolutions given by Eq.~\eqref{eq:error control} represents
an estimate of the local error $|\left[\epsilon\right]|$.
\begin{equation}\label{eq:error control}
  \left|\left| \Psi^{t+h,t}x - \hat{\Psi}^{t+h,t}x\right|\right| \leq TOL,
\end{equation}
where the prediction step taken by the discrete evolution
$\hat{\Psi}^{t+h,t}$ calculates a rough approximation of the solution
and the corrector step taken by ${\Psi}^{t+h,t}$ refines the initial
approximation.

% bound the norm of the difference between the second order polynomial
% predictor $Y^{p2}_n$ and the approximated solution $Y_n$ to control
% the local error (Equation~\ref{eq:error control}) at each time
% step. We use the same polynomial function $Y^{p2}_n$ as a predictor
% in error estimation as the one required to approximate exchanged
% variables (Section~\ref{sec:extrapolation}). To approximate the
% solution $Y_n$ we use BDF2 method according to
% Section~\ref{sec:numerical_methods}.
Often, the tolerance $TOL$ is set as a combination of a relative
tolerance, $relTOL$, and an absolute tolerance, $absTOL$. This can be
expressed as
\begin{equation}
  \left|\left| \Psi^{t+h,t}x - \hat{\Psi}^{t+h,t}x\right|\right| \leq relTOL \cdot \left|\left|\Psi^{t+h,t}x\right|\right| + absTOL
  % \left|\left| Y_n - Y^{p2}_n\right|\right| \leq Tol = relTol \cdot
  % \left|\left|Y_n\right|\right| + absTol
\end{equation}
From a practical point of view, it is more appropriate to use a
similar requirement component-wise. Rearranging it gives the error
control quantity $\left|\left[ \epsilon\right] \right|$ :
\begin{equation}\label{eq:error_estimation}
  \left|\left[ \epsilon\right] \right| = \max_i\frac{\left|\Psi^{t+h,t}x_i - \hat{\Psi}^{t+h,t}x_i\right|}{relTol\cdot \left|\Psi^{t+h,t}x_i\right| + absTol_i}\leq 1
  % \left|\left[ \epsilon_{j+1}\right] \right| =
  % \max_i\frac{\left|Y_{n,i}-Y^{p2}_{n,i}\right|}{relTol\cdot
  % \left|Y_{n,i}\right| + absTol_i}\leq 1 = TOL
\end{equation}
% Moreover, it is common to express the absolute tolerance as the
% relative tolerance multiplied with a typical solution as
% $\mathrm{absTol_i} = \mathrm{relTol} \cdot |Y_{typical,i}|.$
Therefore, $TOL$ in Algorithm~\ref{alg:controller} becomes simply
$TOL=1$.

The best efficiency is usually achieved if both the predictor and the
corrector are of the same order. Furthermore, polynomial interpolation
formulas are preferred as predictors in connection with stiff
problems~\cite{skelboe2000accuracy}. The prediction step in
Eq.~\eqref{eq:error_estimation} is taken by the discrete evolution of
the second order polynomial described later on in
Section~\ref{sec:extrapolation} and the correction step --- by the
BDF2 method accordingly.

\subsection*{Organization of computations}\label{sec:organizations}

In distributed numerical integration the components of a system
are solved separately on time windows $[t_n,t_{n+1}]$. Then
information is exchanged at synchronization points
$0=t_0<\dots<t_n<t_{n+1}<\dots$. Different aspects of integration are usually
considered: the use of different discretization step sizes in the
components, the coupling of different numerical methods, various
organizations of computations between system components. Here, we
study two organizations: \textit{Jacobi} and
\textit{Gauss-Seidel}. The choice may have a crucial impact on both
numerical stability and accuracy.

We introduce the notions of \textit{macro time step} and \textit{micro
  time step}. The macro time step determines the communication points:
how long the components can run independently of each other without
losing accuracy. The micro time step determines the discretization
points of each component between two communication points. The latter
is usually determined by accuracy, stability and the numerical method
used. In all our simulations we choose the micro-step equal to the
macro-step, unless stated otherwise.

For simplicity we take an abstract system composed of two components:
component~\textit{1} and component~\textit{2}. Then we can define the
system using continuous representation as:

\begin{equation}\label{eq:System}
  \begin{aligned}
    &\frac{d}{dt}x_1 = f_1(t,x_1,x_2)\\
    &\frac{d}{dt}x_2 = f_2(t,x_1,x_2),
  \end{aligned}
\end{equation}
where $x_{\mathrm{1}},x_{\mathrm{2}}$ are the solution vectors of the
respective components.

\paragraph{Jacobi organization.}
Jacobi organization in the system with two components leads to the
interaction shown in Figure~\ref{fig:jacobi}. In order to make a step
from time $t_n$ to $t_{n+1}$ each component gets exchanged variables
at time $t_n$ (white triangle arrows on
Figure~\ref{fig:jacobi}). Then, the components proceed to the time
point $t_{n+1}$.
\begin{figure}[h]
  \centering
  % \includestandalone{figures/jacobi}
  \caption{{\bf Discretization in time of System~\eqref{eq:System}
      using Jacobi organization.} White triangle arrows correspond to
    communication signals between component~\textit{1} and
    component~\textit{2}. $H_n$ is a macro time step in the system,
    $h_{x_1},h_{x_2}$ are micro time steps of component~\textit{1}
    and~\textit{2} accordingly.}
  \label{fig:jacobi}
\end{figure}


Requiring the exchanged variables to be known at time $t_{n+1}$ and
considering Jacobi organization, System~\eqref{eq:System} can be
rewritten in the form:

\begin{equation}\label{eq:jacobi}
  \begin{aligned}
    &\frac{d}{dt}x_1 = f_{1}(t,x_{1},\tilde{x}_{2,n+1})\\
    &\frac{d}{dt}x_2 = f_{2}(t,\tilde{x}_{1,n+1},x_{2}),
  \end{aligned}
\end{equation}
where $\tilde{x}_{1,n+1}$ and $\tilde{x}_{2,n+1}$ are approximated
solutions of component~\textit{1} and component~\textit{2}
respectively. This organization works very well in parallel
computations since no component needs to wait for the other.

\paragraph{Gauss-Seidel organization.}
The Gauss-Seidel organization updates each component in a sequential
order (Figure~\ref{fig:GS}). Let component~\textit{1} be the leading
component in System~\eqref{eq:System}. Then, after the solutions have
been communicated at time $t_n$ (white triangle arrows on
Figure~\ref{fig:GS}), component~\textit{1} proceeds until
$t_{n+1}$. Then the calculated solution of component~\textit{1} at
time $t_{n+1}$ can be used by component~\textit{2}. This communication
principle has been used by Mattioni and Le Nov{\`e}re
in~\cite{mattioni2013integration}.

\begin{figure}[h]
  \centering
  % \includestandalone{figures/gausseidel}
  % \includegraphics[width=0.6\textwidth]{figures/gausseidel.pdf}
  % \input{figures/gausseidel.tex}
  \caption{{\bf Discretization in time of System~\eqref{eq:System}
      using Gauss-Seidel organization.} White triangle arrows
    correspond to communication signals between component~\textit{1}
    and component~\textit{2}. $H_n$ is a macro time step in the
    system, $h_{x_1},h_{x_2}$ are micro time steps of
    component~\textit{1} and~\textit{2} accordingly.}
  \label{fig:GS}
\end{figure}

The Gauss-Seidel organization makes it possible to eliminate the error introduced
by a solution approximation as shown in System~\eqref{eq:GS}. However
it is more difficult to apply this strategy efficiently during
parallel simulations since at least one component has to wait for the
other.

\begin{equation}\label{eq:GS}
  \begin{aligned}
    &\frac{d}{dt}x_1 = f_{1}(t,x_{1},\tilde{x}_{2,n+1})\\
    &\frac{d}{dt}x_2 = f_{2}(t,x_{1},x_{2}),
  \end{aligned}
\end{equation}
where $\tilde{x}_{2,n+1}$ is an approximated solution of
component~\textit{2}.

\paragraph{Exchanged variables
  approximation.}\label{sec:extrapolation}
We aim to simulate two components independently where the solution of
one component depends on the solution of another component at each
integration time step as shown in System~\eqref{eq:System}. It can
happen that the information is not available at a certain time
point. Then an approximation of exchanged variables can be
considered. For instance, the application of an implicit numerical
method requires the knowledge at the time point $t_{n+1}$ as shown in
System~\eqref{eq:jacobi} and in System~\eqref{eq:GS}.

Here we will compare two approximation strategies, so called
\textit{Mode~1} and \textit{Mode~3} \cite{skelboe2000accuracy}.
Mode~1 implies a constant extrapolation $\tilde{x}_{n+1} = x_{n}$, so
that the solution at the previous time step $n$ is used when
required. Mode~3 uses a second order polynomial to approximate the
value at time $t_{n+1}$:
\begin{equation}\label{eq:predictor}
  \tilde{x}_{n+1}=x^{p2}_{n+1} = \bar{\alpha}_{1}x_{n}+\bar{\alpha}_{2}x_{n-1}+\bar{\alpha}_{3}x_{n-2}
\end{equation}

\begin{equation*}
  \bar{\alpha}_1 = 1-\bar{\alpha}_2-\bar{\alpha}_3,\quad
  \bar{\alpha}_2 = \frac{\gamma_{n+1}(\gamma_{n+1}+\delta_{n+1})}{1-\delta_{n+1}},\quad
  \bar{\alpha}_3 = \frac{\gamma_{n+1}(\gamma_{n+1}+1)}{\delta_{n+1}(\delta_{n+1}-1)},
\end{equation*}
where $\gamma_{n+1}=h_{n+1}/h_{n}$ and $\delta_{n+1}=1+h_{n-1}/h_{n}$.


\subsection*{Test model}\label{sec:test_model}
In our test multiscale model we span two levels of neural
organization: we model the electrical dynamics of a single neuron and
biochemical processes in one of its compartments named
\textit{spine}. The parameters of the models are given in
Appendix~\ref{appx:models}.

\paragraph{Dynamics.}
A stimulating current is applied to the \textit{soma}, resulting in
depolarization of the membrane potential. The change in the membrane
potential activates voltage dependent sodium and potassium channels
and a spike train propagates through the axial resistance to the
spine. Spine depolarization activates voltage dependent calcium
channels and a calcium current flow into the spine. These processes
act on a time scale of few milliseconds. In turn, Ca influx triggers
multiple signaling cascades on a sub-cellular level. We use the
mitogen-activated protein kinase (MAPK/ERK) signaling model taken
from~\cite{bhalla2011multiscale}. This model exhibits a bistable
chemical switch that is triggered by the calcium influx. This switch
then leads to changes in synaptic conductance of the neuron. The time
scale of the chemical system is approximately 10~s, thus around 100
times larger than that of the electrical system. This dynamics mimics
the one considered to play an important role in such phenomena as
synaptic plasticity~\cite{hayer2005molecular}.

A current of ${0.09\cdot 10^{-9}}$~A is injected for 5~s which is
sufficient to elevate the ${Ca^{2+}}$ level up to 1~$\mathrm{\mu M}$,
required for the biochemical switch. When the injection has finished,
the simulation continues until it reaches 45~s of the total simulation
time. Then the biochemical system has settled at its second steady
state.

We choose three molecular concentrations to plot as our reference
solution. Figure~\ref{fig:refsolu} shows calcium $Ca^{2+}$,
phosphorylated form of MAPK (\textit{P\_MAPK}) and potassium $Ka$
concentration traces. When \textit{P\_MAPK} is activated, it
phosphorylates potassium and leads to its non-conductivity.  The thick
part of the calcium concentration trace corresponds to the oscillating
behavior of the $Ca^{2+}$ current during the stimulation period
between 1~s and 6~s of the simulation time.

\begin{figure}[h]
  \centering
  % \includegraphics[width=0.75\textwidth]{figures/correct_sol.eps}
  \caption{{\bf The solution of $\mathbf{Ca^{2+}}$, \textit{P\_MAPK}
      and \textit{Ka} molecule concentrations obtained with
      \textit{ode15s}\protect\footnotemark~MATLAB function.} To
    achieve high accuracy, the \textit{RelTol} and \textit{AbsTol}
    parameters were set to $10^{-9}$ and $10^{-12}$ respectively.}
  \label{fig:refsolu}
\end{figure}

\footnotetext{\textit{ode15s} is a solver designed for stiff
  problems. It is a quasi-constant step size implementation of the
  backward differentiation methods~\cite{shampine1997matlab}.}


\paragraph{Communication signals.}
We use ${Ca^{2+}}$ influx as a key signal in our multiscale model. The
${Ca^{2+}}$ current in the electrical model ($I_{Ca}$) is transformed
to the ${Ca^{2+}}$ injection rate to the biochemical model ($k_{inj}$)
as shown in Eq.~\eqref{eq:Kinj}.
\begin{equation}\label{eq:Kinj}
  k_{inj}=\frac{N_{e}}{2\cdot N_{A} \cdot vol}\cdot I_{Ca} \quad \left[\frac{M}{s}\right],
\end{equation}
where $N_{e}$ is the number of electrons in one Coulomb, which is
roughly equal to $6.242\cdot10^{18}$, $N_{A}$ is Avogadro's constant and $vol$
is the volume of the spine compartment.

In turn, the biochemical system provides calcium concentration and the
fraction of phosphorylated calcium dependent potassium channels in the
spine. The fraction then is used in the conductance evolution of the
$I_{K}$ current in the electrical system Eq.~\eqref{eq:frac}.
\begin{equation}\label{eq:frac}
  g_{K}=\bar{g}_{K}\frac{[Ka]}{[Ka]^*} \quad \left[S\right],
\end{equation}
where ${[Ka]}$ is the concentration of non-phosphorylated calcium
dependent potassium channels, $[Ka]^*$ is the total concentration of
calcium dependent potassium channels, $\bar{g}_{K}$ is the maximum
potassium conductance.

\paragraph{Mathematical formulation.}
We use compartmental modeling with the HH formalism to define 17
subsequent electrical circuits of the neuron that result in 24 ODEs
(Appendix~\ref{appx:electrical}). Chemical reactions in the spine are
defined by reaction-rate equations constituting a non-linear system of
18 ODEs (Appendix~\ref{appx:biochemical}). Considering the
communication signals in the system, then System~\eqref{eq:System} can
be reformulated in the following form:
\begin{equation}\label{eq:sys_combined}
  \begin{aligned}
    &\frac{d}{dt}x_{chem} = f_{chem}(t,x_{chem},g_{1}(x_{elec}))\\
    &\frac{d}{dt}x_{{elec}} =
    f_{{elec}}(t,g_{2}(x_{{chem}}),x_{{elec}}),
  \end{aligned}
\end{equation}
where $g_1$ and $g_2$ are the output functions from the electrical and
the biochemical component respectively.
\begin{equation}\label{eq:comm_signals}
  \begin{aligned}
    &g_{1}(x_{{elec}},x_{chem}) = C_{1} (x_{{elec},i}-C_{2}
    ln(x_{{chem},j})+C_{3}) x_{{elec},k}^2 x_{{elec},l}
    \\
    & g_{2}(x_{{chem}}) = \left\{ \begin{array}{l}
        x_{{chem},j} % & \quad \text{where}
        \\ C_{4} x_{{chem},m} % & \quad \text{where}
      \end{array},\right.
  \end{aligned}
\end{equation}

where $C_{1},\dots,C_{4}$ are constants; the indices ${i,\dots,m}$
correspond to the variables in the according solution vector $x$ at
time $t$:

${i}$ - potential in the spine [V]

${j}$ - calcium concentration in the spine [M]

${k}$ - probability for an s gate being opened (calcium channel
activation)

${l}$ - probability for an r gate being opened (calcium channel
inactivation)

${m}$ - concentration of active (non-phosphorylated) calcium dependent
potassium channels [M]

K{\"u}bler and Schiehlen in~\cite{kubler2000two} define the coupled
integration to be zero-stable\footnote{Zero-stability is a property of
  a numerical method that guarantees the stability of the discrete
  system if the step size goes to zero.} if algebraic
loops % \footnote{It is said that
% there exists an algebraic loop if any interconnections in the system
% form a closed loop of components, i.e. the output of each component
% depends on the inputs. }
do not exist between the components. Since only the output function of
the electrical component $g_1$ depends on the solution vector of the
chemical component $x_{chem,i}$, we can expect a zero-stability of the
coupled integration\footnote{Among the assumptions made during the
  analysis of zero-stability in the coupled integration by K{\"u}bler
  and Schiehlen in~\cite{kubler2000two} was an assumption of linearity
  of the output functions. The latter does not hold in our system.  }.

\paragraph{Implementation and Simulation.}
Both models, electrical and biochemical, have been implemented in
MATLAB\textsuperscript{\textregistered}. The electrical component can
be solved either by the Crank-Nicholson method on a staggered grid or
by the BDF2 method. The solution of the biochemical component can be
obtained either by the RK4 method on a fixed step size grid or by the
BDF2 method. The adaptive step size controller has been used only when
both the electrical and biochemical models have been discretized with
the BDF2 method.

The numerical parameters, that is the value of $relTol$ in the
adaptive step size solver and the size of the grid in the fixed step
solver, have been chosen to satisfy the minimal requirements imposed
by the system. That is the relative tolerance of $relTol=10^{-3}$
corresponds to the order of the smallest solution component, the fixed
step size grid of $h_{elec}=h_{chem}=1.125\cdot 10^{-4}$ has been
chosen according to the step size required to follow the dynamics of
the fast system. Then the values were refined in order to explore
asymptotic properties of coupling strategies.


\paragraph{Validation and Verification.}\label{sec:verification}
A straightforward way to understand whether the solution is valid can
be to use a visual comparison technique. However a rigorous way to
verify the solution is required.

The calcium communication signal during the stimulation has a crucial
impact on the biochemical component in the multiscale system. The
calcium concentration trace shows an oscillating behavior following
calcium current dynamics of the electrical component during the
stimulation as shown in Figure~\ref{fig:refsolu}. We choose the
Discrete Fourier Transform (DFT) of the calcium trace between 3~s and
5~s of the simulation time to analyze the accuracy of our results. Of
particular interest is the zeroth order Fourier coefficient that
corresponds to a summation of the function values. With an appropriate
normalization factor, $n(N)=\frac{1}{N}$, it corresponds to the mean
value at the chosen interval. Since we do not know the analytic
solution, we use our reference solution obtained with \textit{ode15s}
(shown in Figure~\ref{fig:refsolu}) and calculate an error relative to
it as:

\begin{equation}\label{eq:e_rel}
  e_{Ca}=\frac{||\operatorname{fft0}(\check{x}_{\Delta,Ca})|-|\operatorname{fft0}(x_{\Delta,Ca})||}{|\operatorname{fft0}(\check{x}_{\Delta,Ca})|}\cdot n(N)\cdot 100\quad[\%],
\end{equation}
where $\operatorname{fft0}()$ is a function that returns the first
point of the discrete Fourier transform performed on the solution and
computed with a fast Fourier transform algorithm;
$\check{x}_{\Delta,Ca}$ is the $Ca^{2+}$ solution set on the interval
(3,5)~s obtained with \textit{ode15s}; $x_{\Delta,Ca}$ is the
$Ca^{2+}$ solution set on the same interval calculated with the
analyzed method.

The error at the end of the simulation interval can also be used for
accuracy estimates. We calculate the relative error of \textit{Ka} and
\textit{P\_MAPK} concentration values at time $T$ in the following
way:
% concentrations on our reference solution as well. We can also use
% the relative error of \textit{P\_MAPK} and \textit{Ka} concentration
% values at the end of the simulation interval for our accuracy
% estimates.
\begin{equation}
  e_{Ka}=\frac{|\check{x}_{T,Ka}-x_{T,Ka}|}{\check{x}_{T,Ka}}\cdot 100\quad[\%],
\end{equation}
where $\check{x}_{T,Ka}$ is the $Ka$ solution at time $T$ obtained
with \textit{ode15s}; $x_{T,Ka}$ is the $Ka$ solution at time $T$
calculated with the analyzed method. The same formula is applied to
calculate $e_{\text{\textit{P\_MAPK}}}$.

To look at efficiency we plot the error versus the number of function
calls of the right hand side of the system of ODEs.

% Results and Discussion can be combined.
\section*{Results/Discussion}

\subsection*{ The BDF2-BDF2 coupling outperformed RK4-mCN on a fixed
  step size grid}
We have a stiff electrical component that is solved with the mCN
numerical integration method. The RK4 method is used to solve the
biochemical system~\cite{bhalla2011multiscale}. Both methods are
applied on a fixed step size grid. The BDF2 method can also be used on
a fixed step size grid however can be inefficient due to the
additional cost of iterations required for implicit methods.

We compared the proposed implicit solver, that is the BDF2 method,
with the conventional methods used in neuroscience on fixed step
size grids (Figure~\ref{fig:fss-sol}).  % The comparison was made on
% three different fixed step size grids with
% $h_{elec}=h_{chem}=\{1.125\cdot 10^{-4};~5.625\cdot
% 10^{-5};~28.125\cdot 10^{-6}\}$
% % $4\cdot 10^5,8\cdot 10^5,1.6\cdot 10^6$ number of steps,
% respectively.
We observed that an expected second order accuracy of the modified
Crank-Nicholson method was lost after its coupling with the
fourth order accurate Runge-Kutta method, while the accuracy of the
decoupled BDF2 formula was maintained. Though this observation can be
system dependent or even may vary with the method chosen to compare
the errors. We also achieved slightly more accurate results than with
the conventional methods on a given range of fixed step size
grids. These promising results indicated a further direction of the
research.
% We want to compare two approximation couplings, BDF2-BDF2 with
% RK4-mCN, on a fixed step size grid in order to get a relative
% estimate for the BDF2 method efficiency.  The results are shown in .

\begin{figure}[h]
  \centering %\subfigure[Jacobi iteration] {
  % \includegraphics[width=0.75\textwidth]{./figures/fixed-jacobi.eps}
  \caption{{\bf Efficiency comparison between the RK4-mCN ('plus' markers) 
    and the BDF2-BDF2 ('square' markers) coupling on a
    fixed step size grid.} The datapoints on each curve correspond to
$h_{elec}=h_{chem}=\{1.125\cdot 10^{-4};~5.625\cdot 10^{-5};~28.125\cdot 10^{-6}\}$, respectively. The first discretization corresponds approximately to
the one required in the fastest component when solved separately.    
%the number of steps $(4\cdot 10^5,8\cdot 10^5,1.6\cdot 10^6)$.
The dashed lines correspond to the first and second order
declines. The simulations were performed with Jacobi organization
between the electrical and the chemical component in the
system. Second order polynomial was used to approximate exchanged
variables (Mode 3).}
\label{fig:fss-sol}
\end{figure}

% First, we noticed that the lowest order of accuracy in the RK4-mCN
% coupling, that is the second order accuracy of the mCN method, is
% not preserved while the BDF2-BDF2 coupling followed the second-order
% decline.  The solution obtained with BDF2-BDF2 coupling was more
% accurate given equal number of function calls.



\subsection*{The PI-controller produced a smoother distribution of
  step sizes}\label{sec:pi-controller}
An optimal behavior of the step size controller is when the step sizes
that have to be taken do not have an extensive variation. If not, it
increases the number of times the step size controller has to redo the
step. A smooth distribution of the step sizes leads to good
performance of the controller.

We compared two types of controllers as described in
Section~\ref{sec:adaptive_solver}, I-controller and PI-controller in
Figure~\ref{fig:stepsizeController}. The PI-controller showed a much
smoother step size variation for our system and thus allowed us to
obtain higher performance during the integration. In our further
observations we used a PI-controller with $a=0.7,b=0.4$.

\begin{figure}[h]
  \centering
  % \begin{subfigure}[b]{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/regular.pdf}
  %   \label{fig:stepsizeController Regular}
  %   \caption{The full simulation time interval}
  % \end{subfigure}
  % \begin{subfigure}[b]{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/zoomed-regular.pdf}
  %   \label{fig:stepsizeController Zoomed}
  %   \caption{Simulation interval $\lbrack5.5\ s,9\ s\rbrack$}
  % \end{subfigure}
  \caption{{\bf The size of a step taken by the I-controller (A)
      and PI-controller (B), as a function of simulation time.}}
  \label{fig:stepsizeController}
\end{figure}

Note that the stimulation period had to be resolved with very small
time steps. This was also the case for the solution obtained by
\textit{ode15s}. For the PI-controlled solution, 90\% of the taken
steps resided in this time interval.

\subsection*{An appropriate approximation of exchanged variables had
  to be considered}\label{sec:mode_comparison}
In this experiment we compared the constant extrapolation (Mode~1)
with the second order polynomial extrapolation of exchanged variables
(Mode~3) described in Section~\ref{sec:extrapolation}. We used Jacobi
organization of the components and an adaptive step size controller
with the BDF2 approximation method to solve the system.  We presented
the relative error of %\textit{P\_MAPK} ($e_{P\_MAPK}$)
% {\text{\textit{P\_MAPK}}}$)
\textit{Ka} ($e_{Ka}$) concentration at the end of the simulation
interval (Figure~\ref{fig:mode_comparison}A) and the relative error
$e_{Ca}$ based on FFT analysis during the stimulation interval
(Figure~\ref{fig:mode_comparison}B) versus number of function calls to
the right hand side of the system of ODEs.

\begin{figure}[h]
  \centering
  % \begin{subfigure}[b]{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/adaptive_ka.eps}
  %   \caption{The accuracy of the solution is based on the $e_{Ka}$
  %   relative error measurements.}
  %   \label{fig:mode_ka}
  % \end{subfigure}
  % \begin{subfigure}[b]{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/adaptive_fft.eps}
  %   \caption{The accuracy of the solution is based on the $e_{Ca}$
  %   relative error measurements.}
  %   \label{fig:mode_ca}
  % \end{subfigure}
  \caption{{\bf Efficiency comparison between Mode~1 and Mode~3 with
      Jacobi organization between the components in the system}. The
    datapoints in the figures correspond to the
    $relTol=\{10^{-3};~5\cdot 10^{-4};~5\cdot 10^{-5};~5\cdot
    10^{-6}\}$. An extra datapoint with $relTol=5\cdot 10^{-7}$ was
    plotted for the solution obtained in Mode~3. The line with the
    'plus' markers represent an error behavior while solving the
    system in Mode~1, the line with the 'square' markers - the system
    is solved in Mode~3. The dashed lines correspond to the first and
    second order declines. \\ (A) The accuracy of the solution is
    based on the $e_{Ka}$ relative error measurements.\\ (B) The
    accuracy of the solution is represented by the $e_{Ca}$ relative
    error.}
  \label{fig:mode_comparison}
\end{figure}

For the error estimations at the end of the simulations interval
$e_{Ka}$ we observed an expected asymptotic behavior in Mode~3 for
$relTol$ values above $5\cdot 10^{-5}$ and only a first-order of
coupling in Mode~1. The loss of the second-order suggested that Mode~1
with Jacobi organization should be avoided in our system. This was
valid for both error estimation $e_{Ka}$ and $e_{P\_MAPK}$ (not
shown).

Comparing the relative errors $e_{Ca}$ calculated during the
stimulation interval we observed a ``sensitive'' behavior of the error
with coarse values of $relTol$ and an expected asymptotic behavior
with finer values of $relTol$.  In order to achieve the nice monotonic
behavior on the whole range of $relTol$, the second order polynomial
predictor used in the error estimation algorithm could be replaced by
the lower order predictor, for instance a linear
predictor~\cite{skelboe2000accuracy}. This would introduce some
over-estimation of the error for each time step and lead to the
smaller step sizes, leading to decreased efficiency as shown in
Figure~\ref{fig:linear_predictor}. We propose it to be a personal
choice between the efficiency and the nice monotonic behavior in
global error decay.

\begin{figure}[h]
  \centering
  % \includegraphics[width=0.95\textwidth]{./figures/final_fft_new.eps}
  \caption{{\bf Efficiency comparison between a second-order
      polynomial (``square'' markers) and a linear (``asterisk'' markers)
      predictors used in Mode~3 with Jacobi organization between
      system components.} The datapoints represent the solutions
    obtained with $relTol=\{10^{-3};~5\cdot 10^{-4};~5\cdot
    10^{-5};~5\cdot 10^{-6}\}$. An extra datapoint with $relTol=5\cdot
    10^{-7}$ is plotted for the solution obtained with the
    second-order polynomial predictor. The dashed line correspond to
    the second order decline.}
  \label{fig:linear_predictor}
\end{figure}


A significant advantage of Mode~3 over Mode~1 with respect to the
calculated relative errors confirmed an importance of an appropriate
choice between different approximation strategies of exchanged
variables.


\subsection*{Organization of system components had to be taken into
  consideration}
\label{sec:organization_comparison}
We investigated whether different organizations between the components
in our system had any effect on accuracy of the solution. Having a
strong influence of the electrical component on the biochemical during
the stimulation interval we predicted that by letting the electrical
component lead the integration we could possibly avoid an
approximation error of the exchanged variables and improve overall
performance. The results we observed were consistent with our
expectations (Figure~\ref{fig:org_comparison}). Gauss-Seidel
organization with the electrical component solved first in Mode~1 led
to the more accurate results.


\begin{figure}[h]
  \centering
  % \begin{subfigure}{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/adaptive_ka_org.eps}
  %   \caption{The accuracy of the solution is based on the $e_{Ka}$
  %   relative error measurements.}
  % \end{subfigure}
  % \begin{subfigure}{0.45\textwidth}
  %   \includegraphics[width=\textwidth]{./figures/adaptive_fft_org.eps}
  %   \caption{The accuracy of the solution is based on the $e_{Ca}$
  %   relative error measurements.}
  % \end{subfigure}
  \caption{{\bf Efficiency comparison between Jacobi ('plus' markers),
      Gauss-Seidel with electrical component solved first ('asterisk'
      markers) and Gauss-Seidel with biochemical component solved
      first ('circle' markers) organizations in Mode~1.} The
    datapoints on the figures correspond to the
    $relTol=\{10^{-3};~5\cdot 10^{-4};~5\cdot 10^{-5};~5\cdot
    10^{-6}\}$. The dashed lines are the first and second order
    declines. \\ (A) The accuracy of the solution is based on the
    $e_{Ka}$ relative error measurements.\\ (B) The accuracy of the
    solution is represented by the $e_{Ca}$ relative error. }
  \label{fig:org_comparison}
\end{figure}


We also compared different organizations in Mode~3 (not shown). We
noticed a slight superiority of Gauss-Seidel organization with the
electrical component solved first at the datapoint $relTol=10^{-3}$ while
with the finest values of $relTol$ the difference was almost
indistinguishable.

We concluded that the Jacobi organization with the second order
polynomial (Mode~3) can be as accurate as the Gauss-Seidel
organization with an appropriate ordering of system components in
Mode~1.


\subsection*{A fixed ratio of step sizes led to a significant
  efficiency drop}\label{sec:fixed-ratio}
Multiscale problems usually span multiple time scales. The step sizes
required for numerical stability and desired accuracy usually differ
for different system components.  An implemented adaptive time
stepping algorithm defines an optimal integration step size equal for
each component in the system so that information is exchanged each
integration time step. That is $h_{elec,j+1}=h_{chem,j+1}=H_{j+1}$,
where $H_{j+1}=h_{j+1}$ is an optimal step size calculated by the
adaptive step size controller at time $t_{j+1}$
(Section~\ref{sec:adaptive_controller}).  Intuitively one would
imagine to use small steps for the fastest changing components and
larger steps in slow components, so called a \textit{multirate
  method}. The multirate approach can potentially reduce the number of
synchronization points between components keeping the discretization
error within acceptable bounds. Trying to mimic the multirate idea we
applied a fixed step size ratio knowing that electrical and
biochemical components have an approximate time ratio 1 to 100, that
is $h_{chem,j+1}/h_{elec,j+1} =1/100 $ for each macro time step
$H_{j+1}$. Thus we forced electrical component to take 100 steps per
one step of a biochemical one before the synchronization
occurred. Unfortunately, this did not reduce the number of
synchronization points significantly, approximately by 2\%. However
the larger amount of micro steps used in electrical component caused
the macro step to be rejected very often, leading to an almost
insignificant change in the number of synchronization points and an
enormous increase in function evaluations. One of the possible
explanations can be that the error accumulated between the
synchronization points started to dominate over the discretization
error. The latter caused the step size controller to reduce the length
of steps in order to keep the local error within acceptable bounds.

Furthermore, the lower ratios were applied using Jacobi and
Gauss-Seidel organizations. The results confidently showed a slight
decrease of synchronization points paid for by a much more significant
increase in a computational cost.

A fixed ratio applied in this experiment can not be considered as an
optimal multirate configuration test case. The activity of different
components during the simulation has to be considered. For example, in
our test multiscale system we can determine which system component
sets the limit for the next step size. It is the dynamics of the
electrical component that is computationally expensive due to the
required finer discretization during the stimulation interval. However
it is the chemical component that defines the step size during the
larger part of the simulation time. Thus the electrical component is
an active one during the stimulation and the biochemical is an active
component for the rest of the simulation. Forcing a fixed ratio of
step sizes along the whole simulation time can cause an extra
computational cost. Possibly, considering an altering activity of the
components, an application of the dynamically adjusted ratio may speed
up the simulation. At the same time, considering that the 90\% of the
taken steps resided in the stimulation time interval in our system we
do not expect an application of adjusted ratio to cause a significant
change with respect to the computational cost.

\subsection*{Recapitulation and future directions}
The proposed coupling methods and conclusions are valid for the
systems given by ODEs and DAEs. The paper did not address the
numerical questions and problems that can arise while coupling the
models described by different mathematical formalisms, for instance
stochastic-deterministic coupling.

In our test multiscale system the most computationally expensive
component was active only 1/9 of the total simulation time. An
application of the proposed methods to a larger biological system
should be considered in the future.

In Figure~\ref{fig:changedrelTol - With Fixed} we present our best
results obtained with the adaptive step size BDF2-BDF2 coupling method
in comparison with the fixed step size mCN-RK4 coupling. The BDF2-BDF2
coupling in Mode~3 with Jacobi organization (the curve with the
``square'' markers) showed equally accurate results to the BDF2-BDF2
coupling in Mode~1 with Gauss-Seidel organization where the fast
system was solved first (the curve with the ``asterisk'' markers). Both
methods allowed us to obtain solution in a much more efficient way in
comparison with the mCN-RK4 coupling previously used in similar
multiscale
problems~\cite{bhalla2011multiscale,mattioni2013integration}.

\begin{figure}[h]
  \centering
  % \includegraphics[width=0.95\textwidth]{./figures/final_fft_new.eps}
  \caption{{\bf Efficiency comparison between the proposed methods
      (``square'' and ``asterisk'' markers) and the methods considered so
      far in neuroscience (``circle'' markers) to solve multiscale
      problems.} The datapoints represent the solutions obtained with
    $relTol=\{10^{-3};~5\cdot 10^{-4};~5\cdot 10^{-5};~5\cdot
    10^{-6}\}$ and on the grid size with
    $h_{elec}=h_{chem}=\{1.125\cdot 10^{-4};~5.625\cdot
    10^{-5};~28.125\cdot 10^{-6}\}$, accordingly. An extra datapoint
    with $relTol=5\cdot 10^{-7}$ is plotted for the solution obtained
    with Jacobi organization in Mode~3. The dashed lines correspond to
    the first and second order declines.}
  \label{fig:changedrelTol - With Fixed}
\end{figure}


\section*{Supporting Information}

% Include only the SI item label in the subsection heading. Use the
% \nameref{label} command to cite SI items in the text.
% \subsection*{S1 Video}
% \label{S1_Video} {\bf Bold the first sentence.}  Maecenas convallis
% mauris sit amet sem ultrices gravida. Etiam eget sapien nibh. Sed ac
% ipsum eget enim egestas ullamcorper nec euismod ligula. Curabitur
% fringilla pulvinar lectus consectetur pellentesque.

% \subsection*{S1 Text}
% \label{S1_Text} {\bf Lorem Ipsum.} Maecenas convallis mauris sit
% amet sem ultrices gravida. Etiam eget sapien nibh. Sed ac ipsum eget
% enim egestas ullamcorper nec euismod ligula. Curabitur fringilla
% pulvinar lectus consectetur pellentesque.

% \subsection*{S1 Fig}
% \label{S1_Fig} {\bf Lorem Ipsum.} Maecenas convallis mauris sit amet
% sem ultrices gravida. Etiam eget sapien nibh. Sed ac ipsum eget enim
% egestas ullamcorper nec euismod ligula. Curabitur fringilla pulvinar
% lectus consectetur pellentesque.

% \subsection*{S1 Table}
% \label{S1_Table} {\bf Lorem Ipsum.} Maecenas convallis mauris sit
% amet sem ultrices gravida. Etiam eget sapien nibh. Sed ac ipsum eget
% enim egestas ullamcorper nec euismod ligula. Curabitur fringilla
% pulvinar lectus consectetur pellentesque.

% Do NOT remove this, even if you are not including acknowledgments.
\section*{Acknowledgments}
\nolinenumbers

% \section*{References}
% Either type in your references using
% \begin{thebibliography}{}
% \bibitem{} Text
% \end{thebibliography}
%
% OR
%
% Compile your BiBTeX database using our plos2009.bst style file and
% paste the contents of your .bbl file here.
% 

% \bibliography{plos_template}
\begin{thebibliography}{10}
  \providecommand{\url}[1]{\texttt{#1}}
  \providecommand{\urlprefix}{URL } \expandafter\ifx\csname
  urlstyle\endcsname\relax
  \providecommand{\doi}[1]{doi:\discretionary{}{}{}#1}\else
  \providecommand{\doi}{doi:\discretionary{}{}{}\begingroup
    \urlstyle{rm}\Url}\fi \providecommand{\bibAnnoteFile}[1]{%
    \IfFileExists{#1}{\begin{quotation}\noindent\textsc{Key:} #1\\
        \textsc{Annotation:}\ \input{#1}\end{quotation}}{}}
  \providecommand{\bibAnnote}[2]{%
    \begin{quotation}\noindent\textsc{Key:} #1\\
      \textsc{Annotation:}\ #2\end{quotation}}
  \providecommand{\eprint}[2][]{\url{#2}}

\bibitem{bhalla2011multiscale} Bhalla US (2011) Multiscale
  interactions between chemical and electric signaling in LTP
  induction, LTP reversal and dendritic excitability.  \newblock
  Neural Networks 24: 943--949.  \bibAnnoteFile{bhalla2011multiscale}

\bibitem{mattioni2013integration} Mattioni M, Le~Nov{\`e}re N (2013)
  Integration of biochemical and electrical signaling-multiscale model
  of the medium spiny neuron of the striatum.  \newblock PloS one 8:
  e66811.  \bibAnnoteFile{mattioni2013integration}

\bibitem{skelboe2000accuracy} Skelboe S (2000) Accuracy of decoupled
  implicit integration formulas.  \newblock SIAM Journal on Scientific
  Computing 21: 2206--2224.  \bibAnnoteFile{skelboe2000accuracy}

\bibitem{shukla2009seamless} Shukla J (2009) Seamless prediction of
  weather and climate: A new paradigm for modeling and prediction
  research.  \newblock In: Climate Test Bed Joint Seminar Series.
  \bibAnnoteFile{shukla2009seamless}

\bibitem{kurowski2013toward} Kurowski MJ, Grabowski WW, Smolarkiewicz
  PK (2013) Toward multiscale simulation of moist flows with
  soundproof equations.  \newblock Journal of the Atmospheric Sciences
  70: 3995--4011.  \bibAnnoteFile{kurowski2013toward}

\bibitem{hernandez2011integration} Hern{\'a}ndez AI, Le~Rolle V, Ojeda
  D, Baconnier P, Fontecave-Jallon J, et~al.  (2011) Integration of
  detailed modules in a core model of body fluid homeostasis and blood
  pressure regulation.  \newblock Progress in biophysics and molecular
  biology 107: 169--182.  \bibAnnoteFile{hernandez2011integration}

\bibitem{bhalla2014multiscale} Bhalla US (2014) Multiscale modeling
  and synaptic plasticity.  \newblock Progress in molecular biology
  and translational science 123: 351.
  \bibAnnoteFile{bhalla2014multiscale}

\bibitem{djurfeldt2007workshop} Djurfeldt M, Lansner A (2007) Workshop
  report: 1st incf workshop on large-scale modeling of the nervous
  system .  \bibAnnoteFile{djurfeldt2007workshop}

\bibitem{kubler2000two} K{\"u}bler R, Schiehlen W (2000) Two methods
  of simulator coupling.  \newblock Mathematical and Computer
  Modelling of Dynamical Systems 6: 93--113.
  \bibAnnoteFile{kubler2000two}

\bibitem{arnold2001preconditioned} Arnold M, G{\"u}nther M (2001)
  Preconditioned dynamic iteration for coupled differential-algebraic
  systems.  \newblock BIT Numerical Mathematics 41: 1--25.
  \bibAnnoteFile{arnold2001preconditioned}

\bibitem{brandi2011connecting} Brandi M, Brocke E, Talukdar HA, Hanke
  M, Bhalla US, et~al. (2011) Connecting MOOSE and NeuroRD through
  MUSIC: towards a communication framework for multi-scale modeling.
  \newblock BMC Neuroscience 12: P77.
  \bibAnnoteFile{brandi2011connecting}

\bibitem{rudiger2007hybrid} R{\"u}diger S, Shuai J, Huisinga W,
  Nagaiah C, Warnecke G, et~al. (2007) Hybrid stochastic and
  deterministic simulations of calcium blips.  \newblock Biophysical
  journal 93: 1847--1857.  \bibAnnoteFile{rudiger2007hybrid}

\bibitem{alfonsi2005adaptive} Alfonsi A, Canc{\`e}s E, Turinici G,
  Di~Ventura B, Huisinga W (2005) Adaptive simulation of hybrid
  stochastic and deterministic models for biochemical systems.
  \newblock In: ESAIM: Proceedings. EDP Sciences, volume~14,
  pp. 1--13.  \bibAnnoteFile{alfonsi2005adaptive}

\bibitem{rall1964theoretical} Rall W (1964) Theoretical significance
  of dendritic trees for neuronal input-output relations.  \newblock
  Neural theory and modeling : 73--97.
  \bibAnnoteFile{rall1964theoretical}

\bibitem{hodgkin1952quantitative} Hodgkin AL, Huxley AF (1952) A
  quantitative description of membrane current and its application to
  conduction and excitation in nerve.  \newblock The Journal of
  physiology 117: 500.  \bibAnnoteFile{hodgkin1952quantitative}

\bibitem{hines1989program} Hines M (1989) A program for simulation of
  nerve equations with branching geometries.  \newblock International
  journal of bio-medical computing 24: 55--68.
  \bibAnnoteFile{hines1989program}

\bibitem{wilson1990genesis} Wilson MA, Bhalla US, Uhley JD, Bower JM
  (1990) GENESIS: A system for simulating neural networks.  \newblock
  In: Proc. of. volume~89.  \bibAnnoteFile{wilson1990genesis}

\bibitem{dudani2009multiscale} Dudani N, Ray S, George S, Bhalla US
  (2009) Multiscale modeling and interoperability in moose.  \newblock
  BMC Neuroscience 10: P54.  \bibAnnoteFile{dudani2009multiscale}

\bibitem{bhalla1998network} Bhalla US (1998) The network within:
  signaling pathways.  \newblock In: The Book of GENESIS,
  Springer. pp. 169--191.  \bibAnnoteFile{bhalla1998network}

\bibitem{hines1984efficient} Hines M (1984) Efficient computation of
  branched nerve equations.  \newblock International journal of
  bio-medical computing 15: 69--76.
  \bibAnnoteFile{hines1984efficient}

\bibitem{deuflhard2002scientific} Deuflhard P, Bornemann F (2002)
  Scientific computing with ordinary differential equations,
  volume~42.  \newblock Springer.
  \bibAnnoteFile{deuflhard2002scientific}

\bibitem{hayer2005molecular} Hayer A, Bhalla US (2005) Molecular
  switches at the synapse emerge from receptor and kinase traffic.
  \newblock PLoS computational biology 1: e20.
  \bibAnnoteFile{hayer2005molecular}

\bibitem{shampine1997matlab} Shampine LF, Reichelt MW (1997) The
  MATLAB ODE suite.  \newblock SIAM journal on scientific computing
  18: 1--22.  \bibAnnoteFile{shampine1997matlab}

\end{thebibliography}

\end{document}

