\documentclass[aps,prb,preprint,showkeys]{revtex4}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx}
%\usepackage{epsfig} % obsolete; superseded by graphicx (loaded above)
\setcounter{MaxMatrixCols}{30}
%EndMSIPreambleData
\makeatletter
\def\@dotsep{4.5pt}
\makeatother
\newtheorem{theorem}{Theorem}
\newtheorem{acknowledgement}[theorem]{Acknowledgement}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{axiom}[theorem]{Axiom}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conclusion}[theorem]{Conclusion}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{summary}[theorem]{Summary}
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\begin{document}
\preprint{NMLII/R1 - Please do not circulate}
\title[Normal Mode Langevin Dynamics]{Implementation of CNMA}
\author{Christopher R. Sweet$^{\mathrm{a}}$}
\affiliation{a) Center for Research Computing}
\keywords{Normal mode, Hessian, time coarse-graining,
Brownian dynamics, slow dynamics subspace}
\pacs{PACS number}

%--------------------------------------------------------------------------  %
%Abstract
%--------------------------------------------------------------------------  %

\begin{abstract}
The study of large proteins and biomolecules routinely requires the diagonalization
of the mass weighted Hessian of the potential energy function. There is a 
large body of evidence that indicates that this process decomposes the dynamical 
space of the molecule, according to the normal modes, to identify spaces of
dynamical interest. This process is generally expensive, with $\mathcal{O}(N^3)$
computational time and $\mathcal{O}(N^2)$ memory. We have developed a scalable
diagonalization method, Coarse-grained Normal Mode Analysis (CNMA), based on
coarse-graining the mass weighted Hessian.
CNMA is a 2-level, direct method that uses a dimensionality reduction
strategy that allows computation of low frequency modes in
$\mathcal{O}(N^{9/5})$ time and $\mathcal{O}(N)$ memory. CNMA has
been implemented in the open source software \textsc{ProtoMol}. 
Implementation details appear after the method discussion.

\end{abstract}
\volumeyear{2010}
\volumenumber{number}
\issuenumber{number}
\eid{identifier}
\date{February 16, 2010}
\received[Received text]{date}

\revised[Revised text]{date}

\accepted[Accepted text]{date}

\published[Published text]{date}

\maketitle
\tableofcontents

%--------------------------------------------------------------------------  %
% Rediagonalization scheme                                                   %
%--------------------------------------------------------------------------  %
\section{Introduction}
Motivate the need to diagonalize Hessians.

%--------------------------------------------------------------------------  %
% Rediagonalization scheme                                                   %
%--------------------------------------------------------------------------  %


\section{Coarse Grained Hessian re-diagonalization}
The Coarse-grained Normal Mode Analysis (CNMA) method seeks to reduce the
cost of diagonalizing the Hessian of the Potential Energy (PE) function of a protein
or biomolecule by using structural knowledge of the molecule. Direct diagonalization
of the Hessian matrix has computational cost of  $\mathcal{O}(N^3)$ and memory 
use grows as $\mathcal{O}(N^2)$. By contrast CNMA has computational cost of  
$\mathcal{O}(N^{9/5})$ and memory use grows as $\mathcal{O}(N)$.
Competing techniques such as Krylov subspace methods
can be effective for very sparse systems and few eigenvectors. However, convergence 
can be slow and the sparsity conditions are not met where long range electrostatic forces
need to be accurately calculated, for example in the simulation of RNA.

\subsection{Dimensionality reduction strategy.}  The coarse-graining strategy to
diagonalize a \emph{mass weighted} Hessian ${\mathbf{H}}=\mathbf{M}^{-\frac{1}{2}}\mathbf{U_{xx}}\mathbf{M}^{-\frac{1}{2}}$ 
(where $\mathbf{U_{xx}}$ is the second derivative of the Potential Energy w.r.t. positions) is based on 2 ideas. The first is
to find a reduced set of normalized column vectors, forming  $3N\times n$  matrix ${\mathbf{E}}$ (assuming $N$ atoms), whose \emph{span} contains
the $m$ low frequency column eigenvectors of interest, denoted matrix $\mathbf{C}$. The second is to find an
orthogonal set of vectors $\mathbf{V}$ with the same span as ${\mathbf{E}}$, which are
ordered according to the diagonal elements of $\mathbf{V}^{\mathrm{T}}\mathbf{H}\mathbf{V}$.  We
show that the
span of the first $m$ columns of ${\mathbf{V}}$ still contains the span of $\mathbf{C}$ and so
approximates the low frequency eigenvectors.  
The determination of ${\mathbf{V}}$ can be achieved by diagonalizing the $n\times n$ matrix  $\mathbf{S}=\mathbf{E}^{\mathrm{T}}\mathbf{H}\mathbf{E}$
with computational cost $\mathcal{O}(n^2)$, as shown in Figure \ref{strategy1}.
By the judicious choice of ${\mathbf{E}}$, and hence $n$, the method can be made $\mathcal{O}(N^{9/5})$ 
computational cost.
%To keep computational
%cost low, we form ${\mathbf{H}}$, and matrix-vector products involving $\mathbf{H}$, in
%linear cost, $\mathcal{O}(N)$. 

\begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{InnerDiagFigure.eps}
    \caption{\label{strategy1}Dimensionality reduction strategy for CNMA.}
\end{figure}

\subsection{Choice of matrix ${\mathbf{E}}$ to span the eigenvectors of interest.}
In order to form $\mathbf{E}$ we consider a new model where the protein 
consists of independent blocks made up from a number of residues. The scaling
of the method is based on a judicious choice of residues per block as discussed
in Section \ref{scale1}.

By considering 
arbitrary rotation and translation of these blocks a super-set of possible
block motions can be determined. However, this technique will ignore
important low frequency motions \emph{within} the blocks, generally related
to essential backbone and large side-chain dihedral motion. For this reason
our technique diagonalizes a \emph{modified} (see below) mass weighted Hessian for each 
individual block, $\tilde{\mathbf{H}}_{ij}$ for block $i,j$, to determine:
\begin{itemize}
\item The first 6 vectors related to the conserved degrees of freedom (due to block independence).
\item A number of additional vectors related to low frequency motions within the blocks (generally dihedrals).
\end{itemize}
Clearly such a model allows more flexibility than the full protein model,
and hence the number of columns of ${\mathbf{E}}$, $n$, is greater 
than the target number of eigenvalues $m$.

Our hypothesis is that interactions among residues responsible for the low
frequency space of interest will be included in the first few eigenvectors of the
block Hessian and need to be included in ${\mathbf{E}}$. 
The number of additional vectors for internal block motions will consist of 
two sources
\begin{itemize}
\item External (block connection) low frequency motions due to bonded interactions projected onto
the dihedral space, and will consist of 2-4 vectors due to backbone dihedrals of 
up to 2 connecting blocks.
 \item Internal low frequency motions, for instance due to side-chain dihedral
motions, also in the dihedral space. 
\end{itemize}

We choose the number of additional vectors according to a cutoff based
on their associated eigenvalue to yield $k$ vectors in total. In order to facilitate 
resolving the 6 conserved dof (with eigenvalue 0) and the remaining $k-6$ 
\emph{internal} vectors we use an approximate version of the potential energy
function that is considered to be at equilibrium. Here bonds, angles and dihedrals
are considered to be at their rest values, i.e., for a bond between atoms $i$ and $j$ we have
\begin{equation}
E_{\mathrm{BOND}_{ij}} = k_{ij}(|\mathbf{x}_i - \mathbf{x}_j| - b_0)^2,
\end{equation}
in this instance we replace $b_0$ with $|\mathbf{x}_i - \mathbf{x}_j|$.
We denote the \emph{modified} block Hessian
to be $\mathbf{\tilde{H}}_{ij}$ and note that, in the current method, the Lennard-Jones and Electrostatic forces are ignored.

We start from a block Hessian $\mathbf{\tilde{H}}$ in which each block $\mathbf{\tilde{H}}_{ij}$
(composed of 1 or more residues) is zero if $i\ne j$. The remaining
blocks on the diagonal are assumed to be independent of all other
blocks. If we assume we have $r$ blocks then
\begin{equation}
\label{blockhess2}\mathbf{\tilde{H}}=\left[
\begin{array}{cccc}
\mathbf{\tilde{H}}_{1,1} & \mathbf{0} & \cdots & \mathbf{0}\\
\mathbf{0} & \mathbf{\tilde{H}}_{2,2} & \cdots & \mathbf{0}\\
\vdots & \vdots & \ddots & \vdots\\
\mathbf{0} & \mathbf{0} & \cdots & \mathbf{\tilde{H}}_{r,r}
\end{array}\right].
\end{equation}



This block Hessian is then diagonalized, which is equivalent to
performing independent diagonalization for each block. The
block Hessian eigenvectors and eigenvalues, $\mathcal{Q}_{i}$ and
${D}_{i}$, are calculated as follows:
\[
\mathbf{\tilde{H}}_{ii}\mathcal{Q}_{i}=\mathcal{Q}_{i}{D}_{i}.
\]
%GOT HERE IN RE-WRITE
\begin{table}[htb]

\begin{center}
  \begin{tabular}{| c | c | c | c | c | c | c | c | c | c | }
    \hline
    Residue: & ARG	& PRO	& ASP	& PHE	& CYS	& LEU	& GLU	& TYR	& GLY	\\ \hline
    No. vectors: & 9	& 3	& 5	& 7	& 4	& 6	& 7	& 7	& 2	\\ \hline \hline
    Residue: & LYS	& ALA	& ILE	& ASN	& GLN	& THR	& VAL	& SER	& MET \\ \hline
    No. vectors: & 8	& 3	& 8	& 6	& 8	& 5	& 5	& 5	& 9 \\ \hline
  \end{tabular}

\label{table:resvectors}
  \small{Table 1: Number of vectors $k$, excluding the first 6
corresponding to 0 eigenvalue, selected per residue type for BPTI,
  showing that larger residues require greater numbers of vectors.}
\end{center}
\end{table}

The number of vectors of $\mathcal{Q}_{i}$ included in $\mathbf{E}$ varies
according to the residue composition.  We
expect that the eigenvectors identified above will correspond to the
first $k$ ordered eigenvalues.  The number $k$ varies between blocks
and is determined by selecting a cutoff frequency $f_c$, then
% from the block eigenvalues. Then we have
\begin{equation}
k=\left|\{\lambda_i|\lambda_i\le f_c^2\}\right|,
\end{equation}
for eigenvalues $\lambda_i$.

Alternatively an \emph{average}
number of vectors per block or block degrees of freedom, denoted `$\mathrm{bdof}$', can be chosen. The corresponding 
cutoff frequency $f_{\mathrm{bdof}}$ can be found as the $(\mathrm{bdof}\times r)^{\mathrm{th}}$ element ($r$ the number of blocks)
of the \emph{ordered} set of eigenvalues from \emph{all} blocks such that
\begin{equation}
f_{\mathrm{bdof}}^2=\lambda_{(\mathrm{bdof}\times r)}.
\end{equation}

Table~1 gives values of $k$ for BPTI, where the
$\mathrm{bdof}=12,$ (including first 6 eigenvectors) and where each
block has only 1 residue. As expected, larger residues such as ARG
require a greater number of vectors to describe their low frequency
motions than smaller ones like GLY. 

%We refer to the average number of these vectors by block as the block d.o.f. ($\mathit{bdof}$). 

To construct $\mathbf{E}$ we select the first $k$ vectors from $\mathcal{Q}_{i}$.
Given that the $i^{\mathrm{th}}$ block Hessian represents $p$ atoms then
$\mathcal{Q}_{i}$ is a $3p\times 3p$ matrix from which we select
the first $k$ column vectors to form the $3p\times k$ matrix $\mathbf{\bar{Q}}_{i}$.
The corresponding $3N\times k$ block `Eigenvector' matrix `column' is then
\begin{equation}
\label{shorteig2}\mathbf{{E}}_i=\left[
\begin{array}{c}
\mathbf{0}\\
\vdots\\
\mathbf{0}\\
\mathbf{\bar{Q}}_i\\
\mathbf{0}\\
\vdots\\
\mathbf{0}
\end{array}\right].
\end{equation}

The $3N\times n$ matrix $\mathbf{E}$ is then 
\begin{equation}
\mathbf{E} = \left[\mathbf{{E}}_1~ \mathbf{{E}}_2 \cdots\mathbf{{E}}_i\cdots \mathbf{{E}}_r\right].
\end{equation}

\subsection{`Inner' diagonalization}
We calculate the mass weighted Hessian $\mathbf{H}$ using cutoffs (with at least \emph{C}2 switches), to give a sparse matrix.
For applications with full electrostatic calculations other dimension reduction 
techniques can be used in addition to those described here.

We denote the $i^{\mathrm{th}}$ normalized eigenvector of the mass weighted
Hessian $\mathbf{H}$ as $\mathbf{v}_i$ and the matrix of column
eigenvectors $\mathbf{V}$. The eigenvectors are sorted such that
$\lambda_1\le\lambda_2\le\cdots\le\lambda_{3N}$, for their
corresponding eigenvalues. To extract approximations to these
eigenvectors we find the `inner' diagonalization as follows. We find
$n\times n$ matrix $\mathbf{S}$
\begin{equation}
\label{innermat1}\mathbf{S} = \mathbf{E}^{\mathrm{T}}\mathbf{H}\mathbf{E}.
\end{equation}
We then (cheaply) diagonalize the symmetric matrix $\mathbf{S}$ to find
orthonormal matrix $\mathbf{Q}$ s.t.
\begin{equation}
\mathbf{S}\mathbf{Q} = \mathbf{Q}\mathbf{D},
\end{equation}
for diagonal matrix $\mathbf{D}$. We can then write
\begin{equation}
\mathbf{U}^{\mathrm{T}}\mathbf{H}\mathbf{U}=\mathbf{D},
\end{equation}
for our approximate column eigenvectors $\mathbf{U}=\mathbf{E}\mathbf{Q}$ and the
diagonal of $\mathbf{D}$ representing their Rayleigh quotients. 

For our approximate eigenvectors we choose the first $m$ column vectors of the matrix $\mathbf{U}$
to form the $3N\times m$ matrix $\tilde{ \mathbf{V}}$. To determine $m$ we use the Rayleigh quotient $\sigma_i$
for the $i^{\mathrm{th}}$ column vector and assume that this is an upper bound on the corresponding 
largest eigenvalue in the space spanned by the first $i$ column vectors. The justification for
this assumption can be found in the following Section.

\subsection{Relationship between Rayleigh quotients for $\mathbf{U}$ and eigenvalues for $\mathbf{V}$.}

We denote the $i^{\mathrm{th}}$
column vector of $\mathbf{U}$ as $\mathbf{u}_i$ ordered with increasing value of the diagonal
of $\mathbf{D}$, i.e., $\mathbf{D}_{1,1}\le\mathbf{D}_{2,2}\le\cdots\le\mathbf{D}_{n,n}$.
Our subspace of dynamical interest, $C$, is then defined as the span of the first
$m$ column vectors of $\mathbf{U}$.

We define the $i^{\mathrm{th}}$ diagonal of $\mathbf{D}$ as $\sigma_i=\mathbf{D}_{i,i}$.
Then
\begin{equation}
\sigma_i = \mathbf{u}_i^{\mathrm{T}}\mathbf{H}\mathbf{u}_i.
\end{equation}
We can write each vector $\mathbf{u}$ as a linear combination of the
eigenvectors $\mathbf{v}$
\begin{equation}
\mathbf{u}_i=\alpha_1^i\mathbf{v}_1+\alpha_2^i\mathbf{v}_2+\cdots+\alpha_{3N}^i\mathbf{v}_{3N},
\end{equation}
for scalars $\alpha_1^i,\alpha_2^i,\cdots,\alpha_{3N}^i$. Then
\begin{eqnarray}
\sigma_i &=&
(\alpha_1^i)^2\mathbf{v}_1^{\mathrm{T}}\mathbf{H}\mathbf{v}_1+(\alpha_2^i)^2\mathbf{v}_2^{\mathrm{T}}\mathbf{H}\mathbf{v}_2
+\cdots+(\alpha_{3N}^i)^2\mathbf{v}_{3N}^{\mathrm{T}}\mathbf{H}\mathbf{v}_{3N},\\
&=&
(\alpha_1^i)^2\lambda_1+(\alpha_2^i)^2\lambda_2+\cdots+(\alpha_{3N}^i)^2\lambda_{3N},
\end{eqnarray}
for eigenvalues of $\mathbf{H}$, $\lambda_i$. Since our vectors are
normalized, $|\alpha_j^i|\le 1~\forall j$. If the coefficients
$\alpha_j^i$ of a vector $\mathbf{u}_i$ are zero for all vectors
$\mathbf{v}_j$ for $j<a$ and $j>b$ then we have bounds
\begin{equation}
\lambda_a\le\sigma_i\le\lambda_b.
\end{equation}

We also have
\begin{eqnarray}
\mathbf{u}_i^{\mathrm{T}}\mathbf{H}\mathbf{u}_j &=&
\alpha_1^i\alpha_1^j\mathbf{v}_1^{\mathrm{T}}\mathbf{H}\mathbf{v}_1+\alpha_2^i\alpha_2^j\mathbf{v}_2^{\mathrm{T}}\mathbf{H}\mathbf{v}_2
+\cdots+\alpha_{3N}^i\alpha_{3N}^j\mathbf{v}_{3N}^{\mathrm{T}}\mathbf{H}\mathbf{v}_{3N},\\
&=&
\alpha_1^i\alpha_1^j\lambda_1+\alpha_2^i\alpha_2^j\lambda_2+\cdots+\alpha_{3N}^i\alpha_{3N}^j\lambda_{3N}~=~0,
\end{eqnarray}
and
\begin{eqnarray}
\mathbf{u}_i^{\mathrm{T}}\mathbf{u}_j &=&
\alpha_1^i\alpha_1^j+\alpha_2^i\alpha_2^j+\cdots+\alpha_{3N}^i\alpha_{3N}^j~=~0.
\end{eqnarray}

For simplicity we will assume that the system has no conserved d.o.f. (the nullspace of
$\mathbf{H}$ has dimension zero) then
\begin{equation}
\alpha_k^i\alpha_k^j = 0 ~\forall k.
\end{equation}
This shows that each $\mathbf{u}_i$ is a linear combination of a unique set
of  eigenvectors of $\mathbf{H}$.

Given an eigenvector $\mathbf{v}_i$ of $\mathbf{H}$, if $\mathbf{v}_i\in C$ and 
$\mathbf{v}_i\notin C^{\bot}$ then, from the above, $\exists \mathbf{u}_l=\mathbf{v}_i$
for some $l$ and hence $\exists \sigma_l=\lambda_i$. From this, given that the $\mathbf{u}_j\in C$
are ordered s.t. $\sigma_1\le\sigma_2\le\cdots\le\sigma_m$ then the highest frequency 
in $C$, $f_{\mathrm{max}}$, satisfies 
\begin{equation}
f_{\mathrm{max}} \le \sqrt{\sigma_m}.
\end{equation}
Note: Where we have some eigenvector $\mathbf{v}_i\in C$ and
$\mathbf{v}_i\in C^{\bot}$ then the damping/relaxation of the 
dynamics projected onto $\mathbf{v}_i$ in $C^{\bot}$ will
preserve the stability of the method.

\subsection{Scaling}
\label{scale1}
We make the following two assumptions in our method that relate to scaling:
\begin{itemize}
\item[1] \textbf{The number of approximate eigenvectors that we want to calculate, $m$, remains constant for all
model sizes}. This is consistent with our current simulation protocol where 10-12 vectors are used.
\item[2] \textbf{The average number of vectors per block (\emph{bdof}) is equal to the number of approximate eigenvectors
$m$}. We observe that this would be true in the limit of dividing the model into only one block. For a model divided
into $b$ \emph{de-coupled} blocks this can also be shown to be true.
\end{itemize}

The scaling with time for the `brute force' Lapack diagonalization method is known to be
$\mathcal{O}(N^3)$. For the coarse grained CNMA method using $b$ blocks we have the
cost of diagonalizing all $b$ blocks as $\mathcal{O}((N/b)^3)\times b = \mathcal{O}(N^3/b^2)$ and for the
small projected matrix as $\mathcal{O}(b^3),$ which has a minimum
cost when $b\propto N^{3/5},$ giving an estimated cost
of $\mathcal{O}(N^{9/5})$. This is borne out by the numerical
evidence. For the coarse grained method the
RAM resource usage is reduced from the `brute force' scaling of
$\mathcal{O}(N^2)$ to $\mathcal{O}(N)$. 

Results for diagonalizing a number of different models, from ww-fip35 to F1-ATPase
can be seen in Figure \ref{dscaling} and Table \ref{TABLE2}. The slope of
the line is $\frac{9}{5}$ as predicted from the theory above.

\begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{diagonalizeScalingTime.eps}
    \caption{\label{dscaling}Scaling results for CNMA.}
\end{figure}

\begin{table}
\begin{center}
  \begin{tabular}{| r | r | r | r | r | r |}
    \hline
    Molecule & No. Atoms & Brute force & & Coarse grained & \\
    &  & Time [s] & RAM [Gb] & Time [s] &  RAM [Gb]\\ \hline
    WWd & 551 & 14.4 & 0.044 & 0.37 & 0.012 \\ \hline
    BPTI & 882 & 59.9 & 0.112 & 0.89 & 0.031 \\ \hline
    Calmodulin & 2262 & 980.6 & 0.737 & 3.89 & 0.120 \\ \hline
    Tyr Kinase & 7214 & 31450.0 & 7.490 & 31.90 & 0.690 \\ \hline
    F1-ATPase & 51181 & 11.2$\times 10^6$ (est.) & 377 (est.) & 2156.00 & 12.04 \\ \hline
  \end{tabular}
\end{center}
  \caption{\label{TABLE2}Comparison of the `brute force'
diagonalization and the coarse grained method for different atomic
models.}
\end{table}

\section{Implementation}
The implementation resides in the following source files in {\emph{/src/protomol/integrator/hessian}}
{\scriptsize%small
\begin{verbatim}
BlockHessian.cpp				//Block Hessian class, finds blocks based on a number of adjacent residues
BlockHessian.h			
BlockHessianDiagonalize.cpp		//diagonalize the Block Hessian representation of the full Hessian
BlockHessianDiagonalize.h		
HessianInt.cpp 					//example calling class
HessianInt.h 
LapackProtomol.h				//Lapack defines
\end{verbatim}
}
and uses the following files in {\emph{/src/protomol/type/}}
{\scriptsize%small
\begin{verbatim}
BlockMatrix.cpp	//class for matrices forming arbitrary (contiguous) parts of a larger matrix
BlockMatrix.h		//includes product/sum etc. Supports Lapack where available
\end{verbatim}
}

\section{Mass weighting the Hessian}
Diagonalizing the system Hessian $\mathcal{H}$ will yield vectors that describe collective motion `directions'. 
However, if we consider the system Hamiltonian and use the resulting eigenvectors for a change of
coordinates then we see that the kinetic energy term has introduced coupling through the new `mass' matrix
which is no longer diagonal. Ideally we want both the quadratic term of the potential energy
and the `mass' matrix to be diagonal to form a system of de-coupled oscillators (up to the cubic terms). 

The easiest way of accomplishing this is to do a change of variables $\tilde{x}=\mathbf{M}^{\frac{1}{2}}x$
to remove the mass matrix from the kinetic energy term. The Hessian for the new variables is 
then $\mathbf{H}=\mathbf{M}^{-\frac{1}{2}}\mathcal{H}\mathbf{M}^{-\frac{1}{2}}$ or, by components
\begin{equation}
\mathbf{H}_{ij} = \frac{1}{\sqrt{m_i}\sqrt{m_j}}\frac{\partial^2\mathcal{H}}{\partial x_i\partial x_j}.
\end{equation}

We can re-write Eqn. (\ref{innermat1}) as
\begin{equation}
\label{nativeinner}
\mathbf{S} = \mathcal{E}^{\mathrm{T}}\mathcal{H}\mathcal{E},
\end{equation}
where $\mathcal{E}=\mathbf{M}^{-\frac{1}{2}}\mathbf{E}$.

\section{Metrics}
To measure how close the vectors produced from the approximate methods are to
the basis set for $C$ that we require, we use the following.

We are given a set of $m$ vectors, $\mathbf{v}_1, \mathbf{v}_2,\cdots,\mathbf{v}_m$, which form an 
orthonormal basis set for our subspace of interest $C$ (for example the first $m$ eigenvectors from a `brute force' 
diagonalization of the mass weighted system Hessian). Given a unit `approximate eigenvector' $\mathbf{u}_j$
we project onto $C$ to find vector $\tilde{\mathbf{u}}_j$
\begin{equation}
\tilde{\mathbf{u}}_j = \alpha_1\mathbf{v}_1 + \alpha_2\mathbf{v}_2 + \cdots + \alpha_m\mathbf{v}_m,
\end{equation}
where $\alpha_i = \mathbf{v}_i.\mathbf{u}_j$. Then
\begin{equation}
||\tilde{\mathbf{u}}_j-\mathbf{u}_j||_2 = \sqrt{1-\sum_i \alpha_i^2},
\end{equation}
and hence
\begin{equation}
\sum_i \alpha_i^2 = 1 \implies \tilde{\mathbf{u}}_j = \mathbf{u}_j.
\end{equation}

Our requirement that the `approximate eigenvectors' form a basis set for $C$ would 
coincide with the norm of each of these unit vectors $\{\mathbf{u}_j\}$, projected onto $C$, being unity.

\section{Numerical Hessians}
Eqn. (\ref{nativeinner}) now just looks like a change of variables, so calculating $\mathbf{S}$ 
numerically looks feasible at a cost of $\mathcal{O}(Nn)$. I assume this is what Peter is 
doing:

\begin{itemize}
\item Find $\nabla_{} U(\mathbf{x}_0)$ at the position we are diagonalizing $\mathbf{x}_0$.
\item For each of the $n$ columns of $\mathbf{S}$, for the $i^{\mathrm{th}}$ column, find position $\mathbf{x}_i=\mathbf{x}_0+\epsilon\,\mathcal{E}_i$,
where ${\epsilon}$ is a small scalar increment. Then calculate $\nabla_{} U(\mathbf{x}_i)$.
\item The $i^{\mathrm{th}}$ column of $\mathbf{S}$ is then $\frac{1}{\epsilon}\mathcal{E}^{\mathrm{T}}\left(\nabla_{} U(\mathbf{x}_i)-\nabla_{} U(\mathbf{x}_0)\right)$.
\end{itemize}

My approach here would be to implement the numerical solution for $\mathbf{S}$ within
the current framework in Protomol. This would give us a good basis for testing the changes one step at a time.

I have some concerns with the approach Peter is (I think) taking at the moment. At this point we do
not know if solving for $\mathbf{E}$ geometrically for the dihedral space will give good results. Also
including all dihedrals will not obtain the $\mathcal{O}\left(N^{\frac{9}{5}}\right)$ scaling.
If we couple this with the numerical $\mathbf{S}$ it will not be clear which change caused which result.

I really like the idea of obtaining the conserved d.o.f. geometrically as this will overcome some of the
problems that occurred when we tried to extend the method to $\mathcal{O}\left(N\log N\right)$ as the
$n^{\mathrm{th}},~ n>1$ iteration has sufficiently large blocks that we cannot use our trick of assuming
the forces are at a minimum as electrostatics become important.

\subsection{Results for Coarse Grained method and Numerical  $\mathbf{S}$}
I implemented the numerical generation of $\mathbf{S}$ as described above. For
comparison I used a `full' diagonalization on the ww-fip35 to generate a set of 12 reference 
eigenvectors (whose span is $C$). I then compared the results of the `coarse grained' method by projecting
each of the new (normalized) approximate eigenvectors onto the span of the reference set,
and measuring the resulting length. This is shown in Figure \ref{coarsenorms1}.

\begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainednorms.eps}
    \caption{\label{coarsenorms1}Norm of approximate eigenvectors projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

This is then repeated using the coarse grained method coupled
with the generation of $\mathbf{S}$ numerically. Results were obtained for 20 {\emph{bdof}}
for different $\epsilon$, as shown in Figure \ref{coarsenormsnum1}.

\begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainednormsnum.eps}
    \caption{\label{coarsenormsnum1}Norm of approximate eigenvectors generated
    from a numerically derived $\mathbf{S}$ with 20 {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

Finally the test is repeated with the optimal $\epsilon$ for 14, 16 and 20 {\emph{bdof}}
as seen in Figure \ref{coarsenormsnum2}.

\begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainednormsnum2.eps}
    \caption{\label{coarsenormsnum2}Norm of approximate eigenvectors generated
    from a numerically derived $\mathbf{S}$ with varying {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

\subsection{Observations}
The original `coarse grained' method behaves much as expected. The really interesting result
is that the numerical $\mathbf{S}$ version of the code performs better than the analytical, 
presumably due to the lack of approximation in the calculation of the block Hessian 
(introduces errors in the electrostatics).

Replacing the calculation of the full mass-weighted Hessian followed by the
quadratic product with the block vectors with the direct numerical calculation of $\mathbf{S}$
appears to work, based on these results.

\subsection{Results for Geometric Conserved Degrees of Freedom ({\emph{cdof}})}
\subsubsection{Translation}
I calculate the translational conserved degrees of freedom $T_i,~i\in \{1,2,3\}$, as
\begin{eqnarray}
T_1 = \left\{\frac{\sqrt{m_1}}{n}, 0, 0, \frac{\sqrt{m_2}}{n}, 0, 0, \cdots, \frac{\sqrt{m_N}}{n}, 0, 0\right\},\\
T_2 = \left\{0,\frac{\sqrt{m_1}}{n}, 0, 0, \frac{\sqrt{m_2}}{n}, 0,  \cdots, 0,\frac{\sqrt{m_N}}{n}, 0\right\},\\
T_3 = \left\{0,0,\frac{\sqrt{m_1}}{n}, 0, 0, \frac{\sqrt{m_2}}{n},\cdots, 0,0, \frac{\sqrt{m_N}}{n}\right\}.
\end{eqnarray}
where
\begin{eqnarray}
n = \sqrt{m_1+m_2+\cdots+m_N}~.
\end{eqnarray}

For each block, for the ww-fip35 minimized model, the method found 3 vectors that 
were `close' to either $T_1$, $T_2$ or $T_3$. By `close' we mean that the resulting
vector, after being `orthogonalized' against them, had a norm of $<$ 0.001. This is really
a confidence test.

The results for 16 {\emph{bdof}} for the analytical method, the numerical S method
and the partial (translational) geometric $\mathcal{E}$ can be found in Figure \ref{partgeom1}.
The  partial (translational) geometric $\mathcal{E}$ gives some improvement but
removing the `minimized block' method is still inferior. Presumably the addition of
the remaining conserved dof will help.


 \begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainedpartgeom1.eps}
    \caption{\label{partgeom1}Norm of approximate eigenvectors with both analytical and 
    numerically derived $\mathbf{S}$ compared to partial (translational) geometric $\mathcal{E}$ with 16 {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

\subsubsection{Rotation}
I calculate the rotational degrees of freedom as the cross product of the rotation vector and
the difference between some central point and the atom position. If the rotation vector is normalized
the resulting vector has magnitude equal to the orthogonal distance to the rotation vector, as required.

Then $R_i~i\in \{1,2,3\}$ are given by
\begin{eqnarray}
R_i = \frac{\hat{R}_i}{||\hat{R}_i||},~\hat{R}_i=\{r_{i,1},r_{i,2},\cdots,r_{i,N}\},
\end{eqnarray}
where
\begin{eqnarray}
r_{1,j} = \sqrt{m_j}\{0,d_{j,z},-d_{j,y}\},\\
r_{2,j} = \sqrt{m_j}\{-d_{j,z},0,d_{j,x}\},\\
r_{3,j} = \sqrt{m_j}\{d_{j,y},-d_{j,x},0\},
\end{eqnarray}
for vector $d$, with $xyz$ coordinates, representing the difference between the atom position and the center.

The choice of center is interesting. For our mass weighted system it requires that we use the {\emph{center of mass}}
if the rotational vectors are to be orthogonal to the translational vectors. However we note that the rotational
vector set is not orthogonal. My approach is to use $T_1,T_2,T_3,R_1$ as the first 4 vectors and generate two
vectors $\tilde{R}_2,\tilde{R}_3$ orthogonal to $R_1$ and $\tilde{R}_2$ respectively, based on $R_2,R_3$.

The first test is our {\emph{confidence}} test by finding how many vectors are removed from the diagonalized
set as described above. This worked as expected, typically removing 6 vectors, with an example output from the code as follows (where
the final norm is the value we would have to multiply the residual vector by to normalize it):

\begin{verbatim}
Hint:  Final norm was high = 8.7e+11 ,skipping.
Hint:  Final norm was high = 4.2e+11 ,skipping.
Hint:  Final norm was high = 8.1e+11 ,skipping.
Hint:  Final norm was high = 3.2e+11 ,skipping.
Hint:  Final norm was high = 2.8e+12 ,skipping.
Hint:  Final norm was high = 1.4e+12 ,skipping.
\end{verbatim}

The results for 16 {\emph{bdof}} for the analytical method, the numerical S method
and the partial (translational/rotational) geometric $\mathcal{E}$ can be found in Figure \ref{partgeom2}.
The  partial (translational/rotational) geometric $\mathcal{E}$ has improved
the `minimized block' method compared to the partial (translational) geometric $\mathcal{E}$.

 \begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainedpartgeom2.eps}
    \caption{\label{partgeom2}Norm of approximate eigenvectors with both analytical and 
    numerically derived $\mathbf{S}$ compared to partial (translational/rotational) geometric $\mathcal{E}$ with 16 {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

I also compared the results for 14 {\emph{bdof}} for the analytical and numerical $\mathbf{S}$ with 
the partial (translational/rotational) geometric $\mathcal{E}$. These can be seen in Figure \ref{partgeom14} with
a comparison to the best results for  16 {\emph{bdof}}.

 \begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainedpartgeom14.eps}
    \caption{\label{partgeom14}Norm of approximate eigenvectors with both analytical and 
    numerically derived $\mathbf{S}$ compared to partial (translational/rotational) geometric $\mathcal{E}$ with 14 {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

Finally I compare different number of  {\emph{bdof}}  for the partial (translational/rotational) geometric $\mathcal{E}$ in Figure  \ref{partgeom3}.

 \begin{figure}[h]
\centering
    \includegraphics[width=4.8in]{coarsegrainedpartgeomall.eps}
    \caption{\label{partgeom3}Norm of approximate eigenvectors with both analytical and 
    numerically derived $\mathbf{S}$ compared to partial (translational/rotational) geometric $\mathcal{E}$ with varying  {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}

\subsection{Numerical Block Hessians}
\label{numblkhess}
\subsubsection{Calculation of Block Hessians}
The Block Hessians were calculated as, for block $b$,
\begin{equation}
\mathbf{H}_{i,j\in b} = \frac{ \nabla U(x_j)_i-\nabla U(x_0)_i}{\epsilon_n\sqrt{m_im_j}},
\end{equation}
where $\nabla U(x_j)_i$ is the $i^{\mathrm{th}}$ element of the gradient of the potential energy $U$ with respect to the positions (i.e.\ the negative of the force),
and $x_j$ is the position vector
in which the $j^{\mathrm{th}}$ component of $x_0$ is perturbed by the small scalar value $\epsilon_n$.
Note that the range of $i$ and $j$ is dependent on $b$.
Values of $\epsilon_n$ in the range of $10^{-9}$ to $10^{-12}$ were found to produce good results.

\subsubsection{Numerical Block Hessians with Rotation/Translation and numerical $\mathbf{S}$}

Tests were carried out with a numerically derived $\mathbf{S}$, geometric $\mathcal{E}$ and \emph{numeric} block Hessians with varying {\emph{bdof}}.
Results look encouraging with improvements over preceding methods. Good results were obtained even with only 12 \emph{bdof}.

 \begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{coarsegrainedFullNumeric1.eps}
    \caption{\label{fullnumeric1}Norm of approximate eigenvectors with both analytical and 
    numerically derived $\mathbf{S}$ compared to geometric $\mathcal{E}$ and \emph{numeric} block Hessians with varying  {\emph{bdof}}, projected onto $C$, 
    the span of the eigenvectors.}
\end{figure}


\section{Implementation}

\subsection{Current Protomol implementation}
The current implementation for calculating the `small' block Hessians numerically is as follows.

For each block (a fixed number of residues) do the following
\begin{itemize}
\item Calculate the force at the diagonalization position $\mathbf{x}_0$.
\item For each degree of freedom, $i$, calculate the force at $\mathbf{x}_0 + \epsilon_i$ as described in Section \ref{numblkhess}.
\item Form the `small' block Hessian using difference equations.
\end{itemize}

This was accomplished by writing a force calculation routine in the Block Hessian code, which does not support all of the
force field variations we require (implicit solvent etc.).

\subsection{Proposed Protomol method}
After discussion with James (Post meeting with Jesus), we came up with the following method that could be extended to OpenMM (but tested in Protomol).

The method requires that we write a modified switching function, C2BlockSwitch, which (in addition to its normal characteristics) has value zero
if the atoms are in different blocks and one otherwise.

All of the `small' block Hessians can now be calculated simultaneously (up to full parallelization). The algorithm is as follows.
\begin{itemize}
\item Calculate the force at the diagonalization position $\mathbf{x}_0$, using C2BlockSwitch.
\item For each sequential degree of freedom, $i$, for blocks, $j$, calculate the force at $\mathbf{x}_0 + \epsilon_{1_i}+ \epsilon_{2_i}+ \cdots + \epsilon_{b_i}$, as described in Section \ref{numblkhess}.
\item For each block, form the  `small' block Hessian  using difference equations using only the relevant section of the forces. This
is possible as the blocks are independent by the use of C2BlockSwitch.
\end{itemize}

This can be ported to OpenMM by adding the `block number' to the atom data and modifying the current OpenMM switches.






%-------------------------------------------------------------------------  %
%Appendix.                                                                  %
%-------------------------------------------------------------------------  %


%-------------------------------------------------------------------------  %
%Bibliography.                                                              %
%-------------------------------------------------------------------------  %


\bibliographystyle{apsrev}
\bibliography{lcls,nmbib}

\end{document}
