\documentclass[aps,prb,preprint,showkeys]{revtex4}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx}
%\usepackage{epsfig}% obsolete: graphicx (loaded above) already provides \includegraphics
\setcounter{MaxMatrixCols}{30}
%EndMSIPreambleData
\makeatletter
\def\@dotsep{4.5pt}
\makeatother
\newtheorem{theorem}{Theorem}
\newtheorem{acknowledgement}[theorem]{Acknowledgement}
\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{axiom}[theorem]{Axiom}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conclusion}[theorem]{Conclusion}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{criterion}[theorem]{Criterion}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{problem}[theorem]{Problem}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{solution}[theorem]{Solution}
\newtheorem{summary}[theorem]{Summary}
\newenvironment{proof}[1][Proof]{\noindent\textbf{#1.} }{\ \rule{0.5em}{0.5em}}
\begin{document}
\preprint{NMLII/R1 - Please do not circulate}
\title[Normal Mode Langevin Dynamics]{Implementation of CNMA}
\author{Christopher R. Sweet$^{\mbox{a}}$}
\affiliation{a) Center for Research Computing}
\keywords{Normal mode, Hessian, time coarse-graining,
Brownian dynamics, slow dynamics subspace}
\pacs{PACS number}

%--------------------------------------------------------------------------  %
%Abstract
%--------------------------------------------------------------------------  %

\begin{abstract}
The study of large proteins and biomolecules routinely requires the diagonalization
of the mass weighted Hessian of the potential energy function. There is a 
large body of evidence that indicates that this process decomposes the dynamical 
space of the molecule, according to the normal modes, to identify spaces of
dynamical interest. This process is generally expensive, with $\mathcal{O}(N^3)$
computational time and $\mathcal{O}(N^2)$ memory. We have developed a
diagonalization method, based on coarse-graining the mass weighted Hessian, that is scalable.
CNMA is a 2-level, direct method that uses a dimensionality reduction
strategy that allows computation of low frequency modes in
$\mathcal{O}(N^{9/5})$ time and $\mathcal{O}(N)$ memory. Coarse-grained
Normal Mode Analysis (CNMA) has
been implemented in the open source software \textsc{ProtoMol}. 
Implementation details appear after the method discussion.

\end{abstract}
\volumeyear{2010}
\volumenumber{number}
\issuenumber{number}
\eid{identifier}
\date{February 16, 2010}
\received[Received text]{date}

\revised[Revised text]{date}

\accepted[Accepted text]{date}

\published[Published text]{date}

\maketitle
\tableofcontents

%--------------------------------------------------------------------------  %
% Rediagonalization scheme                                                   %
%--------------------------------------------------------------------------  %
\section{Introduction}
Motivate the need to diagonalize Hessians.

%--------------------------------------------------------------------------  %
% Rediagonalization scheme                                                   %
%--------------------------------------------------------------------------  %


\section{Coarse Grained Hessian re-diagonalization}
The Coarse-grained Normal Mode Analysis (CNMA) method seeks to reduce the
cost of diagonalizing the Hessian of the Potential Energy (PE) function of a protein
or biomolecule by using structural knowledge of the molecule. Direct diagonalization
of the Hessian matrix has computational cost of  $\mathcal{O}(N^3)$ and memory 
use grows as $\mathcal{O}(N^2)$. By contrast CNMA has computational cost of  
$\mathcal{O}(N^{9/5})$ and memory use grows as $\mathcal{O}(N)$.
Competing techniques, such as Krylov subspace methods,
can be effective for very sparse systems and few eigenvectors. However, convergence 
can be slow, and the sparsity conditions are not met where long-range electrostatic forces
need to be accurately calculated, for example in the simulation of RNA.

\subsection{Dimensionality reduction strategy.}  The coarse-graining strategy to
diagonalize a \emph{mass weighted} Hessian ${\mathbf{H}}=\mathbf{M}^{-\frac{1}{2}}\mathbf{U_{xx}}\mathbf{M}^{-\frac{1}{2}}$ 
(where $\mathbf{U_{xx}}$ is the second derivative of the Potential Energy w.r.t. positions) is based on 2 ideas. The first is
to find a reduced set of normalized column vectors, forming  $3N\times n$  matrix ${\mathbf{E}}$ (assuming $N$ atoms), whose \emph{span} contains
the $m$ low frequency column eigenvectors of interest, denoted matrix $\mathbf{C}$. The second is to find an
orthogonal set of vectors $\mathbf{V}$ with the same span as ${\mathbf{E}}$, which are
ordered according to the diagonal elements of $\mathbf{V}^{\mathrm{T}}\mathbf{H}\mathbf{V}$.  We
show that the
span of the first $m$ columns of ${\mathbf{V}}$ still spans $\mathbf{C}$ and so
approximates the low frequency eigenvectors.  
The determination of ${\mathbf{V}}$ can be achieved by diagonalizing the $n\times n$ matrix $\mathbf{S}=\mathbf{E}^{\mathrm{T}}\mathbf{H}\mathbf{E}$
at a computational cost of $\mathcal{O}(n^3)$, as shown in Figure~\ref{strategy1}.
By a judicious choice of ${\mathbf{E}}$, and hence $n$, the method can be made $\mathcal{O}(N^{9/5})$
in computational cost.
%To keep computational
%cost low, we form ${\mathbf{H}}$, and matrix-vector products involving $\mathbf{H}$, in
%linear cost, $\mathcal{O}(N)$. 

\begin{figure}[ht]
\centering
    \includegraphics[width=4.8in]{InnerDiagFigure.eps}
    \caption{\label{strategy1}Dimensionality reduction strategy for CNMA.}
\end{figure}

\subsection{Choice of matrix ${\mathbf{E}}$ to span the eigenvectors of interest.}
In order to form $\mathbf{E}$ we consider a new model where the protein 
consists of independent blocks made up from a number of residues. The scaling
of the method is based on a judicious choice of residues per block as discussed
in Section \ref{scale1}.

By considering 
arbitrary rotation and translation of these blocks a super-set of possible
block motions can be determined. However, this technique will ignore
important low frequency motions \emph{within} the blocks, generally related
to essential backbone and large side-chain dihedral motion. For this reason
our technique diagonalizes a \emph{modified} (see below) mass weighted Hessian for each 
individual block, $\tilde{\mathbf{H}}_{ij}$ for block $i,j$, to determine:
\begin{itemize}
\item The first 6 vectors related to the conserved degrees of freedom (due to block independence).
\item A number of additional vectors related to low frequency motions within the blocks (generally dihedrals).
\end{itemize}
Clearly such a model allows more flexibility than the full protein model,
and hence the number of columns of ${\mathbf{E}}$, $n$, is greater 
than the target number of eigenvalues $m$.

Our hypothesis is that interactions among residues responsible for the low
frequency space of interest will be included in the first few eigenvectors of the
block Hessian and need to be included in ${\mathbf{E}}$. 
The number of additional vectors for internal block motions will consist of 
two sources
\begin{itemize}
\item External (block connection) low frequency motions due to bonded interactions projected onto
the dihedral space, and will consist of 2-4 vectors due to backbone dihedrals of 
up to 2 connecting blocks.
 \item Internal low frequency motions, for instance due to side-chain dihedral
motions, also in the dihedral space. 
\end{itemize}

We chose the number of additional vectors according to a cutoff based
on their associated eigenvalue to yield $k$ vectors in total. In order to facilitate 
resolving the 6 conserved dof (with eigenvalue 0) and the remaining $k-6$ 
\emph{internal} vectors we use an approximate version of the potential energy
function that is considered to be at equilibrium. Here bonds, angles and dihedrals
are considered to be at their rest values, i.e., for a bond between atoms $i$ and $j$ we have
\begin{equation}
E_{\mathrm{BOND}_{ij}} = k_{ij}(|\mathbf{x}_i - \mathbf{x}_j| - b_0)^2,
\end{equation}
in this instance we replace $b_0$ with $|\mathbf{x}_i - \mathbf{x}_j|$.
We denote the \emph{modified} block Hessian
to be $\mathbf{\tilde{H}}_{ij}$ and note that, in the current method, the Lennard-Jones and Electrostatic forces are ignored.

We start from a block Hessian $\mathbf{\tilde{H}}$ in which each block $\mathbf{\tilde{H}}_{ij}$
(composed of 1 or more residues) is zero if $i\ne j$. The remaining
blocks on the diagonal are assumed to be independent of all other
blocks. If we assume we have $r$ blocks then
\begin{equation}
\label{blockhess2}\mathbf{\tilde{H}}=\left[
\begin{array}{ccccc}
\mathbf{\tilde{H}}_{1,1} & \mathbf{0} & \cdots & & \mathbf{0}\\
\mathbf{0} & \mathbf{\tilde{H}}_{2,2} & \cdots & & \mathbf{0}\\
\vdots & \vdots & \vdots & & \vdots\\
\mathbf{0} & \mathbf{0} & \cdots & & \mathbf{0}\\
\mathbf{0} & \cdots & & \mathbf{0} & \mathbf{\tilde{H}}_{r,r}
\end{array}\right].
\end{equation}



This block Hessian is then diagonalized, which is equivalent to
performing independent diagonalization for each block. The
block Hessian eigenvectors and eigenvalues, $\mathcal{Q}_{i}$ and
${D}_{i}$, are calculated as follows:
\[
\mathbf{\tilde{H}}_{ii}\mathcal{Q}_{i}=\mathcal{Q}_{i}{D}_{i}.
\]
%GOT HERE IN RE-WRITE
\begin{table}[htb]

\begin{center}
  \begin{tabular}{| c | c | c | c | c | c | c | c | c | c | }
    \hline
    Residue: & ARG	& PRO	& ASP	& PHE	& CYS	& LEU	& GLU	& TYR	& GLY	\\ \hline
    No. vectors: & 9	& 3	& 5	& 7	& 4	& 6	& 7	& 7	& 2	\\ \hline \hline
    Residue: & LYS	& ALA	& ILE	& ASN	& GLN	& THR	& VAL	& SER	& MET \\ \hline
    No. vectors: & 8	& 3	& 8	& 6	& 8	& 5	& 5	& 5	& 9 \\ \hline
  \end{tabular}

\label{table:resvectors}
  \small{Table 1: Number of vectors $k$, excluding the first 6
corresponding to 0 eigenvalue, selected per residue type for BPTI,
  showing that larger residues require greater numbers of vectors.}
\end{center}
\end{table}

The number of vectors of $\mathcal{Q}_{i}$ included in $\mathbf{E}$ varies
according to the residue composition.  We
expect that the eigenvectors identified above will correspond to the
first $k$ ordered eigenvalues.  The number $k$ varies between blocks
and is determined by selecting a cutoff frequency $f_c$, then
% from the block eigenvalues. Then we have
\begin{equation}
k=\left|\{\lambda_i|\lambda_i\le f_c^2\}\right|,
\end{equation}
for eigenvalues $\lambda_i$.

Alternatively an \emph{average}
number of vectors per block or block degrees of freedom, denoted `$\mathrm{bdof}$', can be chosen. The corresponding 
cutoff frequency $f_{\mathrm{bdof}}$ can be found as the $(\mathrm{bdof}\times r)^{\mathrm{th}}$ element ($r$ the number of blocks)
of the \emph{ordered} set of eigenvalues from \emph{all} blocks such that
\begin{equation}
f_{\mathrm{bdof}}^2=\lambda_{(\mathrm{bdof}\times r)}.
\end{equation}

Table~1 gives values of $k$ for BPTI, where the
$\mathrm{bdof}=12,$ (including first 6 eigenvectors) and where each
block has only 1 residue. As expected, larger residues such as ARG
require a greater number of vectors to describe their low frequency
motions than smaller ones like GLY. 

%We refer to the average number of these vectors by block as the block d.o.f. ($\mathit{bdof}$). 

To construct $\mathbf{E}$ we select the first $k$ vectors from $\mathcal{Q}_{i}$.
Given that the $i^{\mathrm{th}}$ block Hessian represents $p$ atoms, then
$\mathcal{Q}_{i}$ is a $3p\times 3p$ matrix from which we select
the first $k$ column vectors to form the $3p\times k$ matrix $\mathbf{\bar{Q}}_{i}$.
The corresponding $3N\times k$ block `Eigenvector' matrix `column' is then
\begin{equation}
\label{shorteig2}\mathbf{{E}}_i=\left[
\begin{array}{c}
\mathbf{0}\\
\vdots\\
\mathbf{0}\\
\mathbf{\bar{Q}}_i\\
\mathbf{0}\\
\vdots\\
\mathbf{0}
\end{array}\right].
\end{equation}

The $3N\times n$ matrix $\mathbf{E}$ is then 
\begin{equation}
\mathbf{E} = \left[\mathbf{{E}}_1~ \mathbf{{E}}_2 \cdots\mathbf{{E}}_i\cdots \mathbf{{E}}_r\right].
\end{equation}

\subsection{`Inner' diagonalization}
We calculate the mass weighted Hessian $\mathbf{H}$ using cutoffs (with at least \emph{C}2 switches), to give a sparse matrix.
For applications with full electrostatic calculations other dimension reduction 
techniques can be used in addition to those described here.

We denote the $i^{\mbox{th}}$ normalized eigenvector of the mass weighted
Hessian $\mathbf{H}$ as $\mathbf{v}_i$ and the matrix of column
eigenvectors $\mathbf{V}$. The eigenvectors are sorted such that
$\lambda_1\le\lambda_2\le\cdots\le\lambda_{3N}$, for their
corresponding eigenvalues. To extract approximations to these
eigenvectors we find the `inner' diagonalization as follows. We find
$n\times n$ matrix $\mathbf{S}$
\begin{equation}
\label{innermat1}\mathbf{S} = \mathbf{E}^{\mathrm{T}}\mathbf{H}\mathbf{E}.
\end{equation}
We then (cheaply) diagonalize the symmetric matrix $\mathbf{S}$ to find
orthonormal matrix $\mathbf{Q}$ s.t.
\begin{equation}
\mathbf{S}\mathbf{Q} = \mathbf{Q}\mathbf{D},
\end{equation}
for diagonal matrix $\mathbf{D}$. We can then write
\begin{equation}
\mathbf{U}^{\mathrm{T}}\mathbf{H}\mathbf{U}=\mathbf{D},
\end{equation}
for our approximate column eigenvectors $\mathbf{U}=\mathbf{E}\mathbf{Q}$ and the
diagonal of $\mathbf{D}$ representing their Rayleigh quotients. 

For our approximate eigenvectors we chose the first $m$ column vectors of the matrix $\mathbf{U}$
to form the $3N\times m$ matrix $\tilde{ \mathbf{V}}$. To determine $m$ we use the Rayleigh quotient $\sigma_i$
for the $i^{\mathrm{th}}$ column vector and assume that this is an upper bound on the corresponding 
largest eigenvalue in the space spanned by the first $i$ column vectors. The justification for
this assumption can be found in the following Section.

\subsection{Relationship between Rayleigh quotients for $\mathbf{U}$ and eigenvalues for $\mathbf{V}$.}

We denote the $i^{\mathrm{th}}$
column vector of $\mathbf{U}$ as $\mathbf{u}_i$, ordered with increasing value of the diagonal
of $\mathbf{D}$, i.e. $\mathbf{D}_{1,1}\le\mathbf{D}_{2,2}\le\cdots\le\mathbf{D}_{n,n}$.
Our subspace of dynamical interest, $C$, is then defined as the span of the first
$m$ column vectors of $\mathbf{U}$.

We define the $i^{\mbox{th}}$ diagonal of $\mathbf{D}$ as $\sigma_i=\mathbf{D}_{i,i}$.
Then
\begin{equation}
\sigma_i = \mathbf{u}_i^{\mathrm{T}}\mathbf{H}\mathbf{u}_i.
\end{equation}
We can write each vector $\mathbf{u}$ as a linear combination of the
eigenvectors $\mathbf{v}$
\begin{equation}
\mathbf{u}_i=\alpha_1^i\mathbf{v}_1+\alpha_2^i\mathbf{v}_2+\cdots+\alpha_{3N}^i\mathbf{v}_{3N},
\end{equation}
for scalars $\alpha_1^i,\alpha_2^i,\cdots,\alpha_{3N}^i$. Then
\begin{eqnarray}
\sigma_i &=&
(\alpha_1^i)^2\mathbf{v}_1^{\mathrm{T}}\mathbf{H}\mathbf{v}_1+(\alpha_2^i)^2\mathbf{v}_2^{\mathrm{T}}\mathbf{H}\mathbf{v}_2
+\cdots+(\alpha_{3N}^i)^2\mathbf{v}_{3N}^{\mathrm{T}}\mathbf{H}\mathbf{v}_{3N},\\
&=&
(\alpha_1^i)^2\lambda_1+(\alpha_2^i)^2\lambda_2+\cdots+(\alpha_{3N}^i)^2\lambda_{3N},
\end{eqnarray}
for eigenvalues of $\mathbf{H}$, $\lambda_i$. Since our vectors are
normalized, $|\alpha_j^i|\le1~\forall j$. If the coefficients
$\alpha_j^i$ of a vector $\mathbf{u}_i$ are zero for all vectors
$\mathbf{v}_j$ for $j<a$ and $j>b$ then we have bounds
\begin{equation}
\lambda_a\le\sigma_i\le\lambda_b.
\end{equation}

We also have
\begin{eqnarray}
\mathbf{u}_i^{\mathrm{T}}\mathbf{H}\mathbf{u}_j &=&
\alpha_1^i\alpha_1^j\mathbf{v}_1^{\mathrm{T}}\mathbf{H}\mathbf{v}_1+\alpha_2^i\alpha_2^j\mathbf{v}_2^{\mathrm{T}}\mathbf{H}\mathbf{v}_2
+\cdots+\alpha_{3N}^i\alpha_{3N}^j\mathbf{v}_{3N}^{\mathrm{T}}\mathbf{H}\mathbf{v}_{3N},\\
&=&
\alpha_1^i\alpha_1^j\lambda_1+\alpha_2^i\alpha_2^j\lambda_2+\cdots+\alpha_{3N}^i\alpha_{3N}^j\lambda_{3N}~=~0,
\end{eqnarray}
and
\begin{eqnarray}
\mathbf{u}_i^{\mathrm{T}}\mathbf{u}_j &=&
\alpha_1^i\alpha_1^j+\alpha_2^i\alpha_2^j+\cdots+\alpha_{3N}^i\alpha_{3N}^j~=~0.
\end{eqnarray}

For simplicity we will assume that the system has no conserved d.o.f. (the nullspace of
$\mathbf{H}$ has dimension zero) then
\begin{equation}
\alpha_k^i\alpha_k^j = 0 ~\forall k.
\end{equation}
This shows that each $\mathbf{u}_i$ is a linear combination of a unique set
of  eigenvectors of $\mathbf{H}$.

Given an eigenvector $\mathbf{v}_i$ of $\mathbf{H}$, if $\mathbf{v}_i\in C$ and 
$\mathbf{v}_i\notin C^{\bot}$ then, from the above, $\exists \mathbf{u}_l=\mathbf{v}_i$
for some $l$ and hence $\exists \sigma_l=\lambda_i$. From this, given that the $\mathbf{u}_j\in C$
are ordered s.t. $\sigma_1\le\sigma_2\le\cdots\le\sigma_m$ then the highest frequency 
in $C$, $f_{\mathrm{max}}$, satisfies 
\begin{equation}
f_{\mathrm{max}} \le \sqrt{\sigma_m}.
\end{equation}
Note: Where we have some eigenvector $\mathbf{v}_i\in C$ and
$\mathbf{v}_i\in C^{\bot}$ then the damping/relaxation of the 
dynamics projected onto $\mathbf{v}_i$ in $C^{\bot}$ will
preserve the stability of the method.

\subsection{Scaling}
\label{scale1}
The scaling with time for the `brute force' Lapack diagonalization method is known to be
$\mathcal{O}(N^3)$. For the coarse grained CNMA method using $b$ blocks we have the
cost of diagonalizing all $b$ blocks as $\mathcal{O}((N/b)^3)\times b = \mathcal{O}(N^3/b^2)$ and for the
small projected matrix as $\mathcal{O}(b^3),$ which has a minimum
cost when $b\propto N^{3/5},$ giving an estimated cost
of $\mathcal{O}(N^{9/5})$. This is borne out by the numerical
evidence. For the coarse grained method the
RAM resource usage is reduced from the `brute force' scaling of
$\mathcal{O}(N^2)$ to $\mathcal{O}(N)$. 

\section{Implementation}
The implementation resides in the following source files in {\emph{/src/protomol/integrator/hessian}}
{\scriptsize%small
\begin{verbatim}
BlockHessian.cpp				//Block Hessian class, finds blocks based on a number of adjacent residues
BlockHessian.h			
BlockHessianDiagonalize.cpp		//diagonalize the Block Hessian representation of the full Hessian
BlockHessianDiagonalize.h		
HessianInt.cpp 					//example calling class
HessianInt.h 
LapackProtomol.h				//Lapack defines
\end{verbatim}
}
and uses the following files in {\emph{/src/protomol/type/}}
{\scriptsize%small
\begin{verbatim}
BlockMatrix.cpp	//class for matrices forming arbitrary (contiguous) parts of a larger matrix
BlockMatrix.h		//includes product/sum etc. Supports Lapack where available
\end{verbatim}
}

\section{Python pseudo code}
R.J. Nowling has produced a python implementation to illustrate the process.
{\scriptsize%small
\begin{verbatim}
from numpy import linalg
from numpy import array
from scipy.sparse import csc_matrix
from scipy.sparse import linalg as splinalg
from scipy.linalg import block_diag

"""
Example implementation of Coarse-grained NMA

To Do:
1) Use more sparse matrices OR
2) Implement block matrices
"""

def findFrequencyCutoff(cutoff, eigenvalues):
    """
    Returns the index of the first eigenvalue whose magnitude exceeds the
    cutoff.

    Note that the "frequency" compared here is the absolute value of the
    eigenvalue itself (the text expresses the cutoff as f_c^2, i.e. in
    eigenvalue units), so `cutoff` must be supplied in the same units --
    as done by approximateDiagonalization, which passes an eigenvalue
    magnitude.

    cutoff -- cutoff frequency (eigenvalue magnitude)

    eigenvalues -- sequences of eigenvalues, monotonically increasing with
    respect to absolute value

    returns k, an int between 0 and len(eigenvalues).  If all frequencies are
    within the cutoff, returns len(eigenvalues).
    """

    # enumerate() replaces the Python-2-only xrange(); behaviour is unchanged
    # and the function now also runs under Python 3.
    for i, eigenvalue in enumerate(eigenvalues):
        if abs(eigenvalue) > cutoff:
            return i
    return len(eigenvalues)

def sort(eigenvalues, eigenvectors):
    """
    Sorts eigenvalues and eigenvectors to make them monotonically increasing
    with respect to absolute value of the eigenvalues.  Modifies eigenvalues
    and eigenvectors in place.

    (Ascending order is what the callers rely on: findFrequencyCutoff scans
    for the first eigenvalue above the cutoff, and approximateDiagonalization
    reads the (bdof * r)-th smallest eigenvalue to derive its cutoff
    frequency.  The pairing eigenvectors[i] <-> eigenvalues[i] must be
    preserved by the sort.)

    eigenvalues -- sequence of eigenvalues

    eigenvectors -- sequence of eigenvectors

    NOTE(review): stub -- intentionally left unimplemented in this
    illustrative pseudo code.
    """
    pass

def modifiedHessianBlock(startingIndex, endingIndex):
    """
    Computes the modified Hessian blocks.  This assumes that atoms used in
    blocks are contiguous such that the block will be the set [startingIndex,
    endingIndex].

    "Modified" refers to the equilibrium approximation described in the text:
    bonds, angles and dihedrals are evaluated at their current values taken
    as rest values, and Lennard-Jones and electrostatic terms are ignored.

    startingIndex -- int from set {0, ..., 3 * N - 1}

    endingIndex -- int from set {1, ..., 3 * N - 1} and greater than startingIndex

    Returns square matrix with dimension size endingIndex - startingIndex + 1.

    NOTE(review): stub -- intentionally left unimplemented in this
    illustrative pseudo code.
    """
    pass

def approximateDiagonalization(hessian, residues, blockSizes, bdof, numApprox):
    """
    Computes approximate low-frequency eigenvectors of the Hessian via
    coarse-grained normal mode analysis (CNMA).

    Given:
    hessian -- an 3N x 3N Hessian of the potential energies of the system
    blockSizes -- the number of residues in each block
    residues -- a list of the number of atoms in each residue
    bdof -- average degrees of freedom per block to retain
    numApprox -- number of eigenvectors of the hessian to approximate

    Returns the 3N x numApprox matrix V of approximate eigenvectors (columns).

    It is assumed that the atoms are grouped by residue and that the residues are ordered according to their adjacency.

    Note: Does this cause a problem with sulfur bridges and bonds between atoms in non-adjacent residues?
    """

    # lil_matrix supports efficient incremental slice assignment; csc_matrix
    # does not, so assemble E in lil format and convert once at the end.
    from scipy.sparse import lil_matrix

    # E submatrix eigenvectors and eigenvalues
    E_eigenvector_submatrices = []
    E_eigenvalue_submatrices = []
    E_submatrix_startingIndices = []
    E_submatrix_endingIndices = []

    # identify the blocks
    startingRes = 0
    for numRes in blockSizes:
        endingRes = startingRes + numRes
        # find first and last Hessian indices for contiguous block
        # of atoms.  These refer to indices of Hessian, not atoms
        startingIndex = residues[startingRes].atomIndices[0] * 3
        endingIndex = residues[endingRes - 1].atomIndices[-1] * 3 + 2

        # calculate block of modified Hessian
        submatrix = modifiedHessianBlock(startingIndex, endingIndex)

        # find eigenvalues and right eigenvectors
        eigenvalues, eigenvectors = linalg.eig(submatrix)
        # in-place sort of eigenvalues/eigenvectors by absolute magnitude.
        # (BUG FIX: was `sortEigens(eigenvectors, eigenvalues)`, which is
        # undefined; the helper defined above is sort(eigenvalues,
        # eigenvectors).)
        sort(eigenvalues, eigenvectors)

        E_eigenvector_submatrices.append(eigenvectors)
        E_eigenvalue_submatrices.append(eigenvalues)
        E_submatrix_startingIndices.append(startingIndex)
        E_submatrix_endingIndices.append(endingIndex)

        startingRes = endingRes

    # Take subset of vectors we wish to use: the cutoff frequency is the
    # (bdof * r)-th smallest eigenvalue magnitude over all blocks
    # (f_bdof^2 = lambda_(bdof * r) in the text).
    # (BUG FIX: `sort(E_eigenvalues)` called the two-argument helper with a
    # single argument; a plain in-place key-sort on magnitude is what is
    # needed here, since there are no eigenvectors to keep paired.)
    E_eigenvalues = []
    for e in E_eigenvalue_submatrices:
        E_eigenvalues.extend(e)
    E_eigenvalues.sort(key=abs)
    numRetainedEigenvalues = bdof * len(blockSizes)
    cutoffFreq = abs(E_eigenvalues[numRetainedEigenvalues - 1])

    # produce E from eigenvectors retained with frequencies less than cutoff.
    # NOTE(review): assumes the per-block retained counts sum to
    # numRetainedEigenvalues; ties at the cutoff value could break this --
    # verify against the C++ implementation.
    E = lil_matrix((len(hessian), numRetainedEigenvalues))
    startingCol = 0
    # (BUG FIX: was `for i in len(blockSizes)`, which raises TypeError.)
    for i in range(len(blockSizes)):
        # (BUG FIX: was `E_eigenvalues_submatrices`, a misspelling of the
        # list populated above.)
        eigenvalues = E_eigenvalue_submatrices[i]
        eigenvectors = E_eigenvector_submatrices[i]
        cutoff = findFrequencyCutoff(cutoffFreq, eigenvalues)
        startingIndex = E_submatrix_startingIndices[i]
        endingIndex = E_submatrix_endingIndices[i]
        endingCol = startingCol + cutoff
        # (BUG FIX: only the first `cutoff` columns fit the target slice;
        # assigning the full eigenvector matrix would not conform.)
        E[startingIndex:endingIndex + 1, startingCol:endingCol] = \
            eigenvectors[:, :cutoff]
        startingCol = endingCol
    E = E.tocsc()

    # 'inner' diagonalization: S = E^T H E is n x n and cheap to diagonalize
    S = E.T * hessian * E

    S_eigenvalues, S_eigenvectors = linalg.eigh(S)
    sort(S_eigenvalues, S_eigenvectors)

    # approximate eigenvectors of hessian: U = E Q
    U = E * S_eigenvectors

    # (BUG FIX: the approximate eigenvectors are the COLUMNS of U;
    # `U[:numApprox]` selected the first numApprox ROWS.)
    V = U[:, :numApprox]

    return V

\end{verbatim}
}


%-------------------------------------------------------------------------  %
%Appendix.                                                                  %
%-------------------------------------------------------------------------  %


%-------------------------------------------------------------------------  %
%Bibliography.                                                              %
%-------------------------------------------------------------------------  %


\bibliographystyle{apsrev}
\bibliography{lcls,nmbib}

\end{document}
