\documentclass[]{article}
\usepackage{amsmath,amsfonts,afterpage}
\usepackage{showlabels}
\usepackage[pdftex]{graphicx,color}
\newcommand{\normal}[2]{\mathcal{N}(#1,#2)} \newcommand{\La}{\mathcal{L}}
\newcommand{\nomf}{\tilde f} \newcommand{\COST}{\mathcal{C}}
\newcommand{\LL}{\mathcal{L}} \newcommand{\Prob}{\text{Prob}}
\newcommand{\field}[1]{\mathbb{#1}}
\newcommand\REAL{\field{R}}
\newcommand\Z{\field{Z}}
\newcommand\Polytope[1]{\field{P}_{#1}}
\newcommand\PolytopeN{\Polytope{N}}
\newcommand\PolytopeInf{\Polytope{\infty}}
\newcommand{\EV}[2]{\field{E}_{#1}\left[#2\right]}
\newcommand{\partialfixed}[3]{\left. \frac{\partial #1}{\partial
      #2}\right|_#3}
\newcommand{\dhot}{=_{\text{dhot}}}
\newcommand{\argmin}{\operatorname*{argmin}}
\newcommand{\argmax}{\operatorname*{argmax}}
\newcommand{\set}[1]{\mathcal{#1}}
\newcommand\inner[2]{\left\langle#1,#2\right\rangle}
\newcommand\norm[1]{\left|#1\right|}
\newcommand\bv{\mathbf{v}}
\newcommand\bw{\mathbf{w}}
\newcommand\dum{\xi}
\newcommand\Ddum{d\dum}
\newcommand\lambdaPF{\lambda_{\mathrm{PF}}} % Perron--Frobenius eigenvalue
\newcommand\rhoPF{\rho_{\mathrm{PF}}} % Perron--Frobenius eigenvector

\title{Notes on Constraining Uncertainty with Laws of Physics}

\author{Andrew M.\ Fraser}
\begin{document}
\maketitle
\tableofcontents
\begin{abstract}
  Initially, I derived both these notes and \emph{Constraining
    Uncertainty with Laws of Physics}\footnote{LA-UR-13-21824} (which
  the SIAM/ASA \emph{Journal on Uncertainty Quantification} has
  rejected) from a single document\footnote{\emph{Propagating
      Uncertainty About Gas EOS to Performance Bounds for an Ideal
      Gun} LA-UR-12-22731}.  These notes contain both some old
  material I did not include in my JUQ manuscript and some new ideas
  that I am developing.
\end{abstract}

\section{Moments over $\PolytopeN$}
\label{sec:moments}

% FixMe: Explain that we want distributions of functionals
Given a distribution of functions $f$ over $\PolytopeInf$, one
defines the mean and covariance as follows:
\begin{align*}
  \mu(t) &\equiv \EV{f}{f(t)} \\
  \Sigma(s,t) &\equiv \EV{f}{(f(t)-\mu(t))(f(s)-\mu(s))}.
\end{align*}
From $\mu$ and $\Sigma$ one can derive the mean and covariance of
linear functionals of $f$.  For example, combining the functions $q_0$
and $q_1$ into $Q \equiv \begin{bmatrix} q_0\\q_1 \end{bmatrix}$
yields a map $Q:f\mapsto \mathcal{V}$ with $\begin{bmatrix}
  E\\T \end{bmatrix}\in \mathcal{V}$, from elements of $\PolytopeInf$ to
affine estimates of the energy and time at the muzzle with mean and
covariance given by
\begin{subequations}
  \label{eq:qmoments}
  \begin{align}
    \mu_V &= Q \mu\\
    &\equiv \begin{bmatrix} \inner{\mu}{q_0} \\
      \inner{\mu}{q_1} \end{bmatrix}\\
    \Sigma_V &= Q\Sigma Q^T \\
    &\equiv
    \begin{bmatrix}
      \int\int \Sigma(s,t) q_0(s) q_0(t) \,ds \,dt & \int\int
      \Sigma(s,t) q_0(s) q_1(t) \,ds \,dt \\
      \int\int \Sigma(s,t) q_1(s) q_0(t) \,ds \,dt & \int\int
      \Sigma(s,t) q_1(s) q_1(t) \,ds \,dt
    \end{bmatrix}.
  \end{align}
\end{subequations}

To characterize the uniform distribution over $\PolytopeInf$, I use
the log-log construction described in the paper to make the
distribution of $\PolytopeN$ invariant to shifts in $X$.
\begin{figure*}
  \centering
    \resizebox{0.75\textwidth}{!}{\includegraphics{invariant.pdf}}
    \caption{Upper plot: Segment of nominal isentrope with $\pm 2.5\%$
      range on linear scale.  Lower plot: Same curves divided by the
      nominal on log-log scale.  Note the invariance with respect to
      translation of $x$.}
  \label{fig:invariant}
\end{figure*}

The symmetry of translation invariance makes it possible to describe
the probabilities as a stationary stochastic process with a transition
probability function and stationary distribution.  Defining
$Q$ by $P_k = \sum_{ij} Q_{k,ij}P_{ij}$, I estimate $\mu(t)$ and
$\Sigma(0,t)$ as follows
\begin{subequations}
  \label{eq:StatMom}
  \begin{align}
    \mu(t) &= \sum_i P_i f_i(t) = f(t)\cdot Q\cdot P_{\text{stationary}}\\
    \Sigma(0,t) &= \sum_{m,n} (f_m(0) - \mu(0)) \sum_{\mathbf{ij}}
    Q_{m,ij(1)}P_{ij(1)}P_{ij(2)|ij(1)} \left( \prod_{\tau=2}^{t-1}
      P_{ij(\tau+1)|ij(\tau)} \right) Q_{n,ij(t)}  (f_n(t) - \mu(t))  \\
    &= A \cdot B^t \cdot C \text{ where} \\
    A_{ij} &= \sum_m (f_m(0) - \mu(0)) Q_{m,ij}P_{ij} \\
    B_{ij,jk} &= P_{ij|jk} \text{ and} \\
    C_{jk} &= \sum_n (f_n(t) - \mu(t)) Q_{n,jk}.
  \end{align}
\end{subequations}
Results appear in Fig.~\ref{fig:moments}.

\begin{figure*}
  \centering
    \resizebox{1.3\textwidth}{!}{\includegraphics{moments.pdf}}
    \caption{Characterization of the stationary distribution
      in terms of moments calculated using \eqref{eq:StatMom}. (a) $P_f$,
      (b) ACF 1-d, (c) ACF 2-d.}
  \label{fig:moments}
\end{figure*}

\subsection{Discretely Sampled ACF}
\label{sec:discrete}

Although the formulas \eqref{eq:StatMom} only provide values of
$\Sigma(s,t)$ for pairs of positions $s$ and $t$ that obey
$\frac{s}{t} = e^{n\delta}$ where $n$ is an integer and $\delta$ is
the spacing between samples on a log scale, I want to use them to
approximately characterize $\PolytopeInf$.  In particular, I want the
following:
\begin{description}
\item[Correlation of functionals] Like the entries in $\Sigma_V$ in
  \eqref{eq:qmoments}, \emph{vis.}
  \begin{equation}
    \label{eq:corr}
   \EV{f}{\inner{f-\mu}{q_\alpha}\inner{f-\mu}{q_\beta}} =
   \inner{q_\alpha, \Sigma}{q_\beta} \equiv \int\int \Sigma(s,t)
   q_\alpha(s) q_\beta(t) \,ds \,dt
\end{equation}
\item[Spectral decomposition] From eigenvalues and eigenfunctions
  $\left\{\lambda, e_\lambda \right\}$, one can also calculate such
  terms in the following manner:
  \begin{equation}
    \label{eq:spec1}
    \inner{q_\alpha, \Sigma}{q_\beta} = \int \lambda
    \inner{q_\alpha}{e_\lambda} \inner{q_\beta}{e_\lambda} d\lambda.
  \end{equation}
  Also, the eigenfunctions corresponding to the largest eigenvalues
  indicate the directions in function space with the largest variance
  \begin{equation}
    \label{eq:spec2}
    \EV{f}{\inner{f-\mu}{e_\lambda}^2} = \inner{e_\lambda, \Sigma }{
      e_\lambda } = \lambda.
  \end{equation}
  (You need to find out about convergence of the spectral
  decomposition.  Look up reference for compact operators having
  discrete spectra.)
\end{description}

One uses a spectral decomposition of the auto-correlation function to
characterize the distributions of quantities of interest.  For
example, combining \eqref{eq:corr} and \eqref{eq:spec1}, one can
express the covariance of \eqref{eq:qmoments} as
\begin{equation}
  \label{eq:covV}
  \Sigma_V = \sum_i \lambda_i
  \begin{bmatrix}
    \inner{e_{i}}{q_0}^2 & \inner{e_{i}}{q_0}
    \inner{e_{i}}{q_1} \\
    \inner{e_{i}}{q_0} \inner{e_{i}}{q_1} &
    \inner{e_{i}}{q_1}^2
  \end{bmatrix}.
\end{equation}
The following analysis demonstrates that the discrete inner
product
\begin{equation}
  \label{eq:inner}
  \inner{u}{v}_\Delta = \sum_i u(x_i) v(x_i) \Delta_i
\end{equation}
accounts for the nonuniform spacing of samples.
The definition of an eigenfunction of the auto-correlation function is
\begin{align*}
  \int \Sigma(s,t) e_\lambda(t) \,dt &= \lambda e_\lambda(s) \\
  &\approx \sum_i \Sigma(s,t_i) \Delta_i e_\lambda(t_i) \equiv \inner{
    \Sigma}{e_\lambda}_\Delta.
\end{align*}
Consequently, I can write the quadratic
\begin{equation}
  \label{eq:Quad}
  \EV{f}{u(f-\mu)v(f-\mu)} \approx \sum_{i,j} \Sigma(x_i,x_j) u(x_i)
  v(x_j) \Delta_i \Delta_j = \inner{u}{ \inner{\Sigma}{v }_\Delta
  }_\Delta
\end{equation}
in terms of the discrete inner product and the covariance.  Note that
\eqref{eq:Quad} applies to \eqref{eq:corr}, \eqref{eq:spec1},
\eqref{eq:spec2} and \eqref{eq:covV}, and that the independent
direction corresponding to eigenvector $e_\lambda$ has coordinates
\begin{equation*}
  s_i = \frac{\left( e_\lambda \right)_i }{ \Delta_i }
\end{equation*}
in sample space.  Results of a spectral decomposition of $\Sigma$
appear in Fig.~\ref{fig:PCA}.  There, I have plotted normalized
functions, \emph{viz.} $\left\{ s_i \right\}$ such that $\forall i$
\begin{align*}
  \inner{s_i}{s_i}_{\Delta} &= 1 \\
  \inner{s_i}{\inner{\Sigma}{s_i}_{\Delta}}_\Delta &= \lambda_i \\
  \inner{s_k}{\inner{\Sigma}{s_i}_{\Delta}}_\Delta &= 0 ~\forall k \ne
  i.
\end{align*}
\begin{figure*}
  \centering
    \resizebox{0.95\textwidth}{!}{\includegraphics{PCA.pdf}}
    \caption{A principal component analysis of $\Polytope{1000}$.  The
      eigenvalues $\lambda_i$ of the covariance appear in the upper
      plot.  The eigenfunctions corresponding to the five largest
      eigenvalues appear in the lower plot.}
  \label{fig:PCA}
\end{figure*}

\subsection{Bilinear Forms}
\label{sec:bilinear}

The shift invariant construction combined with entropy maximization
provides values of the covariance kernel at positions uniformly spaced
on a log scale.  Here, I use $x$ to denote those positions and $d$ to
denote the interval lengths associated with them.  I am interested in
two bilinear forms on the space of functions, the continuous integral
\begin{equation*}
  \inner{g,\Sigma}{h}_I \equiv \int_{x_i}^{x_f} g(\dum_1) \Sigma(\dum_1,
  \dum_2) h(\dum_2) \Ddum_1 \Ddum_2
\end{equation*}
and its approximation
\begin{equation}
  \label{eq:innerd}
  \inner{g, \Sigma}{h}_d \equiv \sum_{i,j} g(x_i) d_i \Sigma_{i,j} d_j
  h(x_j)
\end{equation}
based on values of $g$ and $h$ sampled at the points $x$.  Letting $D$
denote the matrix with components $D_{i,j} = d_i \delta_{i,j}$ and $g$
and $h$ denote vectors with components $g_i = g(x_i)$ and $h_i =
h(x_i)$, I write \eqref{eq:innerd} in standard vector-matrix notation
as
\begin{equation}
  \label{eq:innerM}
  \inner{g, \Sigma}{h}_d = \left(g D\right)^T \Sigma D h .
\end{equation}
Equations \eqref{eq:innerd} and \eqref{eq:innerM} make it easy to
estimate covariances of functionals like \eqref{eq:corr}.

I also want a spectral decomposition of $\Sigma$ into normalized
eigenfunctions $h_\alpha$ and eigenvalues $\lambda_\alpha$ with
\begin{subequations}
  \label{eq:eigenfunctions}
  \begin{align}
    \inner{h_\alpha}{h_\beta}_I &= \delta_{\alpha,\beta} \\
    \inner{h_\alpha, \Sigma}{h_\beta}_I &= \delta_{\alpha,\beta}
    \lambda_\alpha.
  \end{align}
\end{subequations}
The following sequence of equations indicates that diagonalizing
$\tilde \Sigma \equiv D^T\Sigma D$ provides the discrete equivalents:
\begin{align}
  \inner{h_\alpha, \Sigma}{h_\beta}_d &= (D h_\alpha)^T \Sigma D
  h_\beta \\
  &= h_\alpha^T D^T \Sigma D h_\beta \\
  &\equiv h_\alpha^T W \Lambda W^T h_\beta \\
  \label{eq:eigenvectors}
  &= \sum_i w_i \cdot h_\alpha \lambda_i w_i \cdot h_\beta
\end{align}
Since substituting normalized versions of $w_i$ and $w_j$ in for
$h_\alpha$ and $h_\beta$ makes the bilinear form
\eqref{eq:eigenvectors} imitate \eqref{eq:eigenfunctions}, the
eigenvalues of $\tilde \Sigma$ correspond to the eigenvalues of the
correlation kernel and the vectors
\begin{equation*}
  h_i = \frac{w_i}{\sqrt{\inner{w_i}{w_i}_d}}
\end{equation*}
are the normalized eigenfunctions sampled at the points $x$.  Notice
that the coefficient of $w_i$ for a function $h$ is $\inner{D^{-1}
  w_i}{h}_d$ or $w_i\cdot h$ and not $\inner{w_i}{h}_d$.

\subsection{Cross Sections}
\label{sec:cross-sections}

I had conjectured that the large number of facets on $\PolytopeN$
ensured that $\PolytopeN$ would be well approximated by its first and
second moments for large $N$.  Figure~\ref{fig:PCA} illustrates the
spectral decomposition of the covariance which I've used as the basis
for an ellipsoidal approximation of $\PolytopeN$.
Figure~\ref{fig:ellipsoid} compares cross sections of $\PolytopeN$ to
cross sections of that ellipsoid.  It contradicts my conjecture about
$\PolytopeN$.

\begin{figure*}
  \centering
    \resizebox{1.25\textwidth}{!}{
    \hspace{-0.6\textwidth}\includegraphics{ellipsoid.pdf}}
  \caption{Cross sections of both an allowed polytope $\Polytope{1000}$
    and an approximating ellipsoid.  In each plot coefficients of a
    pair of basis functions appear.  The labels on the axes of each
    plot indicate the indices of the coefficients and basis functions.
    Given $\Sigma$, the covariance in the 2-d subspace of the two
    basis functions conditioned on all other coefficients, the
    equation $x^T \Sigma^{-1} x = 4$ defines the ellipse.}
  \label{fig:ellipsoid}
\end{figure*}

\section{Recursive Convexity}
\label{sec:recursive}

A triple of samples of an allowed function $f$ must obey
\begin{enumerate}
\item The multiplicative bounds on deviation from $\nomf$
\item Monotonicity
\item Convexity
\end{enumerate}
In addition, if $f$ is drawn from the stationary distribution, it must
also be possible to extend it both to the right and the left.  If the
slope is either too large or too small, it may be impossible to extend
it and obey both convexity and the upper multiplicative bound.  In
Figure~\ref{fig:recursive}, any function that crosses from below the
green line to above it before the point where the green line is
tangent to the upper bound cannot be continued beyond that point of
tangency without violating either the bound or convexity.  A similar
constraint involving the red line applies to extending functions to
the left.  Combining the constraints yields a requirement that any
allowed function that passes through the dot in the middle of the
figure must stay between the red and green lines to the right and left
of the dot.

\begin{figure}
  \centering
    \resizebox{0.4\textwidth}{!}{\includegraphics{recursiveIV.pdf}}
    \resizebox{0.4\textwidth}{!}{\includegraphics{recursiveV.pdf}}\\
    \resizebox{0.32\textwidth}{!}{\includegraphics{recursiveI.pdf}}
    \resizebox{0.32\textwidth}{!}{\includegraphics{recursiveII.pdf}}
    \resizebox{0.32\textwidth}{!}{\includegraphics{recursiveIII.pdf}}
  
    \caption{Recursive Convexity: Upper left: All functions that pass
      through the center point must stay between the red and green
      bounds in addition to staying inside the blue bounds.  Upper
      right: All functions that pass through the center window, i.e.,
      the 3rd quantization level, must stay between the green and red
      lines going forward.  Lower plots: Functions that pass through
      the first two open windows must also pass through the magenta
      window at the third level.}
  \label{fig:recursive}
\end{figure}

I used the following calculations to find the tangent points in
Figure~\ref{fig:recursive}.
\begin{align*}
  U(x) &= \frac{U_1}{x^3} \\
  U'(x) &= \frac{-3U_1}{x^4}
\end{align*}
If $\left. f(x)\right|_{x=1} = f_1$ then the tangents to $U$ that pass
through $(1, f_1)$ are solutions to
\begin{align*}
  U(x) &= f_1 + (x-1)U'(x) \\
  \frac{U_1}{x^3} &=  f_1 - \frac{3(x-1)U_1}{x^4} \\
  U_1 &= x^3 f_1 + \frac{3(1-x)U_1}{x} \\
  &=  x^3 f_1 + \frac{3U_1}{x} - 3U_1 \\
  0 &= x^4 f_1 + 3U_1 - 4xU_1
\end{align*}

\afterpage{\clearpage} % Flush floats

\newpage

\section{Shaw's EOS}
\label{sec:shaw}

Here are Eqns.\ 4.6--4.9 of Hixson et al.
\newcommand{\CJ}{_{\text{CJ}}}
\begin{align}
  x(\rho) &= \rho - \rho\CJ \\
  g(\rho) &= \sum_{i=0}^3 d_i \left( x(\rho) \right) ^i \\
  f(\rho) &= \sum_{i=0}^6 c_i \left( x(\rho) \right) ^i \\
  P(\rho,E) &= \rho^3 f(\rho) + g(\rho)(E - E\CJ (\rho) ) \\
  &= (x+\rho\CJ)^3f(\rho) + g(\rho)(E - E\CJ (\rho) ) \\
  \label{eq:ecj}
  E\CJ (\rho) &= E_0 - \int_{V\CJ}^{V(\rho)} \rho^3 f (\rho(V)) dV .
\end{align}
Following Eqn.\ 4.9, Hixson et al.\ write
\begin{align*}
  \rho\CJ &= 2.4403 \frac{\text{g}}{\text{cm}^3}\\
  \gamma_S &= 3.038 \\
  P\CJ &= 35.3 \text{ GPa} .
\end{align*}
Differentiating polynomials yields
\begin{align*}
  g'(\rho) &= \sum_{i=1}^3 i d_i \left( x(\rho) \right) ^{i-1} \\
  f'(\rho) &= \sum_{i=1}^6 i c_i \left( x(\rho) \right) ^{i-1}.
\end{align*}

Changing variable, I write \eqref{eq:ecj} as
\begin{align*}
   E\CJ (\rho) &= E_0 - \int_{\rho\CJ}^\rho \rho^3 f(\rho) \frac{dV}
   {d \rho} d \rho  \\
   &= E_0 + \int_{\rho\CJ}^\rho \rho f(\rho) d \rho \\
   &= E_0 + \int_{\rho\CJ}^\rho\sum_{i=0}^6 c_i \rho \left( \rho -
   \rho\CJ \right) ^i d \rho \\
   &= E_0 + \int_0^{\rho -\rho\CJ} \sum_{i=0}^6 c_i (r - \rho\CJ) r^i d r \\
   &= E_0 + \int_0^{\rho -\rho\CJ} \left( \sum_{i=0}^6 c_i r^{i+1}
     - \sum_{i=0}^6 c_i \rho\CJ r^i \right) d r \\
   &= E_0 + c_0 \rho\CJ (\rho - \rho\CJ) + \int_0^{\rho -\rho\CJ}
   \left( \sum_{i=0}^6 c_i r^{i+1}
     - \sum_{i=1}^6 c_i \rho\CJ r^i \right) d r \\
   &= E_0 + c_0 \rho\CJ (\rho - \rho\CJ) + \int_0^{\rho -\rho\CJ}
   \sum_{i=0}^6 \left( c_i - c_{i+1} \rho\CJ \right) r^{i+1} d r
   \text{ where } c_7 = 0\\
   &= E_0 + c_0 \rho\CJ (\rho - \rho\CJ) +
   \sum_{i=0}^6 \frac{ c_i - c_{i+1} \rho\CJ}{i+1} \left( \rho -
     \rho\CJ \right)^{i+2}\\
   &= E_0 + c_0 \rho\CJ x +
   \sum_{i=0}^6 \frac{ c_i - c_{i+1} \rho\CJ}{i+1} x^{i+2}
\end{align*}
Note
\begin{align*}
  \frac{d}{dx} E\CJ(\rho(x)) &= E'\CJ \frac{d \rho}{dx} \\
  E'\CJ &= \rho f(\rho) \\
  \frac{d \rho}{dx} &= 1 \\
  \frac{d}{dx} E\CJ(\rho(x)) &= (x+\rho\CJ)f(\rho)
\end{align*}


For the script \emph{eos.py} I need
\begin{align*}
  \partialfixed{P}{v}{S} &= \partialfixed{P}{x}{S} \frac{d x}{d v} \\
  \frac{d x}{d v} &= -\rho^2 = -(x+\rho\CJ)^2\\
  \partialfixed{P}{x}{S} &= 3\rho^2 f(\rho) + \rho^3 f'(\rho) +
  g'(\rho) \left( E - E\CJ(\rho) \right) + g(\rho)E'\CJ(\rho) \\
  &= 3\rho^2 f(\rho) + \rho^3 f'(\rho) +
  g'(\rho) \left( E - E\CJ(\rho) \right) + \rho f(\rho)g(\rho)
\end{align*}

\section{First Order}
\label{sec:first-order}

\subsection{To Do}
\label{sec:do}

Plot errors

Characterize convergence of $\lambda$ and $\mu$ as $\Delta_h$ and
$\Delta_g$ get small.  Also dependence on $u$ and $dy$.

As $dy \rightarrow 0$ look for convergence of
$\mu_{\text{stationary}}$ and characterize $\lambda$ dependence.

Analyze error of quadratic approximation for finite $u$.

\begin{enumerate}
\item For fixed $u$, find $n_g$ and $n_h$ that get eigenvector error
  below 0.02 for $dy \in \left\{dy_0, dy_1,dy_2 \right\}$ where $dy_k
  \equiv \frac{dy_0}{2^k}$ and $dy_0$ is \emph{small}.  Then for each
  $k$:
  \begin{enumerate}
  \item Calculate stationary joint, conditional and marginal
    distributions
  \item Plot the marginals and calculate the KL differences
  \item Raise the conditional to the $2^k$ power and multiply by the
    marginal to get the joint and then use KL difference to compare
    the result to the joint for $k=0$.
  \end{enumerate}
\item Analyze (and write up in this document) the effect of dividing
  $\Delta_y$ in half on the integral operator, the eigenvalue,
  $\lambdaPF$ and the eigenvector $\rhoPF$.
\item Study numerical convergence properties as $\frac{1}{n_g}
  \rightarrow 0$ and $\frac{1}{n_h} \rightarrow 0$ and select
  convergence criterion.
\item Using $n_g(dy)$ and $n_h(dy)$ that satisfy convergence
  criterion, numerically study the dependence of eigenvalue and
  eigenvector on $dy$ as $dy \rightarrow 0$.
\end{enumerate}

\subsection{Code Performance}
\label{sec:code}

The command
\begin{verbatim}
nohup time python3 converge.py --out_file study_15000_800_08T --n_g0
2000 --n_g_step 4 --n_g_final 15000 --n_h0 450 --n_h_step 3
--n_h_final 800 --ref_frac .8 --dy 0.00008
\end{verbatim}
died with the following
\begin{verbatim}
Command terminated by signal 9
2592757.31user 3063.76system 121:55:36elapsed 591%CPU (0avgtext+0avgdata 131092600maxresident)k
30691224inputs+0outputs (2899406major+57075951minor)pagefaults 0swaps
\end{verbatim}
I think that it finished the first set of power iterations (each took
almost exactly 15 minutes) and ran out of memory when it started the
second set.  If I'm right, then the first eigenvector calculation
terminated after about 487 iterations.

Running the command
\begin{verbatim}
nohup time python3 converge.py --out_file study_8000_800_08T --n_g0 2000 --n_g_step 4 --n_g_final 8000 --n_h0 450 --n_h_step 3 --n_h_final 800 --ref_frac .8 --dy 0.00008 
\end{verbatim}
initially used 31g of memory and took 3.5 minutes for each cycle
(matrix multiply) in power().  After 9500 minutes of cpu time, memory
use increased to 48g and the cycle time decreased to 73 seconds.
After about 12900 minutes of cpu time the cycle time dropped to about
46 seconds and memory use increased to 57g.  As cycle time dropped
further for smaller operators, the memory use stayed at 56g.

\begin{tabular}{|l|l|l|l|l|l|l|l|l|l|}
  \hline
  $u\cdot 10^5$ & $dy\cdot 10^4$ & $n_g$ & $n_h$ & build & eig & $n_{\text{states}}$ & 
  $n_{\text{pairs}}$ & no suc & mem \\
  \hline
  2.0 & 0.8 &  250 &  250 & 2.9 &   4.0& 41.8K & 33M  & .0273 &  80M \\
  2.0 & 0.8 &  500 &  500 & 11  &   44 & 167K  & 523M & .0135 & 273M \\
  2.0 & 0.8 & 1000 & 1000 & 48  & 1283 & 667K  & 8.4G & .0068 & 1.1G \\
  2.0 & 0.8 & 2000 & 2000 & 204 &      & 2.67M & 134G & .0034 & 5.4G \\
  2.0 & 0.8 & 3000 & 3000 &     &      & 6.00M & 676G & .0023 & 14G  \\
      &     &      &      &     &      &       &      &       &      \\
  2.0 & 1.6 & 250  & 250  & 2.9 & 9.3  & 42K   &  66M & .0146 &  84M \\
  2.0 & 1.6 & 500  & 500  & 12  & 66   & 167K  & 1.1G & .0074 & 303M \\
  2.0 & 1.6 & 1000 & 1000 & 51  & 694  & 667K  &  17G & .0037 & 1.3G \\
  2.0 & 1.6 & 2000 & 2000 & 228 &      & 2.67M & 270G & .0018 & 7.2G \\
      &     &      &      &     &      &       &      &       &      \\
  2.0 & 3.2 & 250  & 250  & 3.0 & 3.1  & 41K   & 135M & .0082 &  91M \\
  2.0 & 3.2 & 500  & 500  & 13  & 39   & 167K  & 2.1G & .0041 & 358M \\
  2.0 & 3.2 & 1000 & 1000 & 57  & 619  & 667K  & 34G  & .0021 & 1.8G \\
  2.0 & 3.2 & 1500 & 1500 & 140 & 3109 & 1.50M & 174G & .0014 & 5.1G \\
  %u    dy    ng     nh     build eig    states  pairs  suc     mem
  \hline
\end{tabular}
The \emph{mem} numbers come from the \emph{RES} column.  The runs with
$3.2\times 10^{-4}$ converge within about 100 iterations.  Initially,
those with smaller values of $dy$ terminated after 149 iterations
without converging.  I erased those times and increased the maximum
number of iterations to 1000 which was enough for the runs with times
entered.

By looking at surface plots, I've found the following table of $dg$
and $dh$ values required to get error down to $2\%$ as a function of
$dy$.

\begin{tabular}{|l|l|l|l|l|l|c|}
  \hline
  $u\cdot 10^5$ & $dy\cdot 10^4$ & $dg\cdot 10^8$ & $(dy)^2/dg$
  & 2u/dg &$dh\cdot 10^4$ & filename \\
  \hline
  2.0 & 0.8 & .55 & 1.16 &7273& 3.5  & study\_8000\_800\_08T \\
  2.0 & 1.6 & 2.5 & 1.02 &1600& 5.7  & study\_2500\_800\_16T \\
  2.0 & 3.2 & 12  & 0.85 &333 & 7.1  & study\_1000\_600\_32T \\ % dg is wrong
  2.0 & 6.4 & 40  & 1.02 &100 & 5.1  & study\_200\_200\_64T \\
  %u    dy    dg  dy^2/dg        dh    filename
  \hline
\end{tabular}

\subsection{Introduction}
  \label{sec:introduction}

Here I consider a first order Markov process on 2-d states consisting
of function values $g(y)$ and derivatives $h(y) \equiv \left. \frac{d
    g }{d t} \right|_{t=y}$.  I consider sequential samples at $y_0$
and $y_1$, and I suppose that the upper bound is $u$ and the lower
bound is $l=-u$.

In $(f,x)$ coordinates, I describe the nominal function, $\tilde f$ by
\begin{align*}
  x_0 &= 1\\
  f_0 &= f(x_0) \\
  \tilde f(x) &= \frac{\tilde f_0}{x^3}.
\end{align*}
The coordinates $g$ and $y$ are functions of coordinates $f$ and $x$ with
\begin{align*}
  x(y) &= e^y \\
  f(g) &= \frac{\tilde f_0 e^{g}}{x^3} \\
  g(f) &= \log(f) + 3y - \log(\tilde f_0).
\end{align*}
The straight line in $(g,y)$, $g(y) = 0$, corresponds to
\begin{equation*}
  \tilde f(x) = \frac{\tilde f_0}{x^3}.
\end{equation*}

In the $(f,x)$ coordinates a line that is tangent to $\tilde f(x)$ at
$x=1$ satisfies
\begin{equation*}
  f(x) = f_0(4-3x),
\end{equation*}
which in the $(g,y)$ coordinates is
\begin{equation*}
  g(y) = \log \left( 4 - 3e^y \right) + 3y.
\end{equation*}
At $y=0$, that $g$ has the following derivatives
\begin{align*}
  g(0) & = g_0 \\
  g'(y) &= 3 - \frac{3e^y}{4-3e^y} \\
  g'(0) &= 0 \\
  g'' &= -\frac{\frac{4}{3}e^y}{\left( e^y - \frac{4}{3} \right)^2}\\
  g''(0) &= -12 \\
  g''' &= -\frac{4\left( \frac{4}{3} + e^y \right) e^y}{
    3\left( \frac{4}{3} - e^y \right) ^3} \\
  g'''(0) &= -84.
\end{align*}
Thus the second order Taylor series approximation to the function in
$(g,y)$ that corresponds to a tangent to $\frac{\tilde f_0
  e^{g_0}}{x^3}$ at $x_0 = 1$ is
\begin{equation}
  \label{eq:tangent}
  g(y) = a - 6(y-b)^2.
\end{equation}
I will use the Taylor series approximation \eqref{eq:tangent} to
tangents in $(f,x)$ from now on.  Figure~\ref{fig:taylor} illustrates
that approximation.

\begin{figure}
  \centering
    \resizebox{0.65\textwidth}{!}{\includegraphics{taylor.pdf}}  
  \caption{Second order Taylor series approximation to $g(y)$ compared
    to actual $g(y)$ when $f = e^g = \alpha - \beta x$,
    $y=\log(x)$, and $f$ is tangent to $\frac{1}{x^3}$ at $x=1$.}
  \label{fig:taylor}
\end{figure}

Now I describe quadratic approximations to constraints on the values
of $g_1 \equiv g(y_1)$ and $h_1 \equiv h(y_1)$ that are determined by
$g_0 \equiv g(y_0)$ and $h_0 \equiv h(y_0)$.  I use $U_g$, $L_g$,
$U_h$, and $L_h$ to denote (upper/lower) bounds on ($g_1/h_1$)
respectively.  A trajectory $g$ that goes through $g_0$ at $y_0$ must
lie between the two lines $U_g(g_0,y_0,y)$ and $\bar U_g(g_0,y_0,y)$
that in $(f,x)$ coordinates go through $f_0$ and are tangent to the
upper bound $u$ to the right and left of $y_0$ respectively.  If $h_0$
is the derivative of $g$ at $y_0$, the value of $g$ at $y_1$, i.e.,
$g_1$ must lie above $L_g(g_0,h_0,y_0, y_1)$, the image in $(g,y)$ of
the tangent to $f$ at $x_0$ in $(f,x)$.  Thus
\begin{equation}
  \label{eq:boundsA}
  U_g(g_0,y_0, y_1) \geq g_1 \geq L_g(g_0,h_0,y_0, y_1).
\end{equation}
The following calculations fit quadratic approximations to both $U_g$
and $L_g$.
\newcommand{\Rad}[1]{\sqrt{24\left(u#1\right)}}
\newcommand{\UgQ}{ g_0 + \Delta_y\Rad{-g_0} - 6 \Delta_y^2}
\begin{align}
  U_g(y) &= u - 6(y-b)^2 &&\text{Premise} \nonumber \\
  U_g(y_0) &= g_0 &&\text{Premise} \nonumber \\
  \label{eq:premise}
  U'_g(y_0) &\geq 0 &&\text{Premise} \\
  g_0 &= u - 6(y_0-b)^2 \nonumber \\
  b &= y_0 \pm \sqrt{\frac{u-g_0}{6}} && \text{Positive root to satisfy
    \eqref{eq:premise} \& \eqref{eq:dugb}}  \nonumber \\
  \label{eq:dugb}
  \frac{d U_g}{d y} &= -12(y-b) \\
  \label{eq:dug}
  \left. \frac{d U_g}{d y} \right|_{y_0} &= 12\sqrt{\frac{u-g_0}{6}} \\
  U_g(y) &= g_0 + (y-y_0)\Rad{-g_0} - 6 (y-y_0)^2 \nonumber
%  &= \UgQ \nonumber
\end{align}
Note: If $U_g(y)$ is tangent to $y=u$ for some $y$ between $y_0$ and
$y_1$, then the only upper bound on $g(y_1)$ is $g(y_1)\leq u$.  Also
such a tangency exists iff
\begin{equation*}
  \sqrt{\frac{u-g_0}{6}} \leq \Delta_y.
\end{equation*}
Thus
\begin{equation*}
  U_g(y_1) =
  \begin{cases}
    u & \text{ if } \sqrt{\frac{u-g_0}{6}} \leq \Delta_y\\
    \UgQ & \text{ otherwise}
  \end{cases}
\end{equation*}
Similarly, for the lower bound:
\newcommand{\LgQ}{g_0 + h_0\Delta_y - 6\Delta_y^2}
\begin{align*}
  L_g(y) &= a - 6(y-b)^2 &&\text{Premise} \nonumber \\
  L'_g(y) &= -12(y-b) \nonumber \\
  g_0 &= a - 6(y_0-b)^2 &&\text{Premise} \nonumber \\
  h_0 &= -12(y_0-b) &&\text{Premise} \nonumber \\
  b &= y_0 + \frac{h_0}{12} \nonumber \\
  a &= g_0 + \frac{h^2_0}{24} \\
  L_g(y_1) &= g_0 + h_0(y_1-y_0) -6(y_1-y_0)^2 \nonumber
  \\
  &= \LgQ \nonumber
\end{align*}
Substituting these values into \eqref{eq:boundsA} yields\footnote{The
  upper bound in Eqn.\ \eqref{eq:UL_g} is simply $u$ if $g_0 \geq u -
  6 \Delta_y^2$, and the lower bound in Eqn.\ \eqref{eq:DG_DY} is only
  valid if $\sqrt{\frac{u-g_0}{6}} \geq \Delta_y$.}
\begin{align}
  \label{eq:UL_g}
  \UgQ &\geq g_1 \geq \LgQ \\
  \label{eq:DG_DY}
  \Rad{-g_0} - 6\Delta_y &\geq \frac{\Delta_g}{\Delta_y} \geq h_0
  - 6 \Delta_y
\end{align}
Figure~\ref{fig:boundsA} illustrates the bounds
\eqref{eq:boundsA}.  In Fig.~\ref{fig:boundsB}, I have simply reduced
the extent in $y$.

\begin{figure}
  \centering
    \resizebox{0.5\textwidth}{!}{\includegraphics{bounds_04.pdf}}  
  \caption{Bounds on $g_1$ given $g_0$ and $h_0$}
  \label{fig:boundsA}
\end{figure}
\begin{figure}
  \centering
    \resizebox{0.5\textwidth}{!}{\includegraphics{bounds_005.pdf}}  
  \caption{Bounds on $g_1$ given $g_0$ and $h_0$}
  \label{fig:boundsB}
\end{figure}

Given $g_0$ and a $g_1$ that satisfies the constraints
\eqref{eq:boundsA}, the constraints on $h_1$ are that it must be less
than the slope $\left. \frac{d U_g(g_1,y_1,y)}{dy} \right|_{y=y_1}$
and it must be greater than the slope at $y_1$ of the image in $(g,y)$
of the line in $(f,x)$ that connects the images of $(g_0,y_0)$ and
$(g_1,y_1)$.  The following calculation finds the quadratic
approximation for that curve:
\begin{align*}
  g(y) &= a -6(b-y)^2 \\
  g_0 &= a -6(b-y_0)^2 & g_1 &= a -6(b-y_1)^2 \\
  a &= g_0 + 6(b-y_0)^2 =  g_1 + 6(b-y_1)^2 \\
  g_0 -g_1 &= 12b(y_0-y_1) + 6(y_1-y_0)(y_1+y_0) \\
  b &= \frac{g_1-g_0 + 6 (y_1^2-y_0^2)}{12(y_1-y_0)} = \frac{g_1 -
    g_0}{12 (y_1 - y_0)} + \frac{y_1 + y_0}{2}\\
  a &= \frac{\Delta_g^2}{24 \Delta_y^2} + \frac{g_1 + g_0}{2} +
  \frac{ 3 \Delta_y^2}{2}.
\end{align*}
At $y_1$, the derivative of $a-6(y-b)^2$ is
\newcommand{\LhQ}{\frac{\Delta_g}{\Delta_y} - 6 \Delta_y}
\begin{align*}
  L_h(g_1, g_0, y_0, y_1) &= -12(y_1-b) \\
  &= \frac{g_1 - g_0 -6(y_1 - y_0)^2} {y_1-y_0} \\
  & \equiv \LhQ
\end{align*}
Following \eqref{eq:dug}, the upper bound is
\newcommand{\Uh}{\Rad{-g_1}}
\begin{equation*}
  U_h(g_1) = \Uh,
\end{equation*}
and\footnote{The \emph{maximum} function and the term $-\Uh$ treat the
case of $U_g(y)$ being tangent to $u$ between $y_0$ and $y_1$.}
\begin{equation}
  \label{eq:UL_h}
  \Uh \geq h_1 \geq \max\left( \LhQ,~ -\Uh \right).
\end{equation}

Figure~\ref{fig:boundsC} illustrates those constraints.

\begin{figure}
  \centering
    \resizebox{0.65\textwidth}{!}{\includegraphics{bounds_dg.pdf}}  
    \caption{Curves $U_h(g_1)$ and $L_h(g_1, g_0)$ whose tangents give
      bounds on $h_1$ given $g_0$ and $g_1$}
  \label{fig:boundsC}
\end{figure}

\subsection{Integral Equations}
\label{sec:integral-equations}

I will use the following notation:
\begin{description}
\item[$y$] The independent variable, $y = \log(x)$
\item[$z(y)$] A state consisting of $g(y)$, the value of the function
  and $h(y) \equiv \left. \frac{d g(t)}{d t} \right|_y$, its derivative.
\item[$I_\Delta$] The indicator function:
  \begin{equation}
    \label{eq:indicator}
    I_\Delta(z_0, z_1) =
    \begin{cases}
      1 & \text{if } \left( z(0) = z_0,~z(\Delta)=z_1
      \right) \text{ is allowed} \\
      0 & \text{otherwise}
    \end{cases}
  \end{equation}
  I will drop the subscript from $I_\Delta$ unless I need it.
\item[${\lambdaPF}_\Delta$] The eigenvalue of the linear operator
  consisting of integration over $I_\Delta$ with the largest
  magnitude.  The Perron--Frobenius theorem promises that
  ${\lambdaPF}_\Delta$ is isolated and positive and that there is a
  corresponding eigenfunction that is strictly positive.
\item[${\rhoPF}_\Delta$] The positive unit norm eigenfunction that
  corresponds to ${\lambdaPF}_\Delta$; defined by
  \begin{equation}
    \label{eq:integralI}
    \int I(z_0, z_1) {\rhoPF}_\Delta(z_1) d z_1 \equiv {\lambdaPF}_\Delta
    {\rhoPF}_\Delta(z_0).
  \end{equation}
  I will drop the subscripts of ${\lambdaPF}_\Delta$ and
  ${\rhoPF}_\Delta$ when I can without creating ambiguity.
\item[$\psi, \phi$] A decomposition of $\rho$ with
  \begin{equation*}
    \rho(g,h) = \psi(g) \phi(h,g).
  \end{equation*}
  I think of $\phi$ as a family of functions of $h$ with $g$ as a
  parameter.
\item[$U_g,~L_g,~U_h,~L_h$] Functions that provide integration limits
  that express \eqref{eq:integralI} as
  \begin{equation}
    \label{eq:integralUL_}
    \int_{L_g(g_0,h_0)}^{U_g(g_0)} dg_1
    \int_{L_h(g_1,g_0)}^{U_h(g_1)} dh_1~ \rho(g_1, h_1) = \lambda
    \rho(g_0, h_0)
  \end{equation}
\end{description}

From the quadratic approximations \eqref{eq:UL_g} and \eqref{eq:UL_h}
I write the bounds in \eqref{eq:integralUL_} as follows:
\begin{align*}
  L_g(g_0, h_0) &= \max \left( -u, g_0 + h_0 \Delta_y - 6 \Delta_y^2
  \right)\\
  U_g(g_0) &=
  \begin{cases}
    u & g_0 > u - 6 \Delta_y^2 \\
    \min \left( u, g_0 + \Delta_y\Rad{-g_0} - 6 \Delta_y^2
    \right) & \text{otherwise}
  \end{cases} \\
  L_h(g_0, g_1) &= \max \left( \frac{\Delta_g}{\Delta_y} - 6
    \Delta_y, - \Rad{-g_1} \right) \\
  U_h(g_1) &= \Rad{-g_1} ,
\end{align*}
and then
\newcommand{\Ug}{ g_0 + \Delta_y\Rad{-g_0}}
\newcommand{\Lg}{g_0 + h_0\Delta_y}
\newcommand{\Lh}{\frac{\Delta_g}{\Delta_y}}
\begin{align}
  \lambda \rho(z_0) &= \int I(z_0, z_1) \rho(z_1) d
  z_1 \nonumber \\
  &\equiv \lambda \psi(g_0) \phi(h_0,g_0) \text{ Definition of } \psi,
  \phi \nonumber \\
  \label{eq:integralULq}
    &= \int_{L_g(g_0, h_0)}^{U_g(g_0)} \psi(g_1)
    \int_{L_h(g_0,g_1)}^{U_h(g_1)}~ \phi(h_1,g_1) ~dh_1 dg_1 \\
  \label{eq:integralUL}
    &\dhot \int_{\Lg}^{\Ug} \psi(g_1) \int_{\Lh}^{\Uh}~ \phi(h_1,g_1) ~dh_1 dg_1,
\end{align}
where $\dhot$ means \emph{equals after dropping higher order terms in}
$\Delta_y$.  Note:
\begin{itemize}
\item Dropping the terms $-6\Delta_y^2$ and $-6 \Delta_y$ in the lower
   limits of the integrals in going from \eqref{eq:integralULq} to
   \eqref{eq:integralUL} removes the possibility of states with $h>0$
   mapping to states with lower values of $h$, which changes the
   character of the eigenvector I seek.
 \item I define $y_T(y_0,g_0) = \sqrt{\frac{u - g_0}{6}}$ as the point
   where the parabola that goes through $(y_0,g_0)$ with nonnegative
   slope is tangent to the upper bound $u$.  If $g_0 > u -
   6\Delta_y^2$ then $y_T < y_0 + \Delta_y$ and the appropriate bounds on the
   integrals are different.  In particular, the upper limit on the
   integral over $g_1$ is
   \begin{equation*}
     U_g(g_0,h_0) = u,
   \end{equation*}
   and the lower limit on the integral over $h_1$ is
   \begin{equation*}
     L_h(g_0, g_1) = \max \left( \frac{\Delta_g}{\Delta_y} - 6
       \Delta_y, - \Rad{-g_1} \right).
   \end{equation*}
\end{itemize}

\subsection{Discrete Approximation}
\label{sec:approximate}

Let $A$ denote a discrete approximation of the integral operator, and
let $A(g,h)$ denote the set of components in the image of the single
component $(g,h)$.  $A$ maps states $(g,h)$ at $y$ to states $(g',h')$
at $y+\Delta_y$, and while $A^T$ maps from $y$ to $y-\Delta_y$, it is
not the inverse of $A$.  Figure~\ref{fig:Av} illustrates the action of
$A$.  If
\begin{equation*}
  (g',h') \in A(g,h),
\end{equation*}
then
\begin{equation*}
  (g', -h') \in A^T(g,-h).
\end{equation*}
Thus, if $S$ denotes the interchange of $h$ and $-h$
\begin{align*}
  SS &= I \\
  SA^TS &= A,
\end{align*}
which ought to be worth something.

\begin{figure*}
  \centering
    \includegraphics[width=\linewidth]{Av}
  \caption{Action of integral operator and its transpose on selected points}
  \label{fig:Av}
\end{figure*}

I used a power iteration scheme to produce
Fig.~\ref{fig:eigenfunction}.  While the Perron-Frobenius Theorem
promises that the largest eigenvalue is positive and its eigenvector
is real and positive, and the results of \texttt{scipy.sparse.linalg.eigs}
almost have those properties, the resulting eigenvector has a small
imaginary part and the real parts do not all have the same sign.
\begin{figure*}
  \centering
    \includegraphics[width=\linewidth]{eigenfunction}
  \caption{Eigenfunction of integral operator.}
  \label{fig:eigenfunction}
\end{figure*}

\subsection{Attempts to Solve the Integral Equation}
\label{sec:solve}

Differentiating
\eqref{eq:integralULq} once with respect to $h_0$ eliminates the
integral over $dg_1$, requires the substitution
\begin{equation*}
  g_1 = \LgQ,
\end{equation*}
and yields
\begin{equation*}
  \psi(g_0) \frac{\partial}{\partial h_0} \phi(h_0,g_0) =
  -\frac{\Delta_y} {\lambda} \psi(g_0+h_0 \Delta_y -6 \Delta_y^2)
  \int_{h_0 - 12 \Delta_y}^{\sqrt{24(u-g_0-h_0 \Delta_y + 6
      \Delta_y^2)}}~ \phi(h_1,g_0+h_0 \Delta_y -6 \Delta_y^2) ~dh_1. 
\end{equation*}
Note that the lower limit of the remaining integral is $h_0 - 12
\Delta_y$.  Thus the integral operator at $(g_0, h_0)$ has a domain in
$(g_1, h_1)$ that extends $12 \Delta_y$ below $h_0$.  Differentiating
again with respect to $h_0$ eliminates the integral over $dh_1$ and
yields
\begin{align*}
  \frac{\partial^2}{\partial h_0^2} \phi(h_0, g_0) &= \frac{-\Delta_y}
  { \lambda \psi(g_0)} \Bigl( \\
  &\quad \Delta_y \psi'(g_0+h_0 \Delta_y -6 \Delta_y^2)
  \int_{h_0 - 12 \Delta_y}^{\Rad{-g_0-h_0 \Delta_y + 6 \Delta_y^2}}~
  \phi(h_1,g_0+h_0 \Delta_y -6 \Delta_y^2) ~dh_1\\
  &\quad + \Delta_y \psi(g_0+h_0 \Delta_y -6 \Delta_y^2) \int_{h_0 - 12 \Delta_y}
  ^{\Rad{-g_0-h_0\Delta_y + 6\Delta_y^2 }}~ \frac{\partial}{\partial g}
  \phi(h_1,g_0+h_0 \Delta_y -6 \Delta_y^2) ~dh_1\\ 
  &\quad - \frac{12 \Delta_y \psi(g_0+h_0 \Delta_y -6 \Delta_y^2)} { \Rad{ -g_0
      -h_0\Delta_y + 6\Delta_y^2} } \phi \left( \Rad{-g_0-h_0\Delta_y
      + 6\Delta_y^2},g_0 + h_0 \Delta_y -6\Delta_y^2\right) \\
  &\quad - \psi(g_0+h_0 \Delta_y -6 \Delta_y^2) \phi(h_0 - 12\Delta_y,
  g_0 + h_0 \Delta_y - 6\Delta_y^2) \Bigr)
  \\
  &\dhot \frac{\Delta_y}{\lambda} \phi(h_0,g_0)
\end{align*}

The solution to the second order linear differential equation is
\begin{equation*}
  \phi(h) = a e^{\gamma h} + b e^{-\gamma h},
\end{equation*}
where
\begin{equation*}
  \gamma \equiv \sqrt{\frac{\Delta_y}{\lambda}}.
\end{equation*}
Since at $h = \Rad{-g_0}$, $\phi(h) = 0$,
\begin{align*}
  0 &= a e^{\gamma \Rad{-g_0}} + b e^{-\gamma \Rad{-g_0}} \\
  b &= -a e^{2\gamma \Rad{-g_0}} \\
  \phi(h) &= -a e^{\gamma \Rad{-g_0}} \left(
    e^{\gamma \left( \Rad{-g_0} - h \right) } -
    e^{-\gamma \left(\Rad{-g_0} - h \right) } \right) \\
  &= -2 a e^{\gamma \Rad{-g_0}} \sinh \left( \gamma \left( \Rad{-g_0}
      - h \right) \right),
\end{align*}
and I can write the solution as
\newcommand{\phiHG}[2]{\sinh \left( \gamma \left( \sqrt{24(u-#2)} - #1
    \right) \right)}
\begin{equation}
  \label{eq:sol_h}
  \rho(g,h) \equiv \psi(g) \phi(h,g) \equiv \psi(g) \phiHG{h}{g}.
\end{equation}
\newcommand{\altphiHG}[2]{e^{-6\gamma^2 \Delta_y h} \sinh \left( \gamma
    \left( \sqrt{24(u-#2)} - #1 \right) \right)} I've obtained an
alternate solution by assuming the form $\phi(h,g) = f(h) \sinh\left(
  \gamma \left( \sqrt{24(u-g)} - h \right) \right)$, expanding
$\phi''(h) = \gamma^2 \phi(h - 12\Delta_y)$ and solving for $f$ after
dropping terms that are second order in $\Delta_y$.  The alternative
solution is
\begin{equation}
  \label{eq:sol_h_}
  \rho(g,h) \equiv \psi(g) \phi(h,g) \equiv \psi(g) \altphiHG{h}{g}.  
\end{equation}

\subsubsection{ODE for $\psi$}
\label{sec:ODE}

Next I use \eqref{eq:sol_h} in \eqref{eq:integralULq} and
differentiate to see if a solution for $\psi(g)$ is possible.  Noting
\begin{equation*}
  \int \phi(h,g) = \frac{-1}{\gamma} \cosh \left( \gamma \left(
      \Rad{-g} - h \right) \right) + C,
\end{equation*}
I write
\begin{align}
  & \lambda \psi(g_0)\phi(h_0,g_0) \nonumber\\
  & \quad =\int_{\LgQ}^{\UgQ} \frac{-\psi(g_1) }{\gamma} \left[
    \cosh\left( \gamma \left( \Rad{-g_1} - h \right)\right) \right]_{
    \frac {\Delta_g} {\Delta_y} - 6 \Delta_y}^{\Rad{-g_1}} dg_1
  \nonumber \\
  \label{eq:star}
  & \sqrt{\lambda \Delta_y} \psi(g_0) \phiHG{h_0}{g_0} \nonumber \\
  &\quad \dhot - \int_{\Lg}^{\Ug} \psi(g_1) \left[ 1 - \cosh \left(
      \gamma \left( \sqrt{24(u-g_1)} - \frac{g_1 - g_0}{ \Delta_y}
      \right) \right) \right] dg_1.
\end{align}
Differentiating \eqref{eq:star}  wrt $g_0$ yields
\begin{align}
  & \sqrt{\lambda \Delta_y} \left[ \psi'(g_0) \phiHG{h_0}{g_0} -
   \psi(g_0)  \frac{12 \gamma}{\sqrt{24(u-g_0)}} \cosh\left( \gamma
      \left(\sqrt{24(u-g_0)} -h_0 \right) \right) \right]  \nonumber\\
  \label{eq:d_star_dg}
  & \quad \equiv D = A + B + C \text{ where} \\
  A &= -\left( 1 - \frac{12 \Delta_y}{\sqrt{24(u-g_0)}} \right)
  \left[ \psi(g_0) + \Delta_y\sqrt{24(u-g_0)} \psi'(g_0) \right]  \nonumber\\
  & \times \left[ 1 - \cosh\left( \gamma \left(\sqrt{24(u-g_0 -
          \Delta_y \sqrt{24(u-g_0)})} - \sqrt{24(u-g_0)} \right)
    \right) \right]  \nonumber\\
  B &= \left[ \psi(g_0) + \Delta_y h_0 \psi'(g_0) \right] \left[ 1 -
    \cosh\left( \gamma \left(\sqrt{24(u-g_0 - \Delta_y h_0)} - h_0
      \right) \right) \right]  \nonumber\\
  C &= \int_{\Lg}^{\Ug} \psi(g_1) \frac{\gamma}{\Delta_y}
  \sinh \left( \gamma \left(
      \sqrt{24(u-g_1)} - \frac{g_1 - g_0 }{ \Delta_y}
    \right) \right) dg_1. \nonumber
\end{align}
If $g_0 > u - 24 \Delta_y^2$ the argument of $\cosh$ in $A$ is
complex, but if
\begin{equation}
  \label{eq:little_g}
  u-g_0 \gg \Delta_y,
\end{equation}
then $A = 0 + O(\Delta_y^2)$.  I will drop $A$ assuming
\eqref{eq:little_g} and note that any solutions that I find will not
be valid for large $g$.  If $ u-g $ is large compared to $\Delta_y$,
then replacing $g_1$ with $g_0$ under the square root in $C$ might be
OK too.\marginpar{maybe not}

Integrating the approximation by parts, I find
\begin{align*}
  C &\dhot \int_{\Lg}^{\Ug} \psi(g_1) \frac{\gamma}{\Delta_y}
  \sinh \left( \gamma \left(
      \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
    \right) \right) dg_1 \\
  &= -\left[ \psi(g_1) \cosh \left( \gamma \left(
      \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
    \right) \right)\right]_{\Lg}^{\Ug} \\
& \quad + \int_{\Lg}^{\Ug} \psi'(g_1) \cosh \left( \gamma \left(
      \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
    \right) \right) dg_1.
\end{align*}
The surface terms approximately cancel $B$ and thus
\begin{equation*}
  D \dhot \int_{\Lg}^{\Ug} \psi'(g_1) \cosh \left( \gamma \left(
      \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
    \right) \right) dg_1.
\end{equation*}
Integrating $D$ by parts two more times yields (note
$\frac{\Delta_y}{\gamma} = \sqrt{\lambda \Delta_y}$)
\begin{align}
  D &\dhot -\sqrt{\lambda \Delta_y} \left[ \psi'(g_1) \sinh \left(
      \gamma \left( \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
      \right) \right)\right]_{\Lg}^{\Ug} \nonumber \\
& \quad +\lambda \Delta_y \left[ \psi''(g_1) \cosh \left( \gamma \left(
      \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
    \right) \right)\right]_{\Lg}^{\Ug} \nonumber \\
& \quad - \lambda \Delta_y \int_{\Lg}^{\Ug} \psi'''(g_1) \cosh \left(
  \gamma \left( \sqrt{24(u-g_0)} - \frac{g_1 - g_0 }{ \Delta_y}
  \right) \right) dg_1 \nonumber \\
\label{eq:D_end}
&\dhot \sqrt{\lambda \Delta_y} \psi'(g_0) \sinh \left( \gamma
  \left( \sqrt{24(u-g_0)} - h_0 \right) \right) \\
& \quad +\lambda \Delta_y \psi''(g_0) \left( 1 - \cosh \left( \gamma \left(
      \sqrt{24(u-g_0)} - h_0 \right) \right) \right) + O\left(
  \left( \frac{\Delta_y}{\gamma} \right)^3 \right). \nonumber
\end{align}
Substituting \eqref{eq:D_end} into \eqref{eq:d_star_dg} yields
\begin{equation}
  \label{eq:hope}
  \lambda \Delta_y \psi''(g_0) \left( 1 - \cosh \left( \gamma \left(
        \sqrt{24(u-g_0)} - h_0 \right) \right) \right) = -\psi(g_0)
  \frac{12 \gamma}{\sqrt{24(u-g_0)}} \cosh\left( \gamma
    \left(\sqrt{24(u-g_0)} -h_0 \right) \right),
\end{equation}
which would be a nice equation if I could find an extra
$\psi(g_0)\frac{12\gamma}{\sqrt{24(u-g_0)}}$ for the right-hand side.

\newpage

Differentiating \eqref{eq:star}  wrt $h_0$ yields
\newcommand{\temp}{g_0 + h_0 \Delta_y - 6 \Delta_y^2}
\newcommand{\mtemp}{-g_0 - h_0 \Delta_y + 6 \Delta_y^2}
\begin{align}
  & \frac{\lambda \gamma}{\Delta_y} \psi(g_0) \cosh \left( \gamma
    \left( \Rad{-g_0} - h_0 \right) \right) \nonumber \\
  &= - \psi\left( \temp \right)
  \left[ \cosh
    \left( \gamma
      \left( \Rad{\mtemp} - h \right)
    \right)
  \right]^{\Rad{\mtemp}}_ {\frac{h_0 \Delta_y - 6
      \Delta_y^2}{\Delta_y}} \nonumber \\
  \label{eq:DEG}
  &= \psi\left( \temp \right)
  \left[ \cosh
    \left( \gamma
      \left( \Rad{\mtemp} - h_0 + 6 \Delta_y \right)
    \right) -1
  \right].
\end{align}
The result seems implausible for two reasons that I will explain.  My
explanations assume that $\lim_{\Delta_y \rightarrow 0}
\lambda(\Delta_y) = 1$ and consequently that $\lim_{\Delta_y
  \rightarrow 0} \gamma(\Delta_y) = \sqrt{\Delta_y}$.
\begin{enumerate}
\item The ways that the variable $h_0$ appears seem to require that
  $\psi$ must depend on $h_0$, contradicting the assumption behind its
  definition.  I will assume that $h_0=0$ and later ensure that
  nonzero values are consistent with any solution that satisfies the
  second objection.
\item To lowest order in $\Delta_y$, the equation is roughly
  \begin{equation*}
    \frac{1}{\sqrt{\Delta_y}} \psi(g_0) = \psi(g_0 - 6\Delta_y^2)
    24(u-g_0) \Delta_y,
  \end{equation*}
  which suggests
  \begin{align*}
    \psi(g) &= \left( \psi(g) - 6\Delta_y^2 \psi'(g) \right) 24(u-g)
    \Delta_y^{\frac{3}{2}} \\
    \psi'(g_0) &= \frac{\Delta_y^{\frac{3}{2}} 24 (u-g_0) - 1} {144
      \Delta_y^{\frac{7}{2}} (u-g_0)} \psi(g_0) \\
    \psi'(g_0) &= \frac{- 1} {144 \Delta_y^{\frac{7}{2}} (u-g_0)} \psi(g_0)
  \end{align*}
  and solutions for $\psi(g)$ that decay with an exponent of order
  $\Delta_y^{ - \frac{7}{2}}$.  I had hoped for convergence of
  eigenfunctions as $\Delta_y \rightarrow 0$.
\end{enumerate}

I will proceed in the following three steps:
\begin{enumerate}
\item Assume $h_0=0$ and derive an ODE for $\psi$ from \eqref{eq:DEG}
  that is accurate to second order in $\Delta_y$ and fourth order
  $\gamma$.
\item Solve that ODE and with luck find $\lambda$.
\item Substitute the solution for $\psi$ into \eqref{eq:DEG} and
  ensure that at least the values and the first two derivatives with
  respect to $h_0$ are equal.
\end{enumerate}

If $h_0 = 0$, then \eqref{eq:DEG} becomes
\renewcommand{\temp}{g_0 - 6\Delta_y^2}
\renewcommand{\mtemp}{-g_0 +6\Delta_y^2}
\begin{align*}
  & \frac{\lambda \gamma}{\Delta_y} \psi(g_0) \cosh \left( \gamma
    \left( \Rad{-g_0} \right) \right) \\
  &= \psi\left( \temp \right)
  \left[ \cosh
    \left( \gamma
      \left( \Rad{\mtemp} + 6 \Delta_y \right)
    \right) -1
  \right].
\end{align*}
To lowest order in $\Delta_y$ I find,
\newcommand{\coshg}{\cosh\left(\gamma \Rad{-g_0} \right)}
\begin{align*}
  & \frac{1}{\gamma} \psi(g_0)
  \cosh \left( \gamma \left( \Rad{-g_0} \right) \right) \\
  &= \left[\psi(g_0) - 6\Delta_y^2\psi'(g_0) \right]
  \left[ \cosh \left( \gamma \left( \Rad{-g_0} \right) \right) - 1
  \right] \\
  \psi'(g_0) &= \frac{(\gamma-1)\coshg - \gamma }{\gamma 6 \Delta_y^2
  \left( \coshg - 1 \right)} \psi(g_0).
\end{align*}
In the next sequence, I drop all but the lowest order terms in
$\gamma$ and use the first two terms in the Taylor series for
$\cosh$:
\begin{align*}
  \psi'(g_0) &\dhot -\frac{\coshg}{6\Delta_y^2 \gamma \left( \coshg -
      1 \right)} \psi(g_0) \\
  &\dhot -\frac{1}{3 \Delta_y^2 \gamma^3 24(u-g_0)} \psi(g_0) \\
  &= -\frac{1}{72 \Delta_y^2 \gamma^3 (u-g_0)} \psi(g_0).
\end{align*}
Then using the following form for solving the ODE
\begin{align*}
  y' &= \frac{by}{a-x} \text{ ODE} \\
  y(x) &= c_1 (a-x)^b \text{ solution},
\end{align*}
I find
\begin{equation}
  \label{eq:1}
  \psi(g_0) = (u - g_0)^{-\frac{1}{72 \Delta_y^2 \gamma^3}} =
  \left( \frac{1}{u - g_0} \right)^{\frac{\lambda \sqrt{\lambda}}{72
      \Delta_y^3 \sqrt{\Delta_y}}}
\end{equation}
The result is unsatisfactory because it is undefined at $g_0 = u$.

I start again.  Let $h_0=0$ and $\phi(h,g)=\phiHG{h}{g}$, then from
\eqref{eq:integralULq}
\renewcommand{\LgQ}{g_0 - 6\Delta_y^2}
\begin{align}
  & \lambda \psi(g_0) \sinh\left(\gamma \sqrt{24(u-g_0)} \right) \nonumber \\
  &= \int_{\LgQ}^{\UgQ} \psi(g_1) \int_{\LhQ}^{\Uh}~ \phiHG{h_1}{g_1}
  ~dh_1 dg_1 \nonumber \\
  & \lambda \gamma \psi(g_0) \sinh\left(\gamma \sqrt{24(u-g_0)}
  \right) \nonumber \\
  \label{eq:Ione}
  &= \int_{\LgQ}^{\UgQ} \psi(g_1) \left[ \cosh \left(
      \gamma \left( \sqrt{24(u-g_1)} - \frac{g_1 - g_0}{\Delta_y} + 6
        \Delta_y \right) \right) -1 \right] dg_1 \\
  &\equiv I_1 \nonumber
\end{align}
From the following change of variable
\begin{align*}
   t(g) &= \gamma \left( \sqrt{24(u-g)} - \frac{g - g_0}{\Delta_y} +
     6 \Delta_y \right)\\
   g(t) &= g_0 - 6 \Delta_y^2 - \frac{\Delta_y t}{\gamma} +
   \Delta_y \sqrt{24 \left( u-g_0 + \frac{t\Delta_y}{\gamma} \right)} \\
   \frac{d g}{ dt} &= \frac{\Delta_y}{\gamma} \left( \frac{12 \Delta_y} {
       \sqrt{ 24 \left( u - g_0 + \frac{t \Delta_y}{\gamma} \right) }}
     - 1 \right) \\
   \frac{d^2 g}{ dt^2} &= -\frac{144 \Delta_y^3}{\gamma^2}
   \left( 24
     \left( u - g_0 + \frac{t \Delta_y}{\gamma} \right)
   \right)^{-\frac{3}{2}},
\end{align*}
I calculate
\begin{align*}
  t \left( \UgQ \right) &= 0 \\
  t \left( \LgQ \right) &= \gamma \left(
    \sqrt{24\left( u - g_0 + 6 \Delta_y^2 \right) } + 12 \Delta_y \right),
\end{align*}
and write
\newcommand{\LOW}{{\gamma \left( \sqrt{24(u- g_0 + 6\Delta_y^2 )} +
      12\Delta_y \right)}}
\newcommand{\RANGE}{_\LOW ^0}
\newcommand{\INT}{\int\RANGE}
\begin{align*}
  I_1 &= \INT \psi(g(t)) \frac{d g}{d t} [\cosh(t) -1] dt \\
  &\dhot \INT \left[ \psi(g(0)) + t \psi'(g(0)) \right]
  \left[ \left. \frac{d g}{d t} \right|_{t=0}
    + t \left. \frac{d^2g}{d t^2} \right|_{t=0}
  \right]
  [\cosh(t) -1] dt \\
  &\equiv I_{A} +  I_{B} +  I_{C}
\end{align*}
\newcommand{\loit}{=_\text{loit}}
\newcommand{\DG}{\frac{\Delta_y}{\gamma} \left( \frac{12 \Delta_y}
    { \sqrt{24(u-g_0)} } - 1 \right) }
\newcommand{\DDG}{\frac{144 \Delta^2_y}{\gamma^2} \left(24(u-g_0)
  \right)^{-\frac{3}{2}} }
where (using the notation $\loit$ for \emph{low order in $t$})
\begin{align*}
  I_{A} &= \psi(g(0)) \left. \frac{d g}{d t} \right|_{t=0}
  \INT [\cosh(t) - 1] dt \\
  &= \psi(g(0)) \left. \frac{d g}{d t} \right|_{t=0}
  [\sinh(t) - t]\RANGE \\
  &\loit \psi(g(0)) \left. \frac{d g}{d t} \right|_{t=0}
  \left[\frac{t^3}{6} \right]\RANGE\\
  &= -\psi(g(0)) \DG \frac{ \left( \LOW \right)^3 }{6} \equiv \tilde
  I_A \\
  I_{B} &=  \left(
    \psi(g(0)) \left. \frac{d^2 g}{d t^2} \right|_{t=0}
    + \psi'(g(0)) \left. \frac{d g}{d t} \right|_{t=0}
  \right)
  \INT t [\cosh(t) - 1] dt \\
  &=  \left(
    \psi(g(0)) \left. \frac{d^2 g}{d t^2} \right|_{t=0}
    + \psi'(g(0)) \left. \frac{d g}{d t} \right|_{t=0}
  \right)
  \left[ t\sinh(t) - \cosh(t) - \frac{t^2}{2} \right] \RANGE \\
  &\loit  \left(
    \psi(g(0)) \left. \frac{d^2 g}{d t^2} \right|_{t=0}
    + \psi'(g(0)) \left. \frac{d g}{d t} \right|_{t=0}
  \right)
  \left[ \frac{t^4}{8} -1 \right]\RANGE \\
  &= \left( \psi(g(0)) \DDG + \psi'(g(0)) \DG \right) \\
  &\quad \times \frac{ \left( \LOW \right)^4 }{8} \equiv \tilde I_B\\
  I_{C} &= \psi'(g(0)) \left. \frac{d^2 g}{d t^2} \right|_{t=0}
  \INT t^2 [\cosh(t) - 1 ]dt \\
  &\loit \psi'(g(0)) \left. \frac{d^2 g}{d t^2} \right|_{t=0} \left[
    \frac{ t^5 }{ 10 } \right]\RANGE
\end{align*}

The equations
\begin{align*}
  \lambda \psi(g_0) \sinh\left(\gamma \sqrt{24(u-g_0)} \right) &= I_A
  + I_B \\
  \lambda \psi(g_0) \sinh\left(\gamma \sqrt{24(u-g_0)} \right) &=
  \tilde I_A + \tilde I_B
\end{align*}
are each linear first order ODEs for $\psi$ which a better man than I
might solve.

I used the following integrals copied from Wikipedia
\begin{align*}
  \int \sinh (ax)\,dx &= a^{-1} \cosh (ax) + C \\
  \int \cosh (ax)\,dx &= a^{-1} \sinh (ax) + C \\
  \int x\cosh ax\,dx &= \frac{1}{a} x\sinh ax - \frac{1}{a^2}\cosh
  ax+C,
\end{align*} 
and this from alpha
\begin{align*}
  & \int e^{ax} \sinh(b(x+c)) \\
  &= e^{ax} \frac{a \sinh(b(c+x)) - b \cosh(b(c+x))}{(a-b)(a+b)}
\end{align*}

\section{Second Order}
\label{sec:second-order}

Rather than consider states composed of position and derivative, I
return to using a state at $y_n$ that is determined by $g_n$ and
$g_{n-1}$ with
\begin{equation*}
  z_n \equiv \left(g_n, \frac{g_n - g_{n-1}}{\Delta_y} \right) \equiv
  (g_n, h_n).
\end{equation*}
Values of $g_n$ must lie between the upper and lower bounds $u$ and
$-u$, and the two curves that go through $g_n$ and are tangent to the
upper bound provide the following bounds\footnote{These bounds follow
  from the derivation of \eqref{eq:DG_DY} and symmetry.} on $h_n$
\begin{equation*}
  \sqrt{24(u - g_n)} - 6 \Delta_y \geq h_n \geq - \sqrt{24(u - g_n)} -
  6 \Delta_y.
\end{equation*}

Given $g_0$ and $g_1$, the upper bound on $g_2$ is given by the
tangent to the upper bound that goes through $g_1$ and the lower bound
is given by the \emph{line} (straight in $(x,f)$ coordinates,
quadratic with second derivative -12 in $(g,y)$ coordinates) that goes
through $g_0$ and $g_1$.  Those constraints imply\footnote{Derivation
  solves for $g_2$ given $g_0$ and $g_1$ as follows
  \begin{align*}
    g_2 &= a - 6(b-y_2)^2 \\
    &= g_0 + 6(b-y_0)^2 - 6(b-y_2)^2 \\
    &= g_0 + 12b(y_2 - y_0) + 6(y_0^2 - y_2^2) \\
    b &= \frac{ g_1 - g_0 + 6(y_1^2 - y_0^2)}{12(y_1 - y_0)} \\
    g_2 &= g_0 + \frac{y_2 - y_0}{y_1-y_0} \left( g_1 - g_0 + 6
      \Delta_y (y_1 + y_0) \right) - 6 (y_2 - y_0)(y_2 + y_0) \\
    &= g_0 + 2 \left( g_1 - g_0 + 6
      \Delta_y (y_1 + y_0) \right) - 12\Delta_y (y_2 + y_0) \\
    &= 2g_1 - g_0 - 12 \Delta_y^2
  \end{align*}
  Note:
  \begin{equation*}
    g_1 + \Delta_y \sqrt{24(u - g_1)} - 6 \Delta^2_y = g_2 \iff
    g_2 + \Delta_y \sqrt{24(u - g_2)} - 6 \Delta^2_y = g_1 .
  \end{equation*}
}
\begin{subequations}
  \label{eq:bounds}
  \begin{align}
     g_1 + \Delta_y \sqrt{24(u - g_1)} - 6 \Delta^2_y &\geq g_2 \geq
     2g_1 - g_0 - 12 \Delta_y^2 \\
     \sqrt{24(u - g_1)}  - 6 \Delta_y &\geq h_2 \geq h_1 - 12 \Delta_y
     \\
    g_2 &= g_1 + h_2 \Delta_y.
  \end{align}
\end{subequations}

I define the indicator function
\begin{equation*}
  I(h_1, g_1, h_2, g_2) =
  \begin{cases}
    1 \text{ if constraints of \eqref{eq:bounds} hold} \\
    0 \text{ otherwise}
  \end{cases}
\end{equation*}
and write
\renewcommand{\RANGE}{_{h_0 - 12 \Delta_y}^{\sqrt{24(u-g_0)} - 6 \Delta_y}}
\begin{align}
  \lambda \rho(h_0,g_0) &= \int \rho(h_1, g_1) I( h_0, g_0, h_1, g_1)
  d h_1 d g_1 \nonumber \\
  &= \int\RANGE \int_{-u}^u \rho(h_1, g_1) \, \delta(g_1,g_0 +
  \Delta_y h_1) d g_1 \, dh_1 \nonumber \\
  \label{eq:Int}
  &= \Delta_y \int\RANGE \rho(h_1, g_0 + \Delta_y h_1) \, dh_1 .
\end{align}
Notice that at the boundary, i.e., $h_0 = \Rad{-g_0} - 6\Delta_y^2$, the
integral is
\begin{equation*}
  \lambda \rho(\Rad{-g_0} - 6\Delta_y^2,g_0) = \Delta_y
  \int_{\Rad{-g_0} - 18\Delta_y^2} ^{\Rad{-g_0} - 6\Delta_y^2}
  \rho(h_1, g_0 + \Delta_y h_1) \, dh_1.
\end{equation*}
That suggests that $\rho$ is not zero at the upper boundary and that
$\lambda \approx 12 \Delta^3_y$.  \marginpar{Fix me} I expect
$\lambda$ to be exponential in $\Delta_y$ and $\rho$ to be small at
the upper boundary.

In the following I discard terms that are higher than second order in
$\Delta_y$.  Differentiating \eqref{eq:Int} with respect to $h_0$ yields
\begin{align*}
  \rho(h_0,g_0) &= \frac{\Delta_y}{\lambda} \int\RANGE
  \rho(h_1, g_0)  + h_1 \Delta_y \rho_g(h_1, g_0) \, dh_1 \\
  \rho_h(h_0, g_0) &= - \frac{\Delta_y}{\lambda} \left( \rho(h_0
    -12\Delta_y, g_0) + \left(h_0 - 12\Delta_y \right) \Delta_y \rho_g
    ( h_0 - 12\Delta_y, g_0) \right) \\
  &= - \frac{\Delta_y}{\lambda} \rho( h_0, g_0) + \frac{12
    \Delta^2_y}{\lambda} \rho_h( h_0, g_0) -
  \frac{\Delta^2_y}{\lambda} h_0 \rho_g(h_0, g_0) \\
  0 &= \left( \frac{12 \Delta^2_y}{\lambda} - 1 \right) \rho_h( h_0, g_0)
  - \frac{\Delta_y}{\lambda} \rho( h_0, g_0) -
  \frac{\Delta^2_y}{\lambda} h_0 \rho_g(h_0, g_0).
\end{align*}
Differentiating \eqref{eq:Int} with respect to $g_0$ yields
\begin{align*}
  \rho_g(h_0, g_0) %
  &= \frac{-12 \Delta_y} {\lambda \Rad{-g_0}}
  \rho\left( \Rad{-g_0} - 6\Delta_y, g_0  +\Delta_y \left( \Rad{-g_0}
      - 6 \Delta_y \right) \right) \\
  & \quad + \frac{\Delta_y}{\lambda} \int\RANGE \rho_g(h_1, g_0 +
  \Delta_y h_1) \, dh_1 \\
  &= \frac{-12 \Delta_y} {\lambda \Rad{-g_0}}
  \rho\left( \Rad{-g_0} -6\Delta_y, g_0  \right) \\
  & \quad + \frac{-12 \Delta^2_y} {\lambda}
  \rho_g\left( \Rad{-g_0} -6\Delta_y, g_0  \right) \\
  & \quad + \frac{\Delta_y}{\lambda} \int\RANGE \rho_g(h_1, g_0 +
  \Delta_y h_1) \, dh_1.
\end{align*}

\section{Appendix}
\label{sec:appendix}

\subsection{Build Times}
\label{sec:build-times}
\begin{center}
  \begin{tabular}{|l|l|l|l|}
    \hline
    Target     & Watcher & Xray-r09 & Orr \\ \hline
    juq.pdf    & 125:00  & 250:00   & tbd \\
    notes.pdf  & 2:00    &   3:24   & tbd \\ \hline
  \end{tabular}
\end{center}

\subsection{Pricing Measurements: Experimental Design}
\label{sec:pricing}

\subsection{To Do}
\begin{itemize}
\item Think about uses of PCA:
  \begin{itemize}
  \item Find and characterize variation orthogonal to $q_0$ and $q_1$
  \item Price new experiments
  \end{itemize}
\item Think about the utility of feature selection.  What is the use
  of a low dimensional function of measurements?
\end{itemize}

\subsection{Elliptical Approximation of Cross Sections of
  $\PolytopeN$}
\label{sec:ellipse}
For a distribution with
\begin{equation*}
  \Sigma = \begin{bmatrix} \frac{R^2}{2} &0 \\ 0 &
    \frac{R^2}{2} \end{bmatrix} 
\end{equation*}
the RMS radius is $R$.  I calculate the variance of a uniform
distribution over a circle of radius $S$ as follows
\begin{align*}
  \sigma_{x,x} &= \frac{1}{2} \EV{r}{r^2} \\
  &= \frac{1}{2\pi S^2} \int_0^S 2\pi r r^2 \, dr \\
  &= \frac{S^2}{4} \text{ or}\\
  2\sqrt{\sigma_{x,x}} &= S.
\end{align*}
The following inequality describes the disk in terms of the covariance:
\begin{align*}
  \begin{bmatrix} x & y \end{bmatrix} \cdot \Sigma^{-1}
  \cdot \begin{bmatrix} x\\y \end{bmatrix} &= \frac{4}{S^2} 
  \left( x^2 + y^2 \right) \\
  &= 4 \frac{r^2}{S^2} \leq 4 &\text{ hence the requirement:} \\
  \begin{bmatrix} x & y \end{bmatrix} \cdot \Sigma^{-1}
  \cdot \begin{bmatrix} x\\y \end{bmatrix} &\leq 4  
\end{align*}

\subsection{CJ Conditions}
\label{sec:CJ}

Put equations 2.11 and 2.12 on page 19 of Fickett and Davis in terms
of density rather than specific volume.
\begin{align}
  v &= \frac{1}{\rho} \nonumber \\
  \label{eq:dpdrho}
  \frac{d\, P(v(\rho))}{d\,\rho} &= \frac{d\, P}{d\, v} \frac{d\,
    v}{d\, \rho} = \frac{-1}{\rho^2} \frac{d\, P}{d\, v}
\end{align}
Combining 2.11 and 2.12 of Fickett and Davis and supposing $p_0 = 0$
yields
\begin{equation*}
  \left. \frac{d\, P}{d\, v} \right|_{\cal H} = \frac{P}{v-v_0}
  = -\frac{P}{v_0-v}.
\end{equation*}
In the following I translate from volume to density
\begin{align*}
  \rho^2 \frac{d\, P}{d\, \rho} &= \frac{P}{v_0-v} \\
  &= \frac{P}{\frac{1}{\rho_0}-\frac{1}{\rho}} \\
  \frac{d\, P}{d\, \rho} &= \frac{P}{\frac{\rho^2}{\rho_0}-\rho} \\
  &= \frac{\rho_0 P}{\rho^2 - \rho_0\rho}
\end{align*}
Solving for $P$ yields
\begin{equation*}
  P = \frac{\rho(\rho - \rho_0) }{\rho_0}\frac{d\, P}{d\, \rho}.
\end{equation*}

\subsection{Convex in $v$ or $\rho$}
\label{sec:convex}

Starting from \eqref{eq:dpdrho}
\begin{align*}
  \frac{d^2\, P(v(\rho))}{d\,\rho^2} &=
       \frac{d\, P}{d\, v} \frac{2}{\rho^3} +
       \frac{d^2\, P}{d\, v^2} \frac{1}{\rho^4} \\
  P_{v,v} &= \rho^4P_{\rho,\rho} - 2\rho P_v \\
  P_{v,v} &> 0 ~ \implies ~ P_{\rho, \rho} > 2\frac{P_v}{\rho^3}
            = -2\frac{P_\rho}{\rho} \\
  & \text{because}\\
  P_\rho &= -\frac{P_v}{\rho^2} ~\& ~ P_v = -\rho^2 P_\rho
\end{align*}



\end{document}

%%%---------------
%%% Local Variables:
%%% eval: (TeX-PDF-mode)
%%% eval: (setq ispell-personal-dictionary "./localdict")
%%% End:
