\documentclass[12pt]{article}
\usepackage{layout,pdfsync,latexsym, array, enumerate, amsmath, amsthm,amssymb, amsfonts,natbib, subfigure}
\usepackage[mathscr]{eucal}
\usepackage{epsf,epsfig,eufrak,dsfont}

\bibliographystyle{apalike}

\textwidth 6.5in \textheight 9.00in \oddsidemargin -0.15in
\evensidemargin -0.15in \topmargin -0.25in
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}{Corollary}[theorem]
\newtheorem{example}{Example}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{defn}{Definition}[section]
\newcommand{\lowtilde}[1]{\mathop{#1}\limits_{\textstyle\tilde{}}}
%\renewcommand{\baselinestretch}{1.4}

\newcommand{\off}{\mathcal{O}}
\newcommand{\poly}{{\cal P}(u-x;\mathbf{a})}
\newcommand{\lik}{\ensuremath{\mathcal{L}}}
\newcommand{\map}{\ensuremath{\mathcal{M}}}
\newcommand{\area}[1]{\ensuremath{|\!|#1|\!|}}
\newcommand{\norm}[1]{\ensuremath{\left|\!\left|#1\right|\!\right|_1}}
\newcommand{\dee}[1]{\ensuremath{\,\mathrm{d}#1}}
\newcommand{\Steve}[1]{{\tt (#1)}}

\begin{document}
\title{Local-EM and the EMS Algorithm}
\author{Chun-Po Steve Fan\footnote{Chun-Po Steve Fan is a doctoral student in the Dalla Lana School of Public Health, University of Toronto}, Jamie Stafford\footnote{University of Toronto} and Patrick E. Brown\footnote{University of Toronto and Cancer Care Ontario} }
\thispagestyle{empty}
\date{}
\maketitle
%\renewcommand{\baselinestretch}{1.4}
\begin{abstract}
The use of local likelihood methods (Hastie and Tibshirani 1987, Loader 1999) in the presence of data that are either interval or area censored leads naturally to the
consideration of EM-type strategies, or rather local-EM algorithms.
In this paper we consider a class of local-EM algorithms suitable for density or intensity estimation in the temporal or spatial context. We demonstrate that using a piecewise constant density function at the E-step results in the algorithm collapsing
explicitly into an EMS algorithm of the type considered by \cite{silvermanems}. 

This discovery has two advantages. Identifying a relationship between local likelihood and the EMS algorithm means the former provides a natural context for the latter, which is often referred to as {\it ad hoc} in the literature. In addition, the latter provides a set of tools to guide the use, and implementation, of local-EM algorithms. For example, we
expose a previously unknown connection between local-EM algorithms and penalized likelihood that is analogous to the more familiar pairing of EM and likelihood. Examples include exploring the spatial structure of the disease lupus in the City of Toronto.
\end{abstract}

\vspace{10pt} {\small\itshape Keywords: density estimation; intensity
estimation; interval and area censoring; local-EM; panel counts; lupus; penalized
likelihood; self-consistency}

\section{Introduction}
In this paper we consider the use of local likelihoods for density and intensity estimation when data are only partially observed. Here data may be interval censored,
they may be temporal and come in the form of panel counts, or they may be spatial and area censored. Whatever form the censoring takes, the use of local likelihood techniques naturally leads to the consideration of local-EM algorithms.

The use of local-EM algorithms has precedents in the literature. Examples include \cite{tanner1987mi}, \cite{betensky1999leh}, \cite{braun2005lld} and \cite{tolusso2008red}, where the method of implementation varies. For instance, implementation of the E-step can involve multiple imputation, Monte Carlo integration, numerical integration and so on. Despite these interesting developments, convergence of the local-EM algorithm is difficult to demonstrate, and it was not clear whether its fixed point maximized any particular criterion. For example, while we know EM is a tool useful for maximizing likelihoods or posterior distributions, what is the analogy for local-EM?

In this paper we consider simplifying the E-step of a local-EM algorithm by approximating conditional expectations using a piecewise constant function. This results in the local-EM algorithm collapsing explicitly into an EMS algorithm of the type proposed in \cite{silvermanems}. An EMS algorithm results when a smoothing step is added to the expectation and maximization steps of the usual EM algorithm. \cite{silvermanems} refers to the method as being {\it ad hoc}. Identifying the relationship between local-EM and EMS has two advantages. First, it embeds the EMS algorithm in the local likelihood context where it is seen to arise naturally as an implementation of a local-EM algorithm; thus it is not {\it ad hoc}%
\footnote[1]{\cite{nychka1990spa} demonstrates that a modified EMS algorithm is related to penalized likelihood. As a result he also suggests that the EMS algorithm is not {\it ad hoc}.}. Details are given in \S 3.
Secondly, the EMS algorithm has been extensively studied, and much is known about its convergence \cite[]{latham1995ems} and its relationship to the penalized likelihood~\cite[]{nychka1990spa}. The latter suggests a previously unknown connection between local-EM algorithms and penalized likelihood, which is analogous to the more familiar pairing of EM and likelihood.

The paper has the following structure. In \S 2, we summarize a common notation used for three contexts: failure time processes, temporal and spatial inhomogeneous Poisson processes. We return to each context in the examples given in \S 4 which collectively serve to demonstrate the results of \S 3. The examples concern density estimation for failure time data, intensity estimation for panel count data, and finally the spatial distribution of the disease lupus in the Greater Toronto Area. This last example may be viewed as extending the image reconstruction techniques of \cite[]{silvermanems} to an epidemiological setting. One point of interest is that the local-EM algorithm is seen to explicitly extend the self-consistency algorithms of \cite{turnbull1976edf}, \cite{hu2008gls} and \cite{vardi1985pet}. Another is that, in simple cases, the local-EM algorithm reduces to the methods of Jones (1989) for smoothing histograms and \cite{brillinger1990st, brillinger1991si, brillinger1994esp} for smoothing spatially aggregated data.

In \S 5 we exploit the relationship between local-EM and the EMS algorithm to gain insight into convergence issues and to expose the role of local-EM. In particular, in \S 5.2 we summarize results that suggest, at least
for the contexts considered in this paper, that local-EM and penalized
likelihood may be paired in a manner analogous to the pairing of EM
and likelihood. In \S 5.1 we prove
the ${\cal L}^1$
convergence of the EMS iterate of \S 3 to its local-EM counterpart. This,
and the developments of \S 3, suggest local-EM and EMS
techniques may be thought of synonymously. In \S 5.2 we demonstrate that the use of an equivalent
kernel in a local-EM algorithm leads to the modification necessary
to maximize a penalized likelihood (Nychka 1990). We also prove that the modified EMS iterate converges to its local-EM counterpart. Details of this proof and that of \S 5.1 are given in the appendix. Finally, in \S 5.3 we study
the penalty of \S 5.2 under the conditions of \S 5.1 and conclude that local-EM penalizes the usual nonparametric likelihood for departures of the target function from the maximal eigenfunction of the kernel. Here an example \cite[]{maathuis2005ran} involving hypothetical bivariate interval censored data, where the NPMLE is not unique but the local-EM mysteriously appears to be, can be understood in terms of penalized likelihood. Concluding remarks may be found in \S 6. 


Finally, it is the results of \S 3 \& 5 that constitute the major contributions of this paper. Important issues that concern consistency of local-EM estimators, asymptotic IMSE, and bandwidth selection are not treated. However, they are not ignored either. Simple cross validation is used to select an appropriate bandwidth in the third example and the second example presents a simple simulation study that favours local-EM and suggests it has reasonable asymptotic properties. Rigorous treatment of these issues is beyond the scope of this paper.

\section{Some initial details}

In the context of this paper we assume a study consists of $n$
independent subjects where the time or location of events follows either a failure time process or
an inhomogeneous Poisson process. Events are partially observed where
they are only known to have fallen into a particular interval of time or region in space. To permit general developments in \S 3 we introduce a common notation where its meaning is context dependent. For example, in the spatial context $\cal M$ will represent a geographic region, while in the temporal context it represents $\Re$ or $\Re^+$.

\subsection{Processes in time}
For processes in time each subject is observed at a set of points
${\cal T}_i=\{\tau_{ij}, j = 1 \ldots J_i\}$ that are either prearranged or determined by a visit
process that is assumed to be independent of the event process. 

When the event process for each subject is a common failure time process we denote the failure time by
$X_i$ and we assume it either falls between two adjacent elements of ${\cal T}_i$
or that it is right censored. In either case $X_i$ is interval censored
where we denote the relevant interval as $S_i=[L_i,R_i]$ where
$L_i,R_i\in{\cal T}_i\cup \{\infty\}$ and of course $X_i\in S_i$.
Note that if the event time for the $i$th subject is right censored
we set $R_i=\infty$. The observed data is then a sequence of
independent intervals $S_1 \ldots S_n$ some of which may overlap. We
let ${\cal Q}=\{Q_j; j=1,\ldots,J \}$ denote the partition of the data
defined by the collection of endpoints $\{L_i,R_i;i=1,\ldots,n\}$.
For example, if $n=2$ and $S_1=[0,3],~S_2=[1,2]$ then we would have
${\cal Q}=\{[0,1],[1,2],[2,3]\}$. For this setting the density, $\lambda(x)$, of the failure time process is the object of central interest.

When the event process for each subject is a common inhomogeneous Poisson process 
we denote the collection of events for the $i$th subject  as $X_i=\{X_{ik} ; k=1 \ldots N_i\}$ where $N_i$ denotes the number of events observed
for the $i$th subject. Here 
event times are still interval
censored but there may be multiple events in each interval. In this setting
$S_{ij}=[\tau_{ij},\tau_{ij+1}]$ is referred to as the $j$th panel
for the $i$th individual and we denote the number of events in the
interval $S_{ij}$ by $N_{ij}=\#\{ X_{ik} \in S_{ij} \}$.
Following the setup of \cite{hu2008gls} we let
$${\cal T}=\cup_i^n {\cal T}_i=\{\tau_j; j= 0 \ldots J\}$$
and again
${\cal Q}$ denotes a partition of the data
where now $Q_j=[\tau_{j-1},\tau_j]$. For this setting the intensity, $\lambda(x)$, of the Poisson process is the object of central interest.

\subsection{Processes in space}
Rather than time, we may consider a series of inhomogeneous Poisson processes in space. Here
the $X_i=\{X_{ik} ; k=1 \ldots N_i\}$ now represent sets of event locations for overlapping regions, or maps, ${\cal M}_i$ whose union we denote as ${\cal M}=\cup_i {\cal M}_i$. To accommodate nuisance sources of spatial variation we assume the intensity surface of $X_i$ takes the form
$$
\rho_i(s) = {\cal O}_i(s) \lambda(s).
$$
Here ${\cal O}_i(s)$ is an offset surface known \emph{a priori} and
$\lambda(s)$ is a relative risk surface that is assumed to be smooth. 

The challenge in this context is that the maps have differing tessellations. We denote a subregion of map ${\cal M}_i$ by $S_{ij}$ and the partition ${\cal Q}$ now results from overlaying the $n$ individual maps. Here the $X_{ik}$ are now assumed to be area censored and hence they are only known to fall in some subregion $S_{ij}$. Finally, for reasons given in \S 5.3 it is reasonable to assume that the offset surface ${\cal O}_i$ is constant over all regions $S_{ij}$ such that
$$
{\cal O}_i(u)={\cal O}_{ij}, ~u\in S_{ij}.
$$

\vspace{15pt}
Finally, for both the temporal and spatial context we have suggested the partition $\cal Q$ may be determined in a natural and obvious way from the data. However, there is no particular argument against choosing $\cal Q$ in any convenient fashion, say, as a grid on an axis or as a set of pixels for a map. In fact, for the theoretical developments in \S 4 and the appendix, it becomes convenient to consider an arbitrary partition whose elements shrink in volume. In any case, while the examples of \S 5 concern data driven partitions the results given in \S 3 \& 4 hold for any particular partition.

\section{Local-EM and the EMS algorithm}\label{sec:llems_alg}
In this section we consider the use of local likelihoods for the flexible estimation of the function $\lambda$ for the settings considered in \S 2. The treatment is general with the local likelihood having the form
\begin{eqnarray}\label{llge}
{\cal L}_x(\lambda)=\sum_{ik} K_h(X_{ik}-x) \log\{\lambda(X_{ik})\}
- \sum_{i}\int_{\cal M} \off_i(u)K_h(u-x)\lambda(u) \,\mathrm{d}u.
\end{eqnarray}
Here $K_h(z)= K(z/h)/h$ is a positive kernel function with
$\int K(z) \,\mathrm{d}z = 1$. Following \cite{loader1999lra} we consider the
polynomial approximation
\begin{eqnarray*}
\log\{\lambda(u)\}\approx{\cal P}(u-x)=\sum_{j=0}^p a_j(u - x)^j.
\end{eqnarray*}
 with coefficients $\mathbf{
a}=\{{a}_{0},{a}_{1},\ldots,{a}_{p}\}$.
Substituting the above into expression (\ref{llge}) and maximizing with respect to $\mathbf{a}$ yields the estimate
$\hat\lambda(x) = \exp(\hat{a}_0)$.
\subsection{Local-EM}
When the data are interval or area censored with $X_{ik} \in S_{ij}$, one may consider
replacing (\ref{llge}) with
\begin{multline}\label{llicdp}
{\cal L}_x({\bf a})=\sum_{ik}E_\lambda [K_h(X_{ik}-x){\cal P}(X_{ik}-x;\mathbf{a})|X_{ik}\in S_{ij}]\\
- \sum_{i}\int_{\cal M}{\cal O}_i(u) K_h(u-x)
\exp\{{\cal P}(u - x;\mathbf{a}) \} \dee{u}.
\end{multline}
This leads to a local-EM algorithm that cycles through two steps at
each iteration:
\begin{quote}
\begin{description}
\item[E-step:] compute the relevant expectations using the current
estimate of $\lambda(\cdot)$
\item[M-step:] maximize ${\cal L}_x({\bf a})$ to get updated estimates of
$\mathbf{a}$ and $\lambda(\cdot)$.
\end{description}
\end{quote}
The algorithm differs from a typical EM algorithm because, at the
E-step, expectation is computed with respect to an estimate of the
infinite dimensional parameter $\lambda$ while, at the M-step, we only
estimate this parameter locally at $x$. As such the typical
arguments concerning convergence of the EM algorithm cannot be
brought to bear. Furthermore, if the local-EM algorithm converges to
a fixed point $\hat{\lambda}$, it is not clear what criterion this fixed point
optimizes.

Since the $X_i$ are assumed to be realizations of a Poisson process the first term of (\ref{llicdp}) may be written as $\sum_{ij}N_{ij} E_\lambda [K_h(X-x){\cal P}(X-x;\mathbf{a})|X \in S_{ij}]$, and in general the local-EM algorithm can be written as
\begin{eqnarray}\label{localem}
\hat{\lambda}^{r+1}(x)=\sum_{ij}
N_{ij}\mbox{E}_{\hat{\lambda}^{r}}\left[\left.
K_h\left({X-x}\right)\right|X\in S_{ij}\right]/\Psi_h(\hat{\bf a}^{r+1})
\end{eqnarray}
where
\begin{eqnarray*}
\Psi_h(x;\bf a)&=&\sum_{i}\int_{\cal M}{\cal O}_i(u)K_h(u-x)\exp\left\{{\cal P}(u - x)-a_0\right\} \dee{u}
\end{eqnarray*}
and ${\bf \hat{a}}^{r+1}$ solves the local likelihood equations based on
${\cal L}_x({\bf a})$ with $\lambda$ replaced by $\hat{\lambda}^r$. Note that, given the offset surface is assumed to be constant over each region $S_{ij}$, expectation is computed with respect to the conditional density
\begin{equation} \label{conden}
\frac{\hat{\rho}^{r}(u)}{\int_{S_{ij}}{\hat{\rho}^{r}(x)} \, \mathrm{d}x}=
\frac{\hat{\lambda}^{r}(u) }{\int_{S_{ij}}{\hat{\lambda}^{r}(x)} \, \mathrm{d}x}
%\lambda(t){\Bigg /}\int_{I_{ij}}\lambda(u)\,\mathrm{d}u
\end{equation}
at the $r$th iteration.

\subsection{Implementation as an EMS algorithm}
The above algorithm may be implemented using a discretization of
$\lambda$. This effectively reduces an infinite dimensional
estimation problem to one that has finite
dimension. To this end we define a piecewise constant function over the partition $\mathcal{Q}$ as follows
$$
g_{\phi}(x; \mathcal{Q}) = \area{Q_j}^{-1} \int_{Q_j} \phi(u)\dee{u} \quad \mbox{for $x \in Q_j$,}
$$ where $\phi$ is any integrable function over all $Q_{j}$'s.  The function $g_{\phi}$ may be formally referred to as the $\mathcal{Q}$-approximant of $\phi$ (see \cite{royden1988ra} for details). Now let $\Lambda_j=\int_{Q_j} \lambda(u)\, \mathrm{d}u$ and denote the collection of all $\Lambda_j$'s by the
vector $\bf \Lambda$.  Finally, denote the indicator
function of the set $S_{ij}\cap Q_l$ by ${\cal I}_{ijl}$. 

Rather than using $\hat{\lambda}^{r}$ to compute conditional
expectations, consider simplifying the iteration by using the
piecewise constant function 
$$\bar{\lambda}^r = g_{\hat{\lambda}^{r}}.$$
Here the conditional
density (\ref{conden}) is replaced by
\[
{\bar{f}}^r_{S_{ij}}(x)=\frac{\hat{\Lambda}^r_{\ell}{\cal
I}_{ij\ell}}{\area{Q_\ell} \sum_m \hat{\Lambda}^r_{m} {\cal I}_{ijm}}
\text{ for $x\in Q_\ell$,}
\]
and $\mbox{E}_{\hat{\lambda}^{r}}\left[\left. K_h\left({X-x}\right)\right|X\in S_{ij}\right]$ becomes
\begin{align}
\mbox{E}_{\bar{\lambda}^{r}}\left[\left. K_h(X-x) \right| X \in S_{ij} \right]&=
\int_{S_{ij}} K_h(u-x) {\bar{f}}^r_{S_{ij}}(u) \, \mathrm{d}u\notag\\
%
&=\sum_{\ell}\int_{Q_\ell}
K_h(u-x) \, \frac{\hat{\Lambda}^r_{\ell}{\cal I}_{ij\ell} }{\area{Q_\ell}\sum_m
\hat{\Lambda}^r_{m} {\cal I}_{ijm}}\, \mathrm{d}u\notag\\
%
&=\sum_{\ell} \frac{\hat{\Lambda}^r_{\ell} {\cal
I}_{ij\ell} \int_{Q_\ell} K_h(u-x) \dee{u}}{\area{Q_{\ell}}\sum_m \hat{\Lambda}^r_{m} {\cal I}_{ijm}}\cdot \label{Cev}
\end{align}
%
Substitution of the above into expression (\ref{localem}) leads to the computation of $\hat{\Lambda}^{r+1}_{s}$ at the next iteration. This in turn leads to the simple iteration
\begin{eqnarray*}
\hat{\Lambda}^{r+1}_{s}&=&\int_{Q_s} \hat{\lambda}^{r+1}(x)\, \mathrm{d}x\\
%
&=&\int_{Q_s} \left\{ \sum_{ij\ell}N_{ij}\frac{\hat{\Lambda}^r_{\ell}{\cal
I}_{ij\ell}}{\area{Q_\ell} \sum_m \hat{\Lambda}^r_{m} {\cal
I}_{ijm}} \frac{\int_{Q_\ell} K_h({u-x})\,\mathrm{d}u}{\Psi_h(x; \hat{\bf a}^{r+1})}\right \} \dee{x}\\
%
&=&\sum_{ij\ell}N_{ij}
\frac{\hat{\Lambda}^r_{\ell} {\cal I}_{ij\ell}}{\area{Q_\ell}\sum_m \hat{\Lambda}^r_{m} {\cal I}_{ijm}} \int_{Q_s} \frac{\int_{Q_\ell}K_h(u-x)\dee{u}}{\Psi_h(x; \hat{\bf a}^{r+1})}\dee{x}.
\end{eqnarray*}

The iteration may be conveniently expressed in terms of matrices as
\begin{eqnarray}\label{EMSic}
\hat{\bf \Lambda}^{r+1}={\mathfrak M}(\hat{\bf \Lambda}^{r}) {\cal K}_h(\hat{\bf \Lambda}^{r}).
\end{eqnarray}
Here ${\cal K}_h$ is a $J$-by-$J$ smoothing matrix with entries
\begin{equation}\label{smoothstep}
[{\cal K}_h]_{\ell s}=
\frac{\tilde{\off}_{\ell}}{\area{Q_{\ell}}}\int_{Q_s}\frac{\int_{Q_\ell}K_h(u-x)\dee{u}}{\Psi_h(x; \hat{\bf a}^{r+1})}\dee{x}
\end{equation}
and ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ is a $J$
dimensional row vector whose $\ell$th entry is
\begin{equation}\label{EMstep}
[{\mathfrak M}(\hat{\bf \Lambda}^{r})]_\ell=\sum_{ij} \frac{N_{ij}}{\tilde{\off}_\ell}
\frac{\hat{\Lambda}^r_{\ell} {\cal
I}_{ij\ell}}{\sum_m \hat{\Lambda}^r_{m} {\cal I}_{ijm}},
\end{equation}
where $\tilde{\off}_\ell = \sum_{ij}\mathcal{I}_{ij\ell} \off_{ij}$.
The latter is recognized as a step in an EM algorithm (\S 5) and hence the iteration (\ref{EMSic}) is seen to explicitly involve an expectation, maximization \emph{and} smoothing step.
That is, by discretizing $\lambda$ our implementation of the local-EM algorithm has resulted explicitly in an EMS algorithm.
EMS algorithms were first proposed by \cite{silvermanems}  as an {\it ad hoc} method for improving the behaviour of the EM algorithm by including a smoothing step. Here they are seen to arise formally from local likelihood considerations when data are interval or area censored. Further comparisons with Silverman are given in \S 6.

\vspace{15pt}
\noindent
{\bf Remark:} At the $(r+1)$st iteration our estimate of $\lambda$ will be given by
\begin{eqnarray}\label{emsiterate}
 \left\{ \sum_{ij\ell}N_{ij}\frac{\hat{\Lambda}^r_{\ell}{\cal
I}_{ij\ell}}{ \sum_m \hat{\Lambda}^r_{m} {\cal I}_{ijm}} \frac{\int_{Q_\ell}K_h\left({u-x}\right)\dee{u}}{\area{Q_{\ell}}\Psi_h(x; \hat{\bf a}^{r+1})}\right \}.
\end{eqnarray}
This differs from (\ref{localem}) and the relationship between the two is discussed in \S 4.2 and \S A.2.

\section{Convergence and the Role of Local-EM}
Thus far we have exposed an interesting relationship between classes
of algorithms that demonstrates the EMS algorithm arises naturally
from local likelihood considerations. This occurs because of the way
we have chosen to implement the local-EM algorithm. However, we
could have instead chosen to implement this algorithm through
multiple imputation, or by using MCEM, or through some other
favorite techniques. So why EMS?

In this section, we exploit the relationship between local-EM and the EMS algorithm to gain insight into convergence issues and to expose the role of local-EM as being paired with penalized likelihood in a manner analogous to the pairing of EM and likelihood.

\subsection{An Upper Bound for the Convergence Rate}
\label{sec:convergence}
Our objective is to allow the algorithm (\ref{EMSic}) to iterate until it converges to a fixed point
$\hat{\boldsymbol{\Lambda}}$ that solves
$$
\hat{\bf \Lambda}={\mathfrak M}(\hat{\bf \Lambda}) {\cal K}_h(\hat{\bf \Lambda}).
$$
We rely on results given in \cite{latham1995ems}, \cite{green1990uap} and \cite{silvermanems}  to demonstrate the uniqueness of $\hat{\boldsymbol{\Lambda}}$ and the convergence of the algorithm to this solution. 
%In addition, we restrict developments to the locally constant case.

% For expression (\ref{smoothstep}) note that ${\bf \hat{a}}_{r}$ depends on $\hat{\bf
% \Lambda}_r$, and hence so does the smoothing step. Here we refer to
% algorithm as \emph{adaptive}. However, 
In the locally constant case the polynomial $\cal P$ is truncated at its leading term and $\Psi_h$ simplifies to
$$
\Psi_h(x;\mathbf{a})=\sum_{i} \int_{{\cal M}_i} \off_i(u) K_h(u-x) \dee{u} = \sum_{\ell} \tilde{\off}_\ell \int_{Q_\ell} K_h(u-x) \dee{u}.
$$
Here the smoother matrix $\mathcal{K}_h$ no longer depends on $\boldsymbol{\Lambda}$ and, in this case, \cite{latham1995ems} shows the uniqueness of
$\hat{\boldsymbol{\Lambda}}$ in a region where $\Lambda_k > 0$ for all $k$. This implies that the iteration will converge to the unique solution if it converges
at all. Following \cite{latham1995ems}, we restrict developments to the locally constant case and demonstrate the convergence of (\ref{EMSic}) by showing that its spectral radius at $\hat{\boldsymbol{\Lambda}}$ decreases toward zero as the bandwidth value increases.

\cite{green1990uap} shows that the spectral radius of the ${\mathfrak M}(\Lambda)$ at an EM solution is less than 1, and \cite{silvermanems} claims that introducing a smoothing step only reduces the spectral radius further. Nevertheless, these two
properties are not sufficient conditions for the algorithmic convergence because the spectral radii are evaluated at different
values of $\boldsymbol{\Lambda}$. Empirical evidence suggests that (\ref{EMSic}) usually converges and we also observe that (\ref{EMSic}) converges faster than EM when $h$ is sufficiently large.

Without  loss of generality, assume $\off_i(u)= 1$ for all $u \in \mathcal{M}$.  Let
$\gamma$ be the spectral radius of $\partial{\mathfrak M}=\partial
{\mathfrak M}(\hat{\boldsymbol{\Lambda}})/\partial
\boldsymbol{\Lambda}$. Here, we derive an upper bound for $\gamma$
and show this upper bound increases with respect to the
``roughness'' of $\boldsymbol{\Lambda}$ and decreases as the value
of bandwidth increases. By the Perron--Frobenius theorem,
$$
\gamma \leq \max_s \sum_t \left[ \partial {\mathfrak M}\,
\mathcal{K}_h \right]_{ts},
$$
where $\partial {\mathfrak M}$ is a $J \times J$ matrix with
\begin{align*}
\left[ \partial {\mathfrak M} \right]_{tk} &\equiv \dfrac{\partial
{\mathfrak M}_k}{\partial \Lambda_t} =\begin{cases} \sum_{ij} \dfrac{N_{ij}}{\tilde{\off}_k}
\dfrac{\sum_{\ell \ne k} \mathcal{I}_{ijk} \mathcal{I}_{ij\ell}
\Lambda_\ell}{\left(\sum_{\ell} \mathcal{I}_{ij\ell} \Lambda_\ell \right)^2} & \mbox{when $k=t$}\\
& \\
\sum_{ij} \dfrac{N_{ij}}{\tilde{\off}_k} \dfrac{-\mathcal{I}_{ijt} \mathcal{I}_{ijk}
\Lambda_k}{\left(\sum_{\ell} \mathcal{I}_{ij\ell} \Lambda_\ell \right)^2} & \mbox{otherwise}
\end{cases}
\end{align*}
After some algebraic manipulations, it can be shown that
\begin{align}
\gamma & \le \max_s \sum_k \left( \sum_{ij} \dfrac{N_{ij}}{\tilde{\off}_k} 
\dfrac{\sum_{t \ne k} \mathcal{I}_{ijk} \mathcal{I}_{ijt} (\hat{\Lambda}_{t} -\hat{\Lambda}_k)}%
{\left(\sum_{\ell} \mathcal{I}_{ij\ell} \hat{\Lambda}_\ell \right)^2} \right) \mathcal{K}_{ks}.
\label{e:spectral_upper}
\end{align}

%Note that $\hat{\boldsymbol{\Lambda}}$ is a function of the bandwidth value $h$.

%\noindent \Steve{I don't think the following should be here.} Noting the partition $\cal Q$ can in fact be defined in an arbitrary way (see \S A.2 for details). 

\noindent In addition, %as $h \to \infty$,
\[
\left[ \mathcal{K}_{h} \right]_{ks} \to \dfrac{\tilde{\off}_{k} \area{Q_{s}}}{\sum_{m} \tilde{\off}_{m} \area{Q_{m}}}  
\ \text{as }\ h \to \infty.
\]
In the special case in which $\area{Q_j} = \area{Q}$, increasing $h$ renders the smoothing matrix more uniform among rows, and this would, in turn, narrow the spread of $\hat{\boldsymbol{\Lambda}}$, i.e.\ $ (\hat{\Lambda}_\ell - \hat{\Lambda}_k) \to 0$.
% If the $Q_j$'s are of equal length, then not only does increasing
% $h$ narrow the spread of $\hat{\boldsymbol{\Lambda}}$, but it also
% makes $\mathcal{K}_{\ell s}$ more uniform within rows. That is,
% $$b
% (\hat{\Lambda}_\ell - \hat{\Lambda}_k) \to 0 \ \forall \ \ell, k
% \quad \mbox{and}\quad \mathcal{K}_{\ell s} \to \frac{\tilde{\off}_{\ell}}{\sum_{\ell} \tilde{\off}_{\ell}} \ \forall\ s.
% $$
%$$
%(\hat{\Lambda}_\ell - \hat{\Lambda}_k) \to 0 \ \forall \ \ell, k
%\quad \mbox{and}\quad \mathcal{K}_{\ell s} \to \frac{\sum_i
%\off_{i\ell}}{\sum_{ik} \off_{ik}} \ \forall\ s.
%$$
As a result, increasing the value of the bandwidth pushes the upper bound toward 0, thus accelerating the algorithmic convergence.

\subsection{The Role of local-EM} \label{sec:llem_role}
Below we summarize results meant to strengthen the
suggestion that, for the context considered in this paper, local-EM
and penalized likelihood may be paired in a manner analogous to the
pairing of EM and likelihood. We do this in three steps. Details are given in the appendix.

\vspace{20pt}
%
\noindent {\bf Local-EM and the Modified EMS algorithm:}
\cite{nychka1990spa} identified a relationship between EMS and
penalized likelihood by demonstrating that a modified EMS algorithm
maximizes 
\begin{equation} \label{e:npllk}
\mathcal{L}(\boldsymbol{\theta}) 
+{\bf Pen}(\boldsymbol{\theta}, {\cal K}).
\end{equation} 
Here $\mathcal{L}(\boldsymbol{\theta})$ is the appropriate nonparametric likelihood and is context dependent \citep{turnbull1976edf, wellner2000npmle, vardi1985pet}. The parameter $\boldsymbol{\theta}$ is a vector with components $\theta_{ij}$ such that $\theta_{ij}^2 =\Lambda_{ij}$ for all $i,j$, and ${\bf Pen}(\boldsymbol{\theta}, {\cal K})$ is a penalty function that depends on both $\boldsymbol{\theta}$ and some smoothing matrix ${\cal K}$.

In Appendix \ref{appx_ems} we demonstrate that, with the choice of an equivalence kernel, the local-EM algorithm may be used to maximize a penalized likelihood function. This occurs because the equivalent kernel leads, under the discretization of $\lambda$, to Nychka's modification of the EMS algorithm.

\bigskip
\noindent {\bf $\mathbf{\mathcal{L}}^1$-Convergence of EMS to local-EM:} In
\S\ref{sec:llems_alg} the discretization of $\lambda$ over the partition ${\cal
Q}$ resulted in a local-EM algorithm collapsing explicitly into an
EMS algorithm. There we made a distinction between the local-EM iterate (\ref{localem}) and its EMS counterpart (\ref{emsiterate}).
In appendix \ref{appx_convergence}, we consider this
discretization for an arbitrary partition where we let $J \to \infty$ and $\max_j \area{Q_j} \downarrow
0$. We demonstrate that the EMS iterate, and the modified EMS iterate, both converge to their
local-EM counterparts in the $\mathcal{L}^1$ norm. This result suggests local-EM and EMS techniques may be thought of synonymously.

\bigskip
\noindent {\bf Local-EM and Penalized Likelihood:} In appendix A.3 we consider the penalized likelihood of Appendix \ref{appx_ems} under the limiting conditions of Appendix \ref{appx_convergence}. In particular, we interpret the penalty in terms of a class of functions that we then study as $\max_j \area{Q_j} \downarrow 0$.

This ultimately allows us to speculate that the role of local-EM is to penalize the usual nonparametric likelihood for departures of the target function from the class $${\cal Z}=\left\{f \ \Bigg | \, f^{1/2}(x)=\int_{\cal
M} \dfrac{K_h(u-x)}{\int_{\map} K_h(u-x) \dee{u}}f^{1/2}(u)\, \mathrm{d}u \ \mbox{for all $x \in {\cal S}$}
\right\}$$ identified by the eigenfunction of the kernel.
\section{Examples}

\subsection{Density Estimation for Failure Time Data}

Consider a bivariate failure time process, such as time to HIV infection and AIDS onset, or in flu epidemics an infected individual's time to onset of symptoms and the first time they transmit the infection to another individual.  As infections are usually only revealed through repeated testing the event times are interval-censored and only known to fall between two consecutive clinic visits, one where the patient tests negative for the presence of the virus and a follow-up visit where they test positive. Multivariate density estimation in this context may be facilitated by (\ref{llicdp}), which now simplifies to
\begin{align*}
{\cal L}_x({\bf a}) &= \sum_{i=1}^n \mbox{E}_{\lambda}[K_h(X_i-x){\cal P}(X_i-x)|I_i]-n \int_{\cal M} K_h(u-x)\exp\{{\cal P}(u-x)\}\dee{u}
\end{align*}
where $\mathcal{O}_i(u) = 1$ for all $u \in {\cal M}$.

\subsubsection{One dimension}
For univariate failure time data, ${\cal M}=\Re$ and $X_i$ is the event time for the $i$th individual which is only known to fall in the interval $I_i$. For the iteration (\ref{EMSic}) we recognize ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ as a step in the EM algorithm of \cite{turnbull1976edf}. Furthermore, in the case of a histogram, $\{I_i\} \equiv {\cal Q}$, the local-EM algorithm (\ref{localem}) iterates only once and reduces to the methods of Jones (1989) for smoothing histograms. Finally, \cite{braun2005lld} proposes a local-EM algorithm based on ${\cal L}_x({\bf a})$ and, without being aware of it, develops an EMS implementation.

\subsubsection{Two dimensions}
When two events are being modelled, $X_i$ is bivariate and ${\cal M}=\Re^2$. Typically, when data are doubly interval-censored, the estimation of NPMLE for the joint distribution is complex and a treatment of some of the issues can be found in \cite{maathuis2005ran}. In the rest of this example, we consider a simple case that suggests local-EM in this context may ease these complexities and thus deserves further exploration. For example, the use of a local-EM algorithm does not require identifying maximal sets (the analog of inner most intervals in the univariate case) and thus does not require use of the height map algorithm of \cite{maathuis2005ran}. Also the solution is unique while the NPMLE is not. A Bayesian interpretation of local-EM provides insight into the latter.

A hypothetical example of bivariate interval-censored data is shown in Figure \ref{fig:bivariate}a. These data consists of eights bivariate intervals, represented by four horizontal and four vertical rectangles. Overlaying these observations forms a partition
of 81 unit squares, and the intersections of these rectangles form the maximal sets. An NPMLE places all probability on these sets; however, it is not unique. For example, a uniform weight of 1/16 on all 16 sets, a weight of 1/4 on the positive diagonal sets, and a weight of 1/4 on the negative diagonal sets all maximize the nonparametric likelihood. Consequently, the EM iteration will converge to one of the solutions depending on the initial value. However, empirical evidence suggests otherwise for the local-EM algorithm.

Figure \ref{fig:bivariate}b shows the estimated intensity surface for a circular kernel.  In this case the EMS iteration will always converge to a solution that favours  uniform weighting regardless of the starting value. Figure \ref{fig:bivariate}c gives the intensity estimate using a kernel with an elliptical contour such that bandwidths are 1.5 and .15 in the x- and y-direction and rotated by 45 degrees.  In this case, the local-EM algorithm converges to a solution that favours the positive diagonal. Similarly, when this kernel is instead rotated by -45 degrees, Figure \ref{fig:bivariate}d shows the local-EM algorithm converges to a solution that favours the
negative diagonal. This behaviour can be interpreted in terms of the penalized likelihood of \S \ref{appx_ems}. Here the local-EM algorithm aims to maximize the penalized likelihood (\ref{e:npllk}) where the penalty depends on the choice of kernel. The kernel leads us to favour one NPMLE over another \emph{a priori}.  Explicitly when the kernel is radially symmetrical, any deviations from the maximal eigenfunction are equally penalized. However, as the kernel becomes more elliptical, deviations in the direction of the major axis of the elliptical contour are penalized less than those in the direction of the minor axis.
\vspace{-1.5cm}
\begin{figure}
\vspace{-.05in} 
\centering
\subfigure[Regions corresponding to eight bivariate interval-censored data points]{\includegraphics[width=0.45\textwidth, height=0.45\textwidth,  clip=true, trim=0.8in 0.45in 1in .75in]{bivariate_example.pdf}}\\
\subfigure[EMS intensity estimate from circular kernel]{\includegraphics[width=0.32\textwidth, clip=true, trim=1.75in 0.45in
2in .55in]{ems_1_08mar2009.pdf}}
\subfigure[
Elliptical kernel, positive-sloped major axis
]{\includegraphics[width=0.3\textwidth,clip=true, trim=2.25in 0.45in
2in .55in]{ems_2_08mar2009.pdf}}
\subfigure[Elliptical kernel, negative-sloped major axis]{\includegraphics[width=0.3\textwidth, clip=true, trim=2.25in 0.45in
2in .55in]{ems_3_08mar2009.pdf}} 
\caption{An artificial example of bivariate interval-censored data (a), with EMS estimates of the intensity using three different kernels (b-d).}
\label{fig:bivariate}
\end{figure}
\newpage
\subsection{Intensity Estimation for Panel Count Data}
In this example we consider the situation described in \S 2.1.2 where individuals are monitored in time and events follow an inhomogeneous Poisson process. Monitoring involves periodic assessments, so events are interval censored. Here, individuals may drop out of the study at different time points and we use $Y_i(x)$ to indicate if the $i$th individual is in the study at time $x$. Since an individual's event process is observable only before he/she drops out, the intensity of the observable process equals $Y_i(x) \lambda(x)$ (see \cite{andersen1993smb}). Finally, the number of at-risk individuals at any time $x$ is denoted by $Y(x)=\sum_i Y_i(x)$.

We assume that the event, assessment and drop-out processes are independent of one another, and that the drop-out process is monotone. Under this setting, the local-EM algorithm still derives from (\ref{llicdp}) which now becomes
$$
\sum_{ij} N_{ij} \mbox{E}_{\lambda}\left[Y_i(X) K_h(X -x){\cal P}(X -x) \mid X \in S_{ij}\right] - \int_{\cal M} Y(u) K_h(u-x) \exp\{{\cal P}(u - x) \} \, \mathrm{d}u
$$
and the corresponding EMS implementation (\ref{EMSic}) has
$$
[\mathfrak{M}(\hat{\boldsymbol{\Lambda}}^{r})]_{\ell} = \sum_{ij} N_{ij}
\frac{Y_i(\tau_\ell)\hat{\Lambda}^{r}_{\ell} {\cal I}_{ij\ell}}{Y(\tau_\ell)\sum_m
\hat{\Lambda}^{r}_{m} {\cal I}_{ijm}}
$$
and $$[\mathcal{K}_h(\mathbf{\hat{\Lambda}}^{r})]_{\ell s}= \frac{Y(\tau_\ell)}{|\!|Q_\ell|\!|}\int_{J_s}
\dfrac{\int_{J_\ell}K_h\left({u-x}\right)\,
\mathrm{d}u}{\Psi_h\left(x; \mathbf{a}^{r+1} \right)}\,
\mathrm{d}x.$$
Note $\mathfrak{M}(\hat{\boldsymbol{\Lambda}}^{r})$ is a step in the self-consistent algorithm of \cite{hu2008gls}.

%\noindent Note that $\int_{J_\ell} K_h( u-t)\, \mathrm{d}u \rightarrow \mathcal{I}_{J_\ell}(t)$ and $\Psi_h[\mathbf{a}(t;\mathbf{\hat{\Lambda}}_r)] \rightarrow Y(t)$ as $h \searrow 0$. It follows
%$$\lim_{h\downarrow 0} [\mathcal{K}_h]_{ls} =
%\lim_{h\downarrow
%0}\frac{Y(\tau_\ell)}{||J_\ell||}\int_{J_s}\dfrac{\int_{J_\ell}K_h\left({u-t}\right)\,
%\mathrm{d}u}{\Psi_h[\mathbf{a}(t; \mathbf{\hat{\Lambda}}_r)]}\,
%\mathrm{d}t
%=\dfrac{Y(\tau_\ell)}{||J_\ell||}\int_{J_s}\frac{1_{J_\ell}(t)}{Y(t)}\,
%\mathrm{d}t
%=\delta_{sl}.$$ Consequently,
%\noindent As $h \downarrow 0$, %the smoothing matrix ${\cal K}_h$
%converges to the identity matrix and %

\subsubsection{A simulation study}

A simulation study was carried out to examine the mean integrated squared error (MISE) of the local-EM estimator as well as several alternatives. Event times follow a Poisson process with intensity $\lambda(x)$ equal to a re-scaled
gamma density function (shape = 9 and rate=3/4). Each subject is assumed to have a sequence of predetermined observation times $\tau_1, \tau_2, \ldots, \tau_J$ where $\tau_j =
j$ and $J=20$. However, subjects miss a visit with increasing probability, specifically, the probability of missing a visit equals
$(\tau_j/20)^{1/4} - 0.05$. Finally, a subject's panel counts are obtained by aggregating event times among consecutive observed
visits. Note that each subject is assumed to have no event at time 0.

For each of $S$ samples, and for a fixed window size $h$, we compute several estimators of the intensity using a Gaussian kernel. For each estimator, $\hat{\lambda}$, we approximate its MISE as 
$S^{-1} \sum_k \int (\hat{\lambda}_k(u) - \lambda(u))^2 \, \mathrm{d}u$. This was performed for 40 different values of $h$ between $0.05$ and $3.95$ with $S=300$. The resulting MISE's for each estimator are plotted in Figure \ref{f: sim_mise}. The first estimator assumes no interval censoring has taken place and uses the exact event times themselves, rather than the panel counts. This is the gold standard. For the panel counts we use the partition $\mathcal{Q}$ and compute the local-EM estimator in both the constant and linear cases, that is, where the polynomial is truncated at the first or second term. In addition, as an alternative to, and competitor of, the local-EM estimators we consider simply smoothing the self-consistent estimator of \cite{hu2008gls} after their EM algorithm has converged. 

The results favour the local-EM estimator considerably. While the gold standard achieves the smallest MISE, the local-EM estimators track it quite closely and attain the next smallest MISE for a similar window size. Smoothing after the EM algorithm converges has the worst performance, achieving a minimum MISE that is larger for a larger window size. This result is perhaps not all that surprising given that $\lambda$ is quite non-linear. In cases where $\lambda$ is linear, the improvements
in MISE for the local-EM estimator are not as dramatic. Another simulation was performed in the spatial context with similar results.

\begin{figure}\centering
\includegraphics[width=1.0\textwidth]{newmise_intensity.pdf}
\caption{Mean integrated squared error as a function of bandwidth for various estimation methods.  The proposed local EM intensity estimate achieves the lowest overall MISE with a small bandwidth of 0.195, compared to the smoothed EM estimate obtained by placing expected increments at the centres of pixels.} 
\label{f: sim_mise}
\end{figure}

\subsection{The Spatial Structure of Lupus in the City of Toronto}

In this example, we consider the setting described in \S 2.2 and investigate the spatial structure of lupus in the Greater Toronto Area (GTA). The lupus clinic at the Toronto Western Hospital records the census tracts where individuals with lupus reside, and has data from 1965 to 2007. If lupus is affected by a spatially varying environmental or social risk factor, it should result in a spatially smooth relative risk surface $\lambda(s)$.

Disease incidence is assumed to arise from an inhomogeneous Poisson process in space and time, in which the intensity is given as $\rho_k(x,t) = \lambda(x) {\cal O}_k(x,t)$ with the offset surface ${\cal O}_k$ given as
$$
{\cal O}_k(x,t) =  \beta(t) \theta_k P_k(x,t).
$$
Here the subscript denotes the $k$th age-sex group, $\theta_k$ is the incidence rate for this group, $P_k(x,t)$ is the population intensity (in persons per km square), and $\beta(t)$ is the time trend. Using regionally aggregated case counts to estimate relative risk surface $\lambda(s)$ is the objective. The main complication is that boundaries of census regions used to aggregate the data change repeatedly over the study period.

Census periods are defined as beginning and ending at the mid-points between census years before and after a given census. Period $i$ covers the years $t_{i-1}$ to $t_i$ and  $i=1\ldots T$ where $T$ is the total number of census periods during the study. The $j$th census region for the $i$th census period is denoted as $S_{ij}$ and these regions have boundaries that vary between census periods.  For simplicity, we assume $\beta(t)$ and the population $P_k(x,t)$ are constant within a census period so that $\beta(t)=\beta_i$ when $t$ is in period $i$ and 
$$
P_k(x,t) = P_{ik}(x) = P_{ijk}/ \area{S_{ij}}\quad \text{for $x \in S_{ij}$,}
$$
where $P_{ijk}$ is the population count for group $k$ in region $S_{ij}$. As a result of these simplifications, the offset is constant over the region $S_{ij}$ within a census period:
$$
{\cal O}_k(x,t) =  {\cal O}_{ik}(x) =\beta_i \theta_k P_{ik}(x).
$$
Finally, the available data are case counts of the form $N_{ijk}$ for individuals in group $k$ who were diagnosed with lupus during census period $i$ while living in region $S_{ij}$.

The model is fitted in two stages. At the first stage, the spatial variation in $\lambda(x)$ is ignored so that case counts $N_{ijk}$ may be assumed to be distributed as
$$
N_{ijk} \sim \text{Poisson}(\theta_k \beta_i (t_{i}-t_{i-1}) P_{ijk}).
$$
This allows $\beta_i$ and $\theta_k$ to be estimated from a generalized linear model. At the second stage, $\beta_i$ and $\theta_k$ are set to the values estimated at the first stage and treated as if they were known. They are then used to construct the offsets $${\cal O}_i(x) = \sum_k {\cal O}_{ik}(x).$$
These offsets are in turn used in the iteration (\ref{EMSic}) to estimate $\lambda(x)$.


Figure \ref{f: lupus_map} shows the estimated intensity surface using the local-EM algorithm with locally constant risks within square grid cells and a bandwidth of 1.35km.  The offsets were computed by calculating empirical rates by age and sex groups and applying these to the population data from the census for census data from 1971, 1981, 1991, and 2001.  The risk surface is fairly flat and near unity throughout most of the region, with an area of elevated risk near the centre of the downtown area of the city.  This could be due to a risk factor not accounted for, such as ethnicity, or reporting bias due to the proximity to the clinic.  Detailed results and discussion of this application will be reported elsewhere.


\begin{figure}\centering
\includegraphics[height=0.75\textheight, angle=0]{Relative_Risk.pdf}
\caption{Estimated risk surface for lupus using EMS with a bandwidth of 1350 m.} 
\label{f: lupus_map}
\end{figure}


We conclude this example by making a comparison between the local-EM
algorithm in this context to methods in the literature. Note that if we only have a
single map ($j=1$) then $Q_\ell$ and $S_{j\ell}$ coincide so that $I_{ij\ell}=0$ for all $j\neq \ell$. As a result the kernel weight given in (\ref{Cev}) simplifies to $\int_{Q_\ell}K_h\left({u-t}\right)\, \mathrm{d}u/\area{Q_\ell}$, the algorithm (\ref{localem}) iterates once and the local-EM estimator simply becomes the Nadaraya-Watson estimator advocated by \cite{brillinger1990st, brillinger1991si, brillinger1994esp} in a series of papers concerning spatial smoothing where data are aggregated to regions within a map.

\section{Discussion}

Local likelihood can be seen as a semi-parametric method, providing a compromise between the power and theoretical rigour of parametric methods and the flexibility of kernel smoothing algorithms.  Local-EM provides a method for applying local likelihood in situations where data are interval or area censored over irregular observation regions.  By demonstrating that local-EM and the EMS algorithm are related, it is hoped that the computational advantages offered by EMS will lead to greater adoption of local-EM methods.  Formulating EMS problems in the context of local likelihood allows for a natural and rigorous method of incorporating offsets.

A final comparison of local-EM to \cite{silvermanems} permits further insights beyond what has already been discussed in the paper. In \cite{silvermanems} quantities analogous to $S_{ij}$ and $Q_\ell$ are referred to as observation and reconstruction bins respectively and the context concerns image reconstruction involving a single image rather than multiple maps say. As a result, example \S 5.3 could well be thought of as an extension of the image reconstruction techniques of \cite{silvermanems} to an epidemiological setting. Furthermore, noting there are no offsets in \cite{silvermanems}, the expression (2.2) given there and ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ are related. For example, their weights $p_{st}$ simplify in our setting to the indicator variables ${\cal I}_{ij\ell}$ because we have assumed the locations of events have been measured without error. This observation provides an avenue for extending the local-EM toolbox to settings where data are mismeasured, but this is beyond the scope of this paper. Finally, we note that in our context ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ is an extension of \cite{vardi1985pet} to multiple maps where data are not mismeasured.



%\vspace{15pt}
%\noindent
%{\it should we mention Vardi anywhere, or do we need to demonstrate that ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ is an EM step}
\section*{Acknowledgements}
We are grateful to Dr. Paul Fortin for the provision of the lupus data. We would also like to acknowledge the Natural Sciences and Engineering Research Council of Canada for supporting this research through individual operating grants.

\bibliography{llems}

\appendix
\section{Appendix}
\subsection{Local-EM and the Modified EMS algorithm}\label{appx_ems}
\cite{nychka1990spa} identified a relationship between the EMS algorithm and a penalized likelihood by demonstrating that a modified EMS algorithm maximizes this penalized likelihood. In this section, we demonstrate that the local-EM algorithm may be used to maximize the penalized likelihood with the appropriate choice of kernel, namely an equivalent kernel. This occurs because the equivalent kernel leads to Nychka's modification of the EMS algorithm.

We begin by first considering (\ref{e:npllk}) with
$$
{\bf Pen}(\boldsymbol{\theta}, {\cal K})=\boldsymbol{\theta}^{T} \mathbf{R} \boldsymbol{\theta}.
$$ 
Here $\mathbf{R} = \mathcal{K}^{-1} - \tilde{\boldsymbol{\off}}$, where $\mathcal{K}$ is any symmetric smoothing matrix and $\tilde{\boldsymbol{\off}}$ is a diagonal matrix with diagonal entries $\tilde{\off}_\ell$.
We explore the relationship between this penalized likelihood and the local-EM algorithm in the locally constant case by considering the following function
\begin{equation} \label{e:equivalent_kernel}
(1/\lambda(u))^{1/2} K_h(u-x),
\end{equation}
where $\lambda$ is the smooth component of the true density or intensity, and $K_h(u-x)$ is any symmetric positive kernel with compact support. The renormalization of (\ref{e:equivalent_kernel}) gives
$$
K_{h}^{\ast}(u - x) = (\lambda(x)/\lambda(u))^{1/2} K_h(u-x)
$$
and $\int K_{h}^{\ast}(u - x) \dee{u} = 1+o(h)$ for any interior point, $x$. We refer to the kernel $K_h^{\ast}$ as an equivalent kernel. Next, consider the use of the equivalent kernel with the $\cal Q$-approximant $\bar{\lambda}^{r}$ in our local-EM algorithm while assuming $\area{Q_j} = \area{Q}$ for all $j$. This combination results in the conditional expectation $\mbox{E}_{\hat{\lambda}^{r}}\left[ K_h^{\ast}(X-x) \mid X \in S_{ij} \right]$ being approximated by
\begin{eqnarray*}
\mbox{E}_{\bar{\lambda}^{r}}\left[K_h^{\ast}(X-x) \mid S_{ij}\right] &=& 
\sum_{k} \left(
\dfrac{\hat{\Lambda}_{\ell}^{r}}{\hat{\Lambda}_{k}^{r}} 
\right)^{1/2} 
\area{Q}^{-1} \int_{Q_{k}} K_h(u-x)\dee{u}
\dfrac{\mathcal{I}_{ijk}\hat{\Lambda}_{k}^{r}}{\sum_{m} \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{r}} 
\quad \text{for $x \in J_\ell$.}
\end{eqnarray*}
%where $\off_k = \off(x)$ for $x\in Q_k$.  
This in turn gives the following iteration for $\boldsymbol{\Lambda}$:
\begin{align}
\hat{\Lambda}_{\ell}^{r+1} &= n^{-1} \sum_{ijk} \left( \dfrac{\hat{\Lambda}_{\ell}^{r}}%
{\hat{\Lambda}_{k}^{r}} \right)^{1/2} \area{Q}^{-1} \int_{Q_\ell} \dfrac{\int_{Q_k} K_h(u-x) \dee{u}}{\int_{\mathcal{M}} \sum_i \off_i(u) K_h^{\ast}(u-x) \, \mathrm{d}u} \, \mathrm{d}x \dfrac{ \mathcal{I}_{ijk} \hat{\Lambda}_{k}^{r}}{\sum_m \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{r}} \notag \\
%
& \approxeq n^{-1} \sum_{ijk} \left( \dfrac{\hat{\Lambda}_{\ell}^{r}}%
{\hat{\Lambda}_{k}^{r}} \right)^{1/2} \area{Q}^{-1} \int_{Q_\ell} \int_{Q_k} K_h(u-x) \dee{u} \dee{x} %
\frac{1}{\tilde{\off}_\ell} \dfrac{ \mathcal{I}_{ijk} \hat{\Lambda}_{k}^{r}}{\sum_m \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{r}} 
\label{e:modified_EMS}
\end{align}
The expression (\ref{e:modified_EMS}) can be re-expressed in the following matrix form:
\begin{equation} \label{modified_EMSpl}
\hat{\boldsymbol{\Lambda}}^{r+1}={\mathfrak M}(\hat{\boldsymbol{\Lambda}}^{r}) {\cal K}_h^{\ast}\left(\hat{\boldsymbol{\Lambda}}^{r}\right).
\end{equation}
Here ${\cal K}_h^{\ast}\left(\hat{\boldsymbol{\Lambda}}^{r} \right) =
\left(\hat{\boldsymbol{\Theta}}^{r}\right)^{-1}\, \tilde{\boldsymbol{\off}} \, \mathcal{K}_h \, \tilde{\boldsymbol{\off}}^{-1} \, \hat{\boldsymbol{\Theta}}^{r}$, where $\hat{\boldsymbol{\Theta}}^{r} = \text{diag}(\hat{\theta}_{k}^{r})$. Note that $\off_k = 1$ in the context considered by \cite{silvermanems} and \cite{nychka1990spa}. The iteration (\ref{modified_EMSpl}) is recognized as Nychka's modified EMS algorithm with the smoothing matrix equal to ${\cal K}={\cal K}_h$.

In summary, the use of the equivalent kernel in the local-EM algorithm leads to Nychka's modified EMS algorithm.

\subsection{Convergence of EMS to Local-EM}\label{appx_convergence}
%
Consider a partition $\mathcal{Q}$ based on a set of $J$ equally spaced grid points over a finite region $\mathcal{M}$. Without loss of generality, we consider the partition where elements are squares centred at these grid points. We demonstrate that the EMS iterate will converge in $\mathcal{L}^1$ to its local-EM counterpart as $J \rightarrow \infty$ and $\max_j |\!|Q_j|\!| \downarrow 0$. For the sake of clarity, we restrict attention to the locally constant case.

Without loss of generality, assume $\map_i = \map$ for all $i$. Denote the EMS and local-EM iterates as $\hat{\lambda}_{J}^{r}$ and $\hat{\lambda}_{\infty}^{r}$, respectively. In addition, assume $S_{ij} \subseteq \mathcal{M}$ for all $i, j$ and $\area{\map} < \infty$. $K(z)$ is a symmetric positive kernel with compact support and $\int K(z) \, \mathrm{d}z =1$. Finally, define a norm on $\mathcal{M}$ to be $\norm{\lambda} = \int_{\cal M} | \lambda(u) |\, \mathrm{d}u$ and interpret the convergence of the function $f$ to the function $g$ to mean that $\norm{f - g} \rightarrow 0$ as $J \to \infty$. This we denote as $f \xrightarrow{{\cal L}^1} g$. These details permit the statement of the following theorem:
%
\begin{theorem} \label{t:uniform_convergence} \end{theorem}
%\vspace{-5pt}
\begin{description}
\item{\bf I.} Define $\mathcal{F}_1 = \left\{ \lambda \in \mathcal{L}^1 \mid
\text{$\lambda$ is non-negative with $\lambda(x) > 0$ for all $x \in
\mathcal{M}$} \right\}.$ For a common initial value $\hat{\lambda}_0
\in \mathcal{F}_1$, we have, for all $r = 1, 2, \ldots$,
\begin{description}
\item{A.} $\hat{\lambda}_{J}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r}$, and
\item{B.} $\hat{\lambda}_{J}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_1$.
\end{description} \label{e: ems_iterate_v1} %%
\item{\bf II.} When the equivalent kernel of \S\ref{appx_ems} is used, we
instead define
$$
\mathcal{F}_2 = \left\{ \lambda \in \mathcal{L}^1 {\Big |} \text{$\lambda$ is
non-negative with $\lambda(x) > 0$ for all $x \in \mathcal{M}$ and
$\int_\mathcal{M} \lambda^{1/2} < \infty$}\right\}.
$$
For a common initial value $\hat{\lambda}_0 \in \mathcal{F}_2$, the
results A and B still hold for all $r$, where ${\cal F}_1$ is
replaced with ${\cal F}_2$ in B. \label{e:ems_iterate_v2}
\end{description}

\noindent Let the square root of $\lambda$ be denoted by $\lambda^{1/2}$ and 
$K_h(\cdot)=K(\cdot/h)/h$ for some $h>0$. Define $\mathcal{H}_x$ to be
$\mathcal{H}_x: \mathcal{L}^1 \mapsto \mathcal{L}^1$ such that
$$\mathcal{H}_x(\lambda) = \int \dfrac{K_h(u-x)}{\int_{\map} \off(u)
K_h(u-x)\dee{u}} \lambda(u)\, \mathrm{d}u,$$ where $\off = \sum_i \off_i$.
The proof relies on $\mathcal{H}_x(\lambda)$ being a bounded linear functional as well as some other basic results in operator theory stated as lemmas below. These lemmas may be found in \cite{royden1988ra}.

\begin{lemma}\label{lemma:piece_approx}
Let $\lambda \in \mathcal{L}^1$. Then the $\mathcal{Q}_J$-approximant of $\lambda$ converges in ${\cal L}^1$ to $\lambda$ on $\mathcal{M}$ as $J \to \infty$; that is, $\bar{\lambda} \stackrel{\mathcal{L}^1}{\longrightarrow} \lambda$.
\end{lemma}
%
\begin{lemma}\label{lemma:bl_functional}
If $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$, then
$\mathcal{H}_x$ is a bounded linear functional for all $f \in
\mathcal{L}^1$. That is, for all $x, a, b \in \Re$,
$\mathcal{H}_x(af+b) = a\mathcal{H}_x(f)+b$, and there exists a real
number $M_h$ such that $\mathcal{H}_x (f) \leq M_h \norm{f}$.
\end{lemma}

\begin{lemma} \label{lemma:integral_op1}
Let $\gamma_h(u, x) = g(x) K_h(u-x) f(u)$, where $f$, $g \in \mathcal{L}^1$ and $K_h \in \mathcal{L}^\infty$. Then $\gamma_h(u, x)$ is an
$\mathcal{L}^1$ function on $\Re^2$ with
\[
\iint \left| \gamma_h(u, x) \right| \, \mathrm{d}u \, \mathrm{d}x \leq
M_h \cdot \norm{g} \cdot \norm{f} .
\]
\end{lemma}

\noindent \textbf{Main Result:} Consider a fixed $h$ and $n$ throughout the entire proof.
\begin{enumerate}
\item[I.]
%\begin{enumerate}
%\item[A.]
%
Let $r=1$. Assume $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$. Note that $\int_{S_{ij}} \bar{\lambda}_{0}(u) \dee{u} = \int_{S_{ij}} \hat{\lambda}_0(u) \dee{u}$ by the definition of $\bar{\lambda}_{0}$. Repeated use of the triangle
inequality gives
\begin{equation*}
\begin{split}
\norm{\hat{\lambda}_{J}^{1} - \hat{\lambda}_{\infty}^{1}} 
&\leq \sum_{ij} N_{ij} \left( \int_{S_{ij}} \hat{\lambda}^0(u)\dee{u} \right)^{-1} \int_{\mathcal{M}} \int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u)
K_h(u-x)\dee{u}}\left| \bar{\lambda}^{0}(u) - \hat{\lambda}^0(u) \right| \dee{u} \dee{x} \\
%line 2
&\leq \sum_{ij} N_{ij} \left( \int_{S_{ij}} \hat{\lambda}^0(u) \dee{u} \right)^{-1} \int_{\mathcal{M}} \int_{\mathcal{M}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u)
K_h(u-x)\dee{u}} \left| \bar{\lambda}^{0}(u) - \hat{\lambda}^0(u)\right| \dee{u} \dee{x} \\
&\leq \area{\map} \sum_{ij} N_{ij} \left( \int_{S_{ij}} \hat{\lambda}^0(u) \dee{u} \right)^{-1} M_h \norm{\bar{\lambda}^{0} - \hat{\lambda}^0}.
\end{split}
\end{equation*}
Here the last inequality is due to the finiteness of $\mathcal{S} \subset \mathcal{M}$ and Lemma \ref{lemma:bl_functional}. By Lemma \ref{lemma:piece_approx}, $\bar{\lambda}^{0} \xrightarrow{{\cal L}^1} \hat{\lambda}^0$, thus $\hat{\lambda}_{J}^{1} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{1}$. Moreover, Lemma \ref{lemma:bl_functional} also ensures that $\hat{\lambda}_{J}^{1}$ and $\hat{\lambda}_{\infty}^{1}$ both belong to the class $\mathcal{F}_1$.\\[20pt]
%
\noindent {\it Induction Step:}
%
Assume that $\hat{\lambda}_{J}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r}$ and $\hat{\lambda}_{J}^{r}$,
$\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_1$. Let $b_{ij}^{r} = \dfrac{\int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)\dee{v}}{\int_{S_{ij}} \bar{\lambda}_{J}^{r}(v) \dee{v}}.$ With the repeated use of the triangle inequality, we have
\begin{align*}
%line 1
& \norm{\hat{\lambda}_{J}^{r+1} - \hat{\lambda}_{\infty}^{r+1}} \\
&\hspace{.15in} \leq \sum_{ij} N_{ij}
\int_{\mathcal{M}}\int_{S_{ij}} 
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}} 
\left| \left( \dfrac{\bar{\lambda}_{J}^{r}(u)}{\int_{S_{ij}}
\bar{\lambda}_{J}^{r}(v)\dee{v}} - \dfrac{\hat{\lambda}_{\infty}^{r}(u)}{\int_{S_{ij}}
\hat{\lambda}_{\infty}^{r}(v)\,\mathrm{d}v}\right)\right| \dee{u}\dee{x}\\
%line 3
&\hspace{.15in} \leq \sum_{ij} N_{ij} 
\left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)\,\mathrm{d}v \right)^{-1}
\int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\left | b_{ij}^{r} \bar{\lambda}_{J}^{r}(u)-\hat{\lambda}_{\infty}^{r}(u) \right
|\dee{u} \,\mathrm{d}x \\
%line 5
&\hspace{.15in} \leq \area{\map} \sum_{ij} N_{ij}
\left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)\,\mathrm{d}v \right)^{-1} 
M_h \norm{b_{ij}^{r} \bar{\lambda}_{J}^{r} - \hat{\lambda}_{\infty}^{r}}.
\end{align*}
The last inequality is again due to Lemma \ref{lemma:bl_functional}. Now since the induction assumption implies $\bar{\lambda}_{J}^{r}
\xrightarrow{{\cal L}^1} \hat{\lambda}_{\infty}^{r}$ and
$b_{ij}^{r} \to 1$, we have, for all $i, j$,
\begin{equation*}
\Big|\!\Big|b_{ij}^{r} \bar{\lambda}_{J}^{r} -
\hat{\lambda}_{\infty}^{r} \Big|\!\Big|_1 \leq \Big|\!\Big|
b_{ij}^{r} \, \bar{\lambda}_{J}^{r} - \bar{\lambda}_{J}^{r}
\Big|\!\Big|_1 + \Big|\!\Big| \bar{\lambda}_{J}^{r} -
\hat{\lambda}_{\infty}^{r} \Big|\!\Big|_1 \to 0.
\end{equation*}%
In addition, it is evident that $\hat{\lambda}_{J}^{r+1}$ and
$\hat{\lambda}_{\infty}^{r+1}$ belong to $\mathcal{F}_1$ provided
that $\hat{\lambda}_{J}^{r}$, $\hat{\lambda}_{\infty}^{r} \in
\mathcal{F}_1$. Hence, we have (\textbf{A})
$\hat{\lambda}_{J}^{r+1} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r+1}$ on $\mathcal{S}$, and ({\bf B})
$\hat{\lambda}_{J}^{r+1}$, $\hat{\lambda}_{\infty}^{r+1} \in
\mathcal{F}_1$ by induction.
%
%
% Equivalent Kernel
%
%
\item[II.]
%\begin{enumerate}
%\item[A.]
Let $r=1$. Assume $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$. By the triangle inequality and Lemma \ref{lemma:integral_op1}, we have
%
\begin{align*}
&\norm{\hat{\lambda}_{J}^{1} - \hat{\lambda}_{\infty}^{1}} %= n^{-1} %% line2
\leq \sum_{ij} N_{ij} \int_{\mathcal{M}}\int_{S_{ij}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\left| \dfrac{\bar{\lambda}_{0}^{1/2}(u) \,
\bar{\lambda}_{0}^{1/2}(x) - \hat{\lambda}_{0}^{1/2}(u) \,
\hat{\lambda}_{0}^{1/2}(x) }{\int_{S_{ij}} \hat{\lambda}_{0}(v) \,
\mathrm{d}v} \,\right| \dee{u} \dee{x} \\
%%
&\\
% line 3
&\hspace{.15in} \leq \sum_{ij} N_{ij} \left( \int_{S_{ij}} \hat{\lambda}_{0}(v) \dee{v} \right)^{-1} \left\{
\int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\bar{\lambda}_{0}^{1/2}(x) \left| \bar{\lambda}_{0}^{1/2}(u) -
\hat{\lambda}_{0}^{1/2}(u) \right| \, \mathrm{d}u \, \mathrm{d}x \right. \\
%
& \hspace{2in} + \left. \int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\hat{\lambda}_{0}^{1/2}(u) \left| \bar{\lambda}_{0}^{1/2}(x) -
\hat{\lambda}_{0}^{1/2}(x) \right| \, \mathrm{d}u \, \mathrm{d}x \right\} \\
%%
&\\
%%
&\hspace{.15in} \leq \area{\map} \sum_{ij} N_{ij} \left( \int_{S_{ij}} \hat{\lambda}^{0}(v) \dee{v} \right)^{-1} M_h \left( \norm{(\bar{\lambda}^{0})^{1/2}} + \norm{(\hat{\lambda}^{0})^{1/2}} \right) \norm{(\bar{\lambda}^{0})^{1/2} - (\hat{\lambda}^{0})^{1/2}} 
\end{align*}
By Lemma \ref{lemma:piece_approx} and the continuous mapping theorem (\textsc{cmt}),  $\norm{\bar{\lambda}_{0}^{1/2} - \hat{\lambda}_{0}^{1/2}} \to 0$. Therefore, $\hat{\lambda}_{J}^{1} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{1}$. Furthermore, if we choose
$\hat{\lambda}^0$ to be bounded above, then $\hat{\lambda}_{J}^{1}$ and $\hat{\lambda}_{\infty}^{1}$ will be also bounded. This, in turn, ensures that $\hat{\lambda}_{J}^{1},\ \hat{\lambda}_{\infty}^{1} \in \mathcal{F}_2$.\\[20pt]
%
\noindent{\it Induction Step:}
%
Assume that $\hat{\lambda}_{J}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{r}$, $\hat{\lambda}_{J}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_2$, and bounded. Then the induction assumption immediately implies that
$$
c_{ij}^{r} = \frac{\int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)
\, \mathrm{d}v}{\int_{S_{ij}} \hat{\lambda}_{J}^{r}(v) \dee{v}} \to 1\ \mbox{for all $i, j$ and }
(\bar{\lambda}_{J}^{r})^{1/2} \stackrel{\mathcal{L}^1}{\longrightarrow}
(\hat{\lambda}_{\infty}^{r})^{1/2}\ \mbox{on $\mathcal{M}$.}$$
Similar to part (I), Lemma \ref{lemma:piece_approx} and \ref{lemma:integral_op1} imply that
%
\begin{align*}
& \norm{\hat{\lambda}_{J}^{r+1} - \hat{\lambda}_{\infty}^{r+1}}\\
% line 2
&\hspace{.15in} \leq \sum_{ij} N_{ij} \left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(x) \dee{x} \right)^{-1} \int_{\mathcal{M}} \int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}} \left| c_{ij}^{r} \left[\bar{\lambda}_{J}^{r}(u) \, \bar{\lambda}_{J}^{r}(x)\right]^{1/2} \right.\\
& \hspace{4.5in}- \left.\left[\hat{\lambda}_{\infty}^{r}(u) \, \hat{\lambda}_{\infty}^{r}(x)\right]^{1/2} \right| \, \mathrm{d}u \, \mathrm{d}x \\
&\\
% line 3
&\hspace{.15in} \leq \sum_{ij} N_{ij} \left(\int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(x) \, \mathrm{d}x \right)^{-1}
\int_{\mathcal{M}} \int_{\mathcal{M}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}} \left\{ c_{ij}^{r}
\left( \bar{\lambda}_{J}^{r}(x) \left| \bar{\lambda}_{J}^{r}(u) - \hat{\lambda}_{\infty}^{r}(u) \right|\right)^{1/2} \right. \\
% line 4
& \hspace{.9in} + \left. \left| c_{ij}^{r} - 1 \right|
\left[\hat{\lambda}_{\infty}^{r}(u) \bar{\lambda}_{J}^{r}(x)\right]^{1/2} + \left(\hat{\lambda}_{\infty}^{r}(u) \left|
\bar{\lambda}_{J}^{r}(x) - \hat{\lambda}_{\infty}^{r}(x) \right| \right)^{1/2} \right\} \dee{u} \dee{x} \\
&\\
% line 5
&\hspace{.15in} \leq  \area{\map} \sum_{ij} N_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_{\infty}^{r}(x) \, \mathrm{d}x \right)^{-1} M_h\Bigg[
|c_{ij}^{r}| \norm{(\bar{\lambda}_{J}^{r})^{1/2}} \cdot 
\norm{(\bar{\lambda}_{J}^{r})^{1/2} - (\hat{\lambda}_{\infty}^{r})^{1/2}} \\
% line 6
& \hspace{.9in} + |c_{ij}^{r} - 1| 
\norm{(\bar{\lambda}_{J}^{r})^{1/2}} \cdot \norm{(\hat{\lambda}_{\infty}^{r})^{1/2}} + \norm{(\hat{\lambda}_{\infty}^{r})^{1/2}} \cdot \norm{(\bar{\lambda}_{J}^{r})^{1/2} - (\hat{\lambda}_{\infty}^{r})^{1/2}}
\Bigg] \to 0.
\end{align*}
%
% Next, define two functions, $\hat{\zeta}_{k\, r}$ and
% $\hat{\zeta}_{\infty\, r}$, to be
% $$
% \hat{\zeta}_{k\, r}(x) = \int_{\mathcal{M}} K_h(u-x) \bar{\lambda}_{r}^{1/2}(u) \,
% \mathrm{d}u \text{ and } \hat{\zeta}_{\infty\, r}(x) = \int_{\mathcal{M}}
% K_h(u-x) \hat{\lambda}_{\infty\, r}^{1/2}(u) \, \mathrm{d}u.
% $$
%By Lemma \ref{lemma:integral_op2}, $\hat{\zeta}_{k\, r}\ \hat{\zeta}_{\infty\, r} \in \mathcal{F}_2$.
Provided that $\hat{\lambda}_{J}^{r}$ and $\hat{\lambda}_{\infty}^{r}$ are bounded, $\hat{\lambda}_{J}^{r+1}$ and $\hat{\lambda}_{\infty}^{r+1}$ are bounded, implying that $\int_{\mathcal{M}} (\hat{\lambda}_{J}^{r+1})^{1/2} < \infty$ and $\int_{\mathcal{M}} (\hat{\lambda}_{\infty}^{r+1})^{1/2} < \infty$.
%
%
It follows that ({\bf A}) $\hat{\lambda}_{J}^{r}(x) \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{r}(x)$, and ({\bf B}) $\hat{\lambda}_{J}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_2$ by induction.
\end{enumerate}



\subsection{Local-EM and the Penalized Likelihood} \label{appx_penalized}

%In this section we study the penalized likelihood of \S\ref{appx_ems} under the conditions of \S\ref{appx_convergence} in which $k\rightarrow \infty$.
For clarity, assume $\map_i = \map$ for all $i$ and $\off_{ij}=1$. Consider those values of $\boldsymbol{\theta}$ for which the penalty $\boldsymbol{\theta}^{T} \mathbf{R} \boldsymbol{\theta}$ is minimized. For such $\boldsymbol{\theta}$, we have
\begin{eqnarray*}
\mathbf{R} \boldsymbol{\theta}&=&(\mathcal{K}_h^{-1} - n\mathbf{I})\boldsymbol{\theta}=0
\end{eqnarray*}
or rather
\begin{eqnarray}\label{eigen}
\boldsymbol{\theta}&=& n\mathcal{K}_h \boldsymbol{\theta}.
\end{eqnarray}
This permits an interpretation of ${\cal L}_p(\boldsymbol{\theta})$
as penalizing the nonparametric likelihood on the basis of the
proximity of $\boldsymbol{\theta}$ to the maximal eigenvector of
the smoothing matrix $\mathcal{K}_h$. To see this, let
$\varrho_{(\ell)}$ denote the $\ell$th largest eigenvalue of
$\mathcal{K}_h$ with its corresponding eigenvector $\boldsymbol{\gamma}_{(\ell)}$. 
Let $\boldsymbol{\Gamma}=\begin{bmatrix} \boldsymbol{\gamma}_{(k)} &
\boldsymbol{\gamma}_{(k-1)}& \cdots & \boldsymbol{\gamma}_{(1)}
\end{bmatrix}$. Then the spectral decomposition of $\mathbf{R}$ is
$ \boldsymbol{\Gamma} \mathbf{D} \boldsymbol{\Gamma}^{T},$ where
$\mathbf{D}=\text{diag}\left( \varrho_{(k)}^{-1}, \ \varrho_{(k-1)}^{-1},
\ \ldots, \ \varrho_{(1)}^{-1}\right) - \mathbf{I}$.
%
Since $\varrho_{(1)} \leq 1$, $\mathbf{R}$ penalizes eigenvectors with small eigenvalues more than those with large ones.

We note that for $\boldsymbol{\theta}$ satisfying the condition
(\ref{eigen}) we have
\begin{equation}
\boldsymbol{\theta}^T \boldsymbol{\theta}- n\boldsymbol{\theta}^T {\cal
K}_h \boldsymbol{\theta}=0 \label{eigen_2}.
\end{equation}
We may consider the limit of the left-hand side as $J \rightarrow \infty$ and $\max_j \area{Q_j} \downarrow 0$. Note that $[{\cal K}_h]_{kj}$ and $\theta_j$ may be
approximated as
\begin{eqnarray*}
 [{\cal K}_h]_{kj}&=& \Delta^{-1} \int_{Q_j} \int_{Q_k} 
\dfrac{K_h(u-x)}%
{n \int_{\map} K_h(u-x) \dee{u}} \dee{u} \dee{x}
\approx \dfrac{K_h(x_k - x_j)}{n \int_{\map} K_h(u-x_j) \dee{u}} \Delta_u \Delta_x \\
\theta_j^2 &=& \left( \int_{Q_j} \rho(u) \dee{u} \right)^{1/2} \left( \int_{Q_j} \rho(x)\dee{x} \right)^{1/2} \approx \rho(x_j) \Delta_u \Delta_x.
\end{eqnarray*}
Let $\Delta = \Delta_u \Delta_x$ and note $\Delta \downarrow 0$ as $J \rightarrow \infty$. The left-hand side of (\ref{eigen_2}) may now be written as
$$
\sum_j \rho(x_j)\Delta - \sum_{jk} \rho^{1/2}(x_j) \dfrac{K_h(x_k-x_j)}{\int_{\map} K_h(u-x_j) \dee{u}} \rho^{1/2}(x_k)\, \Delta^2
$$
and as $J \rightarrow \infty$ this expression becomes
$$
\int_{\map}\rho(u)\, \mathrm{d}u-\int_{\map} \int_{\map} \rho^{1/2}(x) \dfrac{K_h(u-x)}{\int_{\map} K_h(u-x) \dee{u}} \rho^{1/2}(u)\, \mathrm{d}u\,\mathrm{d}x.
$$
As a result, the penalty will equal 0 for any function $f$ belonging to the following class
$$
{\cal Z}=\left\{f \ \Bigg | \, f^{1/2}(x)=\int_{\map} \dfrac{K_h(u-x)}{\int_{\map} K_h(u-x) \dee{u}} f^{1/2}(u) \dee{u} \text{ for all $x \in \map$}
\right\}.
$$
Given this and the results of \S\ref{appx_convergence}, we conclude that the local-EM
algorithm penalizes the nonparametric likelihood for departures of the target function from the class of
maximal eigenfunctions $\cal Z$.

\end{document}