\documentclass[12pt]{article}
\usepackage{layout,latexsym, array, enumerate, amsmath, amsthm,amssymb, amsfonts, natbib, subfigure, color}
\usepackage[mathscr]{eucal}
\usepackage{epsf,epsfig}
%\usepackage{epsf,epsfig,eufrak,dsfont}
\bibliographystyle{apalike}

\definecolor{grey}{RGB}{190,190,190}

\textwidth 6.5in \textheight 9.00in \oddsidemargin -0.15in
\evensidemargin -0.15in \topmargin -0.25in
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}{Proposition}[section]
\newtheorem{corollary}{Corollary}[section]
\newtheorem{example}{Example}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{defn}{Definition}[section]
\newcommand{\lowtilde}[1]{\mathop{#1}\limits_{\textstyle\tilde{}}}
%\renewcommand{\baselinestretch}{1.4}

\newcommand{\off}{\mathcal{O}}
\newcommand{\poly}{{\cal P}(u-s;\mathbf{a})}
\newcommand{\coef}{\mathbf{a}}
\newcommand{\lik}{\ensuremath{\mathcal{L}}}
\newcommand{\map}{\ensuremath{\mathcal{M}}}
\newcommand{\area}[1]{\ensuremath{|\!|#1|\!|}}
\newcommand{\norm}[1]{\ensuremath{\left|\!\left|#1\right|\!\right|_1}}
\newcommand{\dee}[1]{\ensuremath{\,\mathrm{d}#1}}
\newcommand{\Steve}[1]{{\tt (#1)}}

\newcommand{\comment}[1]{}

\begin{document}
\title{Local-\textsc{em} and the \textsc{ems} Algorithm}
\author{Chun-Po Steve Fan\footnote{Bank of Montreal}, Jamie Stafford\footnote{University of Toronto} and Patrick E. Brown\footnote{University of Toronto and Cancer Care Ontario} }
\thispagestyle{empty}
\date{}
\maketitle
%\renewcommand{\baselinestretch}{1.4}
\begin{abstract}
The use of local likelihood methods \citep{tibshirani1987ll,loader1999lra} in the presence of data that is either interval or area censored leads naturally to the
consideration of \textsc{em}-type strategies, or rather local-\textsc{em} algorithms.
In this paper we consider a class of local-\textsc{em} algorithms suitable for density or intensity estimation in the temporal or spatial context. We demonstrate that using a piecewise constant density function at the E-step results in the algorithm collapsing
explicitly into an \textsc{ems} algorithm of the type considered by \cite{silvermanems}. 

This discovery has two advantages. Identifying a relationship between local likelihood and the \textsc{ems} algorithm means the former provides a natural context for the latter further to that given in \cite{nychka1990spa}. In addition, the latter guides the implementation, and interpretation, for local-\textsc{em} algorithms. For example, we
expose a previously unknown connection between local-\textsc{em} algorithms and penalized likelihood that is analogous to the more familiar pairing of \textsc{em} and likelihood. Examples include exploring the spatial structure of the disease lupus in the City of Toronto.
\end{abstract}

\vspace{10pt} \small \it Keywords: density estimation; intensity
estimation; interval and area censoring; local likelihood; panel counts; lupus; penalized
likelihood; self-consistency \rm \normalsize

\section{Introduction}
In this paper we consider the use of local likelihoods for density and intensity estimation when data are only partially observed. Here data may be interval censored,
they may be temporal and come in the form of panel counts, or they may be spatial and area censored. Whatever form the censoring takes, the use of local likelihood techniques naturally leads to the consideration of local-\textsc{em} algorithms.

The use of local-\textsc{em} algorithms has precedents in the literature. Examples include \cite{tanner1987mi}, \cite{betensky1999leh}, \cite{braun2005lld}, \cite{tolusso2008red} and methods here are somewhat similar to those of \cite{braun2005lld} (see \S 2.4). Here the method of implementation can vary where, for instance, implementation of the E-step might involve multiple imputation or Monte Carlo integration or numerical integration and so on. Despite this, and the interesting developments given in these papers, one common challenge that remains is demonstrating convergence of the local-\textsc{em} algorithm to a fixed point. Furthermore, it is not clear whether this fixed point maximizes any particular criterion. For example, while we know \textsc{em} is a tool useful for maximizing likelihoods or posterior distributions, what is the analogy for local-\textsc{em}?

In this paper we consider implementing the E-step of a local-\textsc{em} algorithm by approximating conditional expectations using a piecewise constant density function. This results in the local-\textsc{em} algorithm collapsing explicitly into an \textsc{ems} algorithm of the type proposed in \cite{silvermanems}. There an \textsc{ems} algorithm is constructed by simply adding a smoothing step to the expectation and maximization steps of the usual \textsc{em} algorithm. Given that each iteration of a local-\textsc{em} algorithm involves smoothing, an expectation step and a maximization step, and hence is already \textsc{ems} in character, it is perhaps not surprising that this explicit relationship exists.

%\cite{silvermanems} refer to this method as being {\it ad hoc}. 

Identifying a relationship between local-\textsc{em} and the \textsc{ems} algorithm has two advantages. First, it embeds the \textsc{ems} algorithm in the local likelihood context where it is seen to arise naturally as an implementation of a local-\textsc{em} algorithm.
%\footnote[1]{\cite{nychka1990spa} demonstrates that a modified \textsc{ems} algorithm is related to penalized likelihood. As a result he also suggests that the \textsc{ems} algorithm is not {\it ad hoc}.}.
Secondly, the \textsc{ems} algorithm has been extensively studied. Much is known about its convergence \cite[]{latham1995ems} and its relationship to penalized likelihood~\cite[]{nychka1990spa}. This can be used to inform the use of local-\textsc{em}. For example, the latter suggests a previously unknown connection between local-\textsc{em} and penalized likelihood that is similar to the more familiar pairing of \textsc{em} and likelihood.

This paper has the following structure. In \S 2 we expose the relationship between local-\textsc{em} and the \textsc{ems} algorithm by beginning with a formal model that captures the temporal and spatial contexts. We describe local-\textsc{em} in \S 2.2 and in \S 2.3 demonstrate that \textsc{ems} arises naturally as an implementation of the local-\textsc{em} algorithm. In \S 2.4 we consider the simple case of density estimation to exemplify developments to that point. Finally, in \S 2.5 we prove the resulting \textsc{ems} iterate converges uniformly to its local-\textsc{em} counterpart.

In \S 3 we exploit the relationship between local-\textsc{em} and the \textsc{ems} algorithm to gain insight into numerical convergence issues and to expose the role of local-\textsc{em}. In \S 3.1 results of \cite{latham1995ems} are used to demonstrate the uniqueness of a fixed point. In addition, we provide an upper bound for local convergence and give conditions for this upper bound to shrink towards zero. In \S 3.2 we demonstrate that the use of an equivalent kernel in a local-\textsc{em} algorithm leads to the modification necessary
to maximize a penalized likelihood \citep{nychka1990spa}. This result suggests that, at least
for the contexts considered in this paper, local-\textsc{em} and penalized
likelihood may be paired in a manner analogous to the pairing of \textsc{em}
and likelihood. 

In \S 4 we present two further examples where one concerns intensity estimation for panel count data and the other estimation of the spatial distribution of the disease lupus in the Greater Toronto Area. The latter may be viewed as extending the image reconstruction techniques of \cite{silvermanems} to an epidemiological setting where, in the past, it was unclear how to proceed. One common observation in the examples of this paper is that local-\textsc{em} explicitly extends the self-consistency algorithms of \cite{turnbull1976edf}, \cite{hu2008gls} and \cite{vardi1985pet}. Another is that, in simple cases, local-\textsc{em} reduces to known methods like that of Jones (1989) for smoothing histograms or those of \cite{brillinger1990st, brillinger1991si, brillinger1994esp} for smoothing spatially aggregated data. Simple cross validation is used to select an appropriate bandwidth.   Concluding remarks may be found in \S 5.

\section{Methods}

\subsection{Point processes and intensity functions}

An inhomogeneous Poisson process with intensity function $\rho(s); s \in \Re^d$ has the number of events in a region $A$ being Poisson distributed with mean $\int_A \rho(u) du$.  The numbers of events in two disjoint regions are independent.  Here we are concerned with estimating the intensity from a series of Poisson processes $X = \{X_i; i=1 \ldots M\}$ with intensities $\rho_i(s)$, and process $X_i$ has event locations $X_i = \{X_{ik}; k=1\ldots K_i\} \subset \map_i$.  Further, the intensities differ by known ``offset'' values $\off_i(s)$ with $\rho_i(s) = \lambda(s)\off_i(s)$.  Using the term ``IPP'' to denote an inhomogeneous Poisson process, the model can be written as 
\[
X_i \sim \text{IPP}[\off_i(s)\lambda(s)].
\]

A spatial process has dimension $d=2$, and the notation above could correspond to the $X_{ik}$ being residential locations of individuals having contracted a given disease.  The realisations might have $i=1$ being male cases and $i=2$ being females.  The $\off_i(s)$ would be the expected intensity of cases given the population distribution and the disease incidence rate for each sex.  The $\lambda(s)$ surface would be caused by spatially varying risk factors such as air pollution.  

A temporal process could result from $i$ denoting subjects in a cohort and the $X_{ik}$ being incidence times of a medical event.  The $\off_i(s)$ would relate to subject-specific characteristics such as smoking status, and the $\lambda(s)$ would model the tendency for the relative intensity of events to increase with age.  In both the spatial and temporal case, a value of $\off_i(s)=0$ would correspond to events at time or location $s$ being unobservable.

Given a realisation $\{x_{ik}\}$, the likelihood for a given intensity $\lambda$ is: 
\begin{equation}\label{eq:ippLikelihood}
L(\lambda) =  \prod_{i=1}^M \left[\prod_{k=1}^{K_i} \off_i(x_{ik})\lambda(x_{ik})\right] \exp\left[ -  \int_{\map_i} \off_i(u) \lambda(u) du\right].
\end{equation}
For further details, see for example \citet{illian2008statistical}.
Local likelihood produces a non-parametric estimate of $\lambda(s)$ by giving more weight to the data close to $s$ when estimating at $s$.  
Applying a kernel $K_h(u)$  with bandwidth $h$ to the log of the likelihood in (\ref{eq:ippLikelihood}), and bringing the kernel inside the sums and integral, yields
\begin{equation}\label{eq:ippLocalLikelihood}
{\cal L}(\lambda;s) =  \sum_{i=1}^M \left[\sum_{k=1}^{K_i}K_h(x_{ik}-s) \log[\lambda(x_{ik})] \right]  -  \int_{\map_i} K_h(u-s) \off_i(u) \lambda(u) du +C.
\end{equation}
The constant $C$ depends on the $\log\off_i(x_{ik})$, which will be finite since events cannot occur at locations where $\off_i(s)=0$.  

Maximizing (\ref{eq:ippLocalLikelihood}) without restrictions on $\lambda$ gives infinite spikes at the $x_{ik}$, which can be prevented by positing a parametric formula for $\lambda$.  
\citet{loader1999lra} suggests approximating $\lambda$ with exponentiated polynomials, with coefficients varying with $s$.  For a location $u$ in the vicinity of $s$, replace $\lambda(u)$ with $\lambda(u;\coef,s) = \exp[\poly]$, where ${\cal P}:\Re^d \rightarrow \Re$ is a polynomial of order $p$ and $\coef$ is a vector of polynomial coefficients.  Here we consider locally constant models (polynomials of degree zero) with $\coef$ being a single coefficient and  locally log-linear models where $\coef$ is of length 2 in one dimension and length 4 in two dimensions.  At each location $s$ a different $\hat\coef(s)$ is estimated to maximise ${\cal L}(\coef;s)$ where
$${\cal L}(\coef;s)=\sum_{i=1}^M \left [
\sum_{k=1}^{K_i} K_h(x_{ik}-s) {\cal P}(x_{ik}-s;\coef) -  \int_{\map_i} K_h(u-s) \off_i(u) \exp[{\cal P}(u-s;\coef)] du\right ].$$
  
\subsection{Aggregated data and Local EM}\label{sec:llems_alg}

Area- or interval-censored data can be described as the observations from realisation $i$ being observed aggregated to a tessellation or map of disjoint regions $S_{ij}$ with 
$\map_i = \bigcup \{S_{ij}; j = 1 \ldots J_i\}$. 
For spatial data the $S_{ij}$ might be geographical reporting regions, and for temporal data they could refer to intervals between clinic visits.  The observed data are the case counts $Y_{ij}$ in region $S_{ij}$ from realisation $i$, or more formally $Y_{ij} = ||\{k; X_{ik} \in S_{ij}\}||$.  When the censoring regions are the same across realisations ($\map_i = \map$), estimation of $\lambda(s)$ is well studied \citep[see, for example][]{brillinger1990st}.  The central problem addressed by this paper is non-parametric estimation of $\lambda(s)$ when the censoring regions are different for each realisation.    For example, the geographic reporting regions changing every 10 years or each individual $i$ having a different set of clinic visit times. 

Local-EM maximises the expected likelihood in place of (\ref{eq:ippLocalLikelihood}), to account for the locations $x_{ik}$ being unknown.  As the expectation depends on the intensity $\lambda$, the procedure is iterative.  At iteration $r$, the intensity  $\lambda^{r-1}(\cdot)$ is used to take the expectation of ${\cal L}(\coef;s)$ conditioning on the $Y_{ij}$. By moving the expectation inside the sums and noting that the contributions from each $x_{ik}$ in a given  $S_{ij}$ are identical, the local-\textsc{em} recursion at iteration $r$ becomes: 
\begin{multline}
\label{eq:formalEM}
{\cal L}^{r}(\coef;s) = \sum_{i=1}^M \left [
\sum_{j=1}^{J_i} Y_{ij}\ \text{E}_{X} \left\{  K_h(X-s) {\cal P}(X-s;\coef)
\ \Bigg |X \in S_{ij}\ ;\ \hat\lambda^{r-1}(\cdot)  \right\} \right.
\\ \left. -  \int_{\map_i} K_h(u-s) \off_i(u) \exp[{\cal P}(u-s;\coef)] du \right ].
\end{multline}
Maximizing ${\cal L}^{r}$ for different values of $s$ gives the updated coefficients 
\[
\hat\coef^r(s) = \text{argmax}_\coef {\cal L}^{r}(\coef;s)
\]
and resulting intensities $\hat\lambda^{r}(s) = \lambda[s;\hat\coef^{r}(s),s] = \exp[\hat a_1^{r}(s)]$.  Here $a_1$ is the intercept term in the coefficients $\coef$, as the higher order coefficients are multiplied by zero when evaluating the polynomial at $s$.

The algorithm differs from a typical EM algorithm because, at the
E-step, expectation is computed with respect to an estimate of the
infinite dimensional parameter $\lambda(\cdot)$ while, at the M-step, we only
estimate this parameter locally at $s$. As such the typical
arguments concerning convergence of the EM algorithm cannot be
brought to bear. Furthermore, if the local-EM algorithm converges to
a fixed point $\hat{\lambda}$, it is not clear what criterion this fixed point
optimizes.

When maximizing  (\ref{eq:formalEM}) it is convenient to solve the local likelihood equation for the intercept parameter  $a_1$ separately from the equations for the higher-order coefficients.  Writing $\coef_{-1}$ as the vector of polynomial coefficients with the intercept replaced by zero, define
\[
\Psi(s;\coef) = \sum_{i}\int_{{\cal M}_i}{\cal O}_i(u)K_h(u-s)\exp[{\cal P}(u - s;\coef_{-1})] du.
\] 
Solving for $a_1$ gives the local-\textsc{em} recursion explicitly as
\begin{equation}\label{eq:efficientEMS}
\hat{\lambda}^{r}(s)=\sum_{ij}
Y_{ij}\mbox{E}_{X}\left[
K_h(X-s) | X \in S_{ij};\hat\lambda^{r-1}(\cdot) \right]\left/\Psi\left(s;\hat{\coef}^{r}_{-1}(s)\right)\right. ,
\end{equation}
where $\hat{\coef}^{r}_{-1}(s)$ results from solving the remaining local likelihood equations. 
Note that, given the offset surface is assumed to be constant over each region $S_{ij}$, expectation is computed with respect to the conditional density
\[
f[x|x\in S_{ij};\lambda] = {{\rho(x)}\over{\int_{S_{ij}} \rho(u) du}} ={{\lambda(x)}\over{\int_{S_{ij}} \lambda(u) du}} .
\]


\subsection{Implementation as an EMS algorithm}

An EMS algorithm results from a local-EM by discretizing 
$\lambda$ as a piecewise constant function. This effectively reduces an infinite dimensional
estimation problem to one that has finite
dimension.
We divide the study area (or time period)  $\map=\cup_{i=1}^M \map_i$ into a number of disjoint regions (or intervals) $Q_\ell$ with ${\cal Q}=\{Q_\ell; \ell=1, \ldots, L \}$, and further insist on the $Q_\ell$ being entirely contained within (and not overlapping with) the censoring regions $S_{ij}$.  So for every $i$ and $\ell$, there exists a single $j$ such that $Q_\ell \subset S_{ij}$ (unless $Q_\ell$ is outside $\map_i$).  The $\cal Q$ with the smallest number of elements would be the regions obtained by overlaying all of the boundaries of the maps $\map_i$, which in one dimension is the time intervals formed by ordering all of the followup times for each subject.   Another possible $\cal Q$ is the cells of a pixelated grid sufficiently fine that none of the cells 
intersects with more than one region on each map.

We approximate the risk surface $\lambda(s)$ by the piecewise constant function $\bar\lambda(s)$ formed from the integrated values 
\[
\bar\lambda_\ell = \int_{Q_\ell} \lambda(s) ds / ||Q_\ell||,
\]
with $\bar\lambda(s) = \bar\lambda_\ell; s \in Q_\ell$. 
Here $\bar\lambda$ may be formally referred to as the $\mathcal{Q}$-approximant of $\lambda$ (see \cite{royden1988ra} for details).  The estimate of $\bar\lambda$ is used in place of $\hat\lambda$ when calculating the conditional expectations in (\ref{eq:efficientEMS}), with the effect that a finite number of quantities ($\bar\lambda_\ell$) need be estimated rather than the infinite-dimensional $\lambda(s)$.

The notation which follows is somewhat simplified by working with $\Lambda_\ell=||Q_\ell||\bar\lambda_\ell$ in place of the $\bar\lambda_\ell$.  Writing ${\cal I}_{ij\ell}$ as an indicator function for   $S_{ij}$ intersecting with $Q_\ell$, the conditional
density of an event known to be in region $S_{ij}$ is then
\[
f\left[x | x \in S_{ij};\bar\lambda\right] = {\cal I}_{ij\ell} \Lambda_\ell \left/ ||Q_\ell|| \sum_{m=1}^L \Lambda_m {\cal I}_{ijm}\ ; \ x \in Q_\ell \right. .
\]
The conditional expectation from (\ref{eq:efficientEMS}), using $\bar\lambda$ in place of $\lambda$ is then
\begin{eqnarray*}
\text{E}_{X} \left[  K_h(X-s)  
\ |X \in S_{ij}\ ;\ \bar\lambda(\cdot) \right] &=&  
 \int_{S_{ij}} K_h(u-s) f(u|u\in S_{ij};\bar\lambda) du\\
&=& \sum_\ell
\frac{{\Lambda}_{\ell} {\cal
I}_{ij\ell} }{
\sum_m
\Lambda_{m} {\cal I}_{ijm} 
}
\int_{Q_\ell} K_h(u-s)  / ||Q_\ell||  du
\\
&=&\sum_{\ell} \frac{{\Lambda}_{\ell} {\cal
I}_{ij\ell} \int_{Q_\ell} K_h(u-s) \dee{u}}{\area{Q_{\ell}}\sum_m {\Lambda}_{m} {\cal I}_{ijm}}\cdot \label{Cev}
\end{eqnarray*}

Substitution of the above expression into (\ref{eq:efficientEMS}) leads to the iteration
\begin{eqnarray*}
\hat{\Lambda}^{r}_\ell&=&\int_{Q_\ell} \hat{\lambda}^{r}(s)\, \mathrm{d}s\\
%
&=&\int_{Q_\ell} \left\{ \sum_{ijm}Y_{ij}\frac{\hat{\Lambda}^{r-1}_{m}{\cal
I}_{ijm}}{\area{Q_m} \sum_n \hat{\Lambda}^{r-1}_{n} {\cal
I}_{ijn}} \frac{\int_{Q_m} K_h({u-s})\,\mathrm{d}u}{\Psi(s; \hat\coef_{-1}^{r}(s))}\right \} \dee{s}\\
%
&=&\sum_{ijm}Y_{ij}
\frac{\hat{\Lambda}^{r-1}_{m} {\cal I}_{ijm}}{\area{Q_m}\sum_n \hat{\Lambda}^{r-1}_{n} {\cal I}_{ijn}} \int_{Q_\ell} \frac{\int_{Q_m}K_h(u-s)\dee{u}}{\Psi(s; \hat\coef_{-1}^{r}(s))}\dee{s}.
\end{eqnarray*}
The iteration may be conveniently expressed in terms of matrices by writing $\bf\Lambda$  as the vector of $\Lambda_\ell$'s and
\begin{eqnarray}\label{EMSic}
\hat{\bf \Lambda}^{r}={\mathfrak M}(\hat{\bf \Lambda}^{r-1}) {\cal K}_h(\hat{\bf \Lambda}^{r-1}).
\end{eqnarray}
Here ${\cal K}$ is an $L$-by-$L$ smoothing matrix with entries
\begin{equation}\label{smoothstep}
[{\cal K}_h(\hat{\bf \Lambda}^{r-1})]_{\ell m}=
\frac{\tilde{\off}_{\ell}}{\area{Q_{\ell}}}\int_{Q_m}\frac{\int_{Q_\ell}K_h(u-s)\dee{u}}{\Psi(s; \hat{\coef}^{r}_{-1}(s))}\dee{s}
\end{equation}
and ${\mathfrak M}(\hat{\bf \Lambda}^{r-1})$ is an $L$-dimensional row vector whose $\ell$th entry is
\begin{equation}\label{EMstep}
[{\mathfrak M}(\hat{\bf \Lambda}^{r-1})]_\ell=\sum_{ij} \frac{Y_{ij}}{\tilde{\off}_\ell}
\frac{\hat{\Lambda}^{r-1}_{\ell} {\cal
I}_{ij\ell}}{\sum_m \hat{\Lambda}^{r-1}_{m} {\cal I}_{ijm}},
\end{equation}
where $\tilde{\off}_\ell = \sum_{ij}\mathcal{I}_{ij\ell} \off_{ij}$.
The latter is recognized as a step in an EM algorithm (\S 2.3, 4.1) and hence the iteration (\ref{EMSic}) is seen to explicitly involve an expectation, maximization \emph{and} smoothing step.
That is, by discretizing $\lambda$ our implementation of the local-EM algorithm has resulted explicitly in an EMS algorithm. EMS algorithms were first proposed by \cite{silvermanems}  as a method for improving the behaviour of the EM algorithm by including a smoothing step. Here they are seen to arise formally from local likelihood considerations when data are interval or area censored. Further comparisons with Silverman are given in \S 5.

\vspace{10pt}
\noindent {\bf Remark:} Note in the above derivation of $\hat{\Lambda}^{r}_\ell$ the local-\textsc{em} iterate $\hat{\lambda}^{r}(s)$ is replaced by an alternate estimate of $\lambda$, namely
\begin{eqnarray}\label{emsiterate}
 \hat{\lambda}^{r}_L(s)=\sum_{ijm}Y_{ij}\frac{\hat{\Lambda}^{r-1}_{m}{\cal
I}_{ijm}}{ \sum_n \hat{\Lambda}^{r-1}_{n} {\cal I}_{ijn}} \frac{\int_{Q_m}K_h\left({u-s}\right)\dee{u}}{\area{Q_{m}}\Psi(s; \hat{\coef}^{r}_{-1}(s))}\cdot
\end{eqnarray}
Here we refer to  $\hat{\lambda}^{r}_L(s)$ as the \textsc{ems} iterate and the relationship to its local-\textsc{em} counterpart is discussed in \S 2.5.

\subsection{The Case of Density Estimation}

%Thus far this paper has been solely concerned with the case of intensity estimation. However, the case of density estimation is analogous to results thus far although the local likelihood differs slightly and the details are technically simpler. We consider this ... briefly here emphasizing that the all the results of this paper, including those that follow, also hold for density estimation. 
The case of density estimation is considered as an exposition of developments thus far. 
Here we assume that the event process for each subject is a common failure time process where there is a single event 
$X_i$  that is interval censored such that $X_i\in S_i=[L_i,R_i]$.
Note that if the event time for the $i$th subject is right censored
we set $R_i=\infty$. The observed data are then a sequence of
independent intervals $S_1, \ldots, S_n$, some of which may overlap. We
let ${\cal Q}=\{Q_j; j=1, \ldots, J \}$ denote the partition of the data
defined by the collection of endpoints $\{L_i,R_i;i=1,\ldots,n\}$.
For example, if $n=2$ and $S_1=[0,3],~S_2=[1,2]$ then we would have
${\cal Q}=\{[0,1],[1,2],[2,3]\}$. In this context (\ref{eq:formalEM}) simplifies and density estimation may be facilitated by
\begin{multline}\label{emsdensity}
{\cal L}^{r}(\coef;s) = \sum_{i=1}^n 
\text{E}_{X} \left\{  K_h(X-s) {\cal P}(X-s;\coef) 
\ \Bigg |X \in S_{i}\ ;\ \hat\lambda^{r-1}(\cdot)  \right\} 
\\  -  n\int_{\Re} K_h(u-s) \exp[{\cal P}(u-s;\coef)] du,
\end{multline}
where $\mathcal{O}_i(u) = 1$ for all $u \in {\cal M}$, $M=n$, ${\cal M}_i=\Re$ for all $i$ and $Y_{ij}=1$ for all $i,j$.
Here we recognize ${\mathfrak M}(\hat{\bf \Lambda}^{r})$  in the iteration (\ref{EMSic}) as a step in the \textsc{em} algorithm of \cite{turnbull1976edf}. In addition, for  the case of a histogram ${\cal Q}\equiv \{S_i; i=1,\ldots,n\}$ and the local-\textsc{em} algorithm (\ref{eq:efficientEMS}) will only iterate once reducing to the methods of Jones (1989) for smoothing histograms. Finally, \cite{braun2005lld} propose a local-\textsc{em} algorithm based on (\ref{emsdensity}) and, without being aware of it, develop an \textsc{ems} implementation.

\subsection{Uniform Convergence of \textsc{EMS} to Local-\textsc{EM}} \label{appx_convergence}

We consider the \textsc{ems} iterate (\ref{emsiterate}) for an arbitrary partition where we let $L \to \infty$ and $\max_j \area{Q_j} \downarrow 0$. We demonstrate that the \textsc{ems} iterate converges to its local-\textsc{em} counterpart in the $\mathcal{L}^1$ norm as well as uniformly. This result suggests local-\textsc{em} and \textsc{ems} techniques may be thought of synonymously.

Consider a partition $\mathcal{Q}$ based on a set of $L$ equally spaced grid points over a finite region $\mathcal{M}$. Without loss of generality, we consider the partition where elements are squares centred at these grid points. We demonstrate that the \textsc{ems} iterate will converge in $\mathcal{L}^1$ to its local-\textsc{em} counterpart as $L \rightarrow \infty$ and $\max_j |\!|Q_j|\!| \downarrow 0$. For the sake of clarity, we restrict attention to the locally constant case.

Without loss of generality, assume $\map_i = \map$ for all $i$. Denote the \textsc{ems} and local-\textsc{em} iterates as $\hat{\lambda}_{L}^{r}$ and $\hat{\lambda}_{\infty}^{r}$, respectively. In addition, assume $S_{ij} \subseteq \mathcal{M}$ for all $i, j$ where $\area{\map} < \infty$. Furthermore, let $K(z)$ be a symmetric positive kernel with compact support where $\int K(z) \, \mathrm{d}z =1$. Finally, define a norm on $\mathcal{M}$ to be $\norm{\lambda} = \int_{\cal M} | \lambda(u) |\, \mathrm{d}u$ and interpret the convergence of the function $f$ to the function $g$ to mean that $\norm{f - g} \rightarrow 0$ as $L \to \infty$. This we denote as $f \stackrel{\mathcal{L}^1}{\longrightarrow} g$. These details permit the statement of the following theorem:
%
%
\begin{theorem} \label{t:L1_convergence} 
%
Define $\mathcal{F}_1 = \left\{ \lambda \in \mathcal{L}^1 \mid
\text{$\lambda$ is non-negative with $\lambda(x) > 0$ for all $x \in \mathcal{M}$} \right\}.$ 
Define $\mathcal{H}_x$ to be $\mathcal{H}_x: \mathcal{L}^1 \mapsto \mathcal{L}^1$ such that
$$\mathcal{H}_x(\lambda) = \int \dfrac{K_h(u-x)}{\int_{\map} \off(u)
K_h(u-x)\dee{u}} \lambda(u)\, \mathrm{d}u,$$ where $K_h(\cdot)=K(\cdot/h)/h$ for some $h>0$ and $\off = \sum_i \off_i$.
Provided that $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$, we have, for a common initial value $\hat{\lambda}_0 \in \mathcal{F}_1$ and for all $r = 1, 2, \ldots$,
\begin{description}
\item{A.} $\hat{\lambda}_{L}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r}$, and
\item{B.} $\hat{\lambda}_{L}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_1$.
\end{description} \label{e: ems_iterate_v1} %%

\end{theorem}

\noindent Theorem~\ref{t:L1_convergence} can be proved by induction using results from operator theory and is given in the appendix.   

\section{Advantages of the \textsc{EMS} Implementation} \label{sec:llem_role}
Thus far we have exposed an interesting relationship between classes of algorithms that demonstrates the \textsc{ems} algorithm arises naturally from local likelihood considerations. This occurs because of the way
we have chosen to implement the local-\textsc{em} algorithm. However, we could have instead chosen to implement this algorithm through multiple imputation, or by using MCEM, or through some other favorite techniques. So why \textsc{ems}?

In this section, we exploit the relationship between local-\textsc{em} and the \textsc{ems} algorithm to gain insight into convergence issues and to expose the role of local-\textsc{em} as being paired with penalized likelihood in a manner analogous to the pairing of \textsc{em} and likelihood. We primarily focus on the local constant case as details are easier to follow.

\subsection{Uniqueness and Convergence}

Our objective is to allow the algorithm (\ref{EMSic}) to iterate until it converges to a fixed point
$\hat{\boldsymbol{\Lambda}}$ that solves
$$
\hat{\bf \Lambda}={\mathfrak M}(\hat{\bf \Lambda}) {\cal K}_h(\hat{\bf \Lambda}).
$$
We rely on results given in \cite{latham1995ems} to demonstrate the uniqueness of $\hat{\boldsymbol{\Lambda}}$ and we study the rate of algorithmic convergence in the neighbourhood of $\hat{\boldsymbol{\Lambda}}$. Specifically, we derive an upper bound for the spectral radius of the \textsc{ems} mapping and demonstrate that this upper bound shrinks toward zero as the value of $h$ goes to infinity.

\vspace{20pt}
\noindent
{\tt Uniqueness:}
In the situation in which the smoothing matrix is independent of $\boldsymbol{\Lambda}$, that is ${\cal K}_h({\bf \Lambda})={\cal K}_h$ as in the local constant case, \citet{latham1995ems} shows the fixed-point solution is unique in the parameter space where $\Lambda_\ell > 0$ for all $\ell$. This implies that, if the \textsc{ems} implementation of the local-\textsc{em} algorithm is convergent at all, its iterations will converge to the unique fixed-point solution $\hat{\boldsymbol{\Lambda}}$. This result is consistent with the observation made in \S2.4 of \cite{braun2005lld}.

\vspace{20pt}
\noindent
{\tt Convergence:}
To demonstrate the convergence of (\ref{EMSic}) \emph{locally}, it is sufficient to show that the \textsc{ems} mapping has a spectral radius less than one at $\hat{\boldsymbol{\Lambda}}$ (see, e.g. \citet{Ortega:1990numerical}). 
Let $\gamma(\boldsymbol{\Lambda})$ be the spectral radius of the \textsc{ems} mapping at $\boldsymbol{\Lambda}$. 
By the Perron--Frobenius theorem, $$\gamma(\boldsymbol{\Lambda}) \leq \max_m \sum_\ell \left[ \partial \mathcal{M}\, \mathcal{K}_h \right]_{\ell m},$$ where $\partial \mathcal{M}$ is an $L \times L$ matrix with
\begin{align*}
\left[ \partial \mathcal{M} \right]_{\ell m} &\equiv \dfrac{\partial
\mathcal{M}_\ell}{\partial \Lambda_m} %\Bigg|_{\boldsymbol{\Lambda}=\hat{\boldsymbol{\Lambda}}^{\ast}}
= \begin{cases} 
\sum_{ij} \dfrac{Y_{ij}}{\tilde{\off}_\ell} \dfrac{\sum_{n \ne \ell} \mathcal{I}_{ij\ell} \mathcal{I}_{ijn}
\Lambda_n}{\left(\sum_{n} \mathcal{I}_{ijn} \Lambda_n \right)^2} & \mbox{when $\ell=m$}\\
%& \\
\sum_{ij} \dfrac{Y_{ij}}{\tilde{\off}_\ell} \dfrac{-\mathcal{I}_{ijm} \mathcal{I}_{ij\ell}
\Lambda_\ell}{\left(\sum_{n} \mathcal{I}_{ijn} \Lambda_n \right)^2} & \mbox{otherwise}
\end{cases}.
\end{align*}
It follows that
\begin{align}
\gamma(\boldsymbol{\Lambda}) & \le \max_m \sum_\ell \left[ \partial \mathcal{M}\, \mathcal{K}_h \right]_{\ell m} 
 \notag \\
% &\hspace{.25in}
%
&\leq \sum_\ell \left| \sum_{ij} \dfrac{Y_{ij}}{\area{Q_\ell}} \dfrac{\sum_{n
\ne k} \mathcal{I}_{ij\ell} \mathcal{I}_{ijn} (\Lambda_n -
\Lambda_{\ell})}{\left(\sum_{n} \mathcal{I}_{ijn}
\Lambda_n \right)^2} \right| %c^{-1} \max_s \int_{Q_s} \int_{Q_k}K_h(u-x) \dee{u} \dee{x} 
\max_m \int_{Q_m} \dfrac{\int_{Q_\ell}K_h(u-x) \dee{u}}{\sum_{n} \tilde{\off}_n \int_{Q_{n}}  K_h(u-x) \dee{u}}\dee{x}
\label{e:spectral_upper}
\end{align}
We set the upper bound for $\gamma(\boldsymbol{\Lambda})$ to be the last inequality of (\ref{e:spectral_upper}) and, in the appendix, discuss conditions necessary  for this to be less than one.

\subsection{The Role of Local-\textsc{EM}} \label{appx_ems}
\cite{nychka1990spa} identified a relationship between \textsc{ems} and
penalized likelihood by demonstrating that a modified \textsc{ems} algorithm
maximizes 
\begin{equation} \label{e:npllk}
\mathcal{L}(\boldsymbol{\theta}) 
+{\bf Pen}(\boldsymbol{\theta}, {\cal K}).
\end{equation} 
Here $\mathcal{L}(\boldsymbol{\theta})$ is the appropriate nonparametric likelihood and is context dependent \citep{turnbull1976edf, wellner2000npmle, vardi1985pet}. 
The parameter $\boldsymbol{\theta}$ is a vector with components $\theta_{ij}$ such that $\theta_{ij}^2 =\Lambda_{ij}$ for all $i,j$, and ${\bf Pen}(\boldsymbol{\theta}, 
{\cal K})$ is a penalty function that depends on both $\boldsymbol{\theta}$ and some smoothing matrix ${\cal K}$. Below we demonstrate that, with the appropriate choice of kernel, namely an equivalent kernel, the local-\textsc{em} algorithm may be used to maximize a penalized likelihood function. This occurs because the equivalent kernel leads, under the discretization of $\lambda$, to Nychka's modification of the \textsc{ems} algorithm.

We begin by first considering  the following penalty:
$$
{\bf Pen}(\boldsymbol{\theta}, {\cal K})=\boldsymbol{\theta}^{T} \mathbf{R} \boldsymbol{\theta}.
$$ 
$\mathbf{R} = \mathcal{K}^{-1} - \tilde{\boldsymbol{\off}}$, where $\mathcal{K}$ is the smoothing matrix in (\ref{smoothstep}) and $\tilde{\boldsymbol{\off}}$ is a diagonal matrix with entries $\tilde{\off}_\ell$. Next, we explore the relationship between this penalized likelihood and the local-\textsc{em} algorithm by considering the following function
$$ (1/\lambda(u))^{1/2} K_h(u-x), $$
where $\lambda$ is the smooth component of the true density or intensity, and $K_h(u-x)$ is any symmetric positive kernel with compact support. Let $c(x)$ be the normalizing constant with respect to $u$ such that
\begin{equation} \label{e:equivalent_kernel}
K_{h}^{\ast}(u - x) = c^{-1}(x) K_h(u-x)/\lambda^{1/2}(u) 
\quad \mbox{and} \quad
\int K_{h}^{\ast}(u - x) \dee{u} = 1.
\end{equation}
We refer to $K_h^{\ast}$ as an equivalent kernel. Next, consider the use of the equivalent kernel with the $\cal Q$-approximant $\bar{\lambda}^{r}$ in our local-\textsc{em} algorithm while assuming $\area{Q_j} = \area{Q}$ for all $j$. 
%It can be shown that $c(x) = \lambda^{-1/2}(x) + o(h)$ for a small value of $h$; as a result, the first-order approximation of  (\ref{e:equivalent_kernel}) is  given by
With the first-order approximation of  (\ref{e:equivalent_kernel}), i.e.\
$$
K_{h}^{\ast}(u - x) = (\lambda(x)/\lambda(u))^{1/2} K_h(u-x)+ o(h),
$$
the conditional expectation becomes:
\begin{eqnarray*}
\mbox{E}\left[ K_h^{\ast}(X-x) \mid X \in S_{ij}; \bar\lambda^r \right] &=& 
\sum_{k} \left(
\dfrac{\hat{\Lambda}_{\ell}^{r}}{\hat{\Lambda}_{k}^{r}} 
\right)^{1/2} 
\area{Q}^{-1} \int_{Q_{k}} K_h(u-x)\dee{u}
\dfrac{\mathcal{I}_{ijk}\hat{\Lambda}_{k}^{r}}{\sum_{m} \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{r}} + o(h)
\end{eqnarray*}
for $x \in Q_\ell$. This in turn gives the following iteration for $\boldsymbol{\Lambda}$:
\begin{align}
\hat{\Lambda}_{\ell}^{r+1} &= \sum_{ijk} \left( \dfrac{\hat{\Lambda}_{\ell}^{r}}%
{\hat{\Lambda}_{k}^{r}} \right)^{1/2} \area{Q}^{-1} \int_{Q_\ell} \dfrac{\int_{Q_k} K_h(u-x) \dee{u}}{\int_{\sum_i \mathcal{M}} \off_i(u) K_h^{\ast}(u-x) \, \mathrm{d}u} \, \mathrm{d}x \dfrac{ Y_{ij} \mathcal{I}_{ijk} \hat{\Lambda}_{k}^{r}}{\sum_m \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{r}} \notag \\
%
& = \sum_{ijk} \left( \dfrac{\hat{\Lambda}_{\ell}^{r}}%
{\hat{\Lambda}_{k}^{r}} \right)^{1/2} \area{Q}^{-1} \int_{Q_\ell} \int_{Q_k} K_h(u-x) \dee{u} \dee{x} %
\frac{1}{\tilde{\off}_\ell} \dfrac{ Y_{ij} \mathcal{I}_{ijk} \hat{\Lambda}_{k}^{r}}{\sum_m \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{r}} + o(h)
\label{e:modified_EMS}
\end{align}
The expression (\ref{e:modified_EMS}) can be re-expressed in the following matrix form:
\begin{equation} \label{modified_EMSpl}
\hat{\boldsymbol{\Lambda}}^{r+1}={\mathfrak M}(\hat{\boldsymbol{\Lambda}}^{r}) {\cal K}_h^{\ast}\left(\hat{\boldsymbol{\Lambda}}^{r}\right).
\end{equation}
${\cal K}_h^{\ast}\left(\hat{\boldsymbol{\Lambda}}^{r} \right) =
\left(\hat{\boldsymbol{\Theta}}^{r}\right)^{-1}\, \tilde{\boldsymbol{\off}} \, \mathcal{K}_h \, \tilde{\boldsymbol{\off}}^{-1} \, \hat{\boldsymbol{\Theta}}^{r}$, where $\hat{\boldsymbol{\Theta}}^{r} = \text{diag}(\hat{\theta}_{k}^{r})$. Note that $\off_k = 1$ in the context considered by \cite{silvermanems} and \cite{nychka1990spa}. In addition, provided that $\off_k = 1$ for all $k$, the iteration (\ref{modified_EMSpl}) is recognized as Nychka's modified \textsc{ems} algorithm with the smoothing matrix equal to ${\cal K}={\cal K}_h$. In other words, this iteration can be regarded as a generalization of Nychka's modified \textsc{ems}  allowing for offsets.


\bigskip
\noindent {\bf Remark:} %{\bf $\mathbf{\mathcal{L}}^1$-Convergence of EMS to local-EM:} 
The theoretical results on $\mathcal{L}^1$ and uniform convergence stated in \S3.3 can also be extended to the local-\textsc{em} algorithm with the equivalent kernel. We re-state Theorem~\ref{t:L1_convergence} with the equivalent kernel (\ref{e:equivalent_kernel}) as the following proposition and include the proof in the appendix.
\begin{proposition} \label{t:L1_convergence_equivalent_kernel} 
When the equivalent kernel (\ref{e:equivalent_kernel}) is used, we
instead define
$$
\mathcal{F}_2 = \left\{ \lambda \in \mathcal{L}^1 {\Big |} \text{$\lambda$ is
non-negative with $\lambda(x) > 0$ for all $x \in \mathcal{M}$ and
$\int_\mathcal{M} \lambda^{1/2} < \infty$}\right\}.
$$
For a common initial value $\hat{\lambda}_0 \in \mathcal{F}_2$, we have, for all $r = 1, 2, \ldots$,
\begin{description}
\item[A.] $\hat{\lambda}_{J}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r}$, and
\item[B.] $\hat{\lambda}_{J}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_2$.
\end{description} \label{e: ems_iterate_v1} %%

\end{proposition}

\section{Examples}

\subsection{Intensity Estimation for Panel Count Data}

Consider a temporal process where each individual $i$ has event times $X_{ik}, k=1\ldots K_i$ and 
is observed at a  set of points
${\cal T}_i=\{\tau_{ij}, j = 0 \ldots J_i\}$.  These observation times are either prearranged or determined by a visit process that is assumed to be independent of the event process. Here $S_{ij}=[\tau_{ij-1},\tau_{ij}]$ is referred to as the $j$th panel
for the $i$th individual and the number of events $Y_{ij}$ is recorded for each panel.  Following the setup of \cite{hu2008gls} we set $\off_i(s)=1,~\forall ~i$ and let
$${\cal T}=\cup_{i=1}^n {\cal T}_i=\{\tau_\ell; \ell = 0 \ldots L\}$$
so that
${\cal Q}$ denotes a partition of the data
where $Q_\ell=[\tau_{\ell-1},\tau_\ell]$. For this setting the EMS implementation (\ref{EMSic}) has $\mathfrak{M}(\hat{\boldsymbol{\Lambda}}^{(r)})$ reducing to a step in the self-consistent algorithm of \cite{hu2008gls}.

\subsubsection{A simulation study}

A simulation study was carried out to examine the mean integrated squared error (MISE) of the local-EM estimator as well as several alternatives. Event times follow a Poisson process with intensity $\lambda(x)$ equal to a re-scaled
gamma density function (shape = 4.75 and rate=0.75). Each subject is assumed to have a sequence of predetermined observation times $\tau_1, \tau_2, \ldots, \tau_J$ where $\tau_j = j$ and $J=20$. However, subjects miss a visit with increasing probability, specifically, the probability of missing a visit equals
$(\tau_j/20)^{0.25} - 0.05$. Finally, a subject's panel counts are obtained by aggregating event times among consecutive observed
visits. Note that each subject is assumed to have no event at time 0.

For each of 500 simulations, and for a fixed bandwidth $h$, we compute several estimators of the intensity using a Gaussian kernel. For each estimator, $\hat{\lambda}$, we approximate its MISE as $S^{-1} \sum_k \int (\hat{\lambda}_k(u) - \lambda(u))^2 \, \mathrm{d}u$. This was performed for 49 different values of $h$ between $0.05$ and $2.45$. The resulting MISEs, averaged over simulations, for each estimator are plotted in Figure \ref{fig:pc:mise_bw}. The first estimator is a kernel smoother that uses the exact observations, without interval censoring or missing visits, rather than the panel counts. This is the gold standard. For the panel counts we use the partition $\mathcal{Q}$ and compute the local-EM estimator in both the constant and linear cases, that is, where the polynomial is truncated at the first or second term. In addition, as an alternative to, and competitor of, the local-EM estimators we consider simply smoothing the self-consistent estimator of \cite{hu2008gls} after their EM algorithm has converged.

The results favour the local-\textsc{EM} estimator considerably. While the gold standard achieves the smallest MISE, the local-EM estimators track it quite closely and attain the next smallest MISE for a similar window size. Smoothing after the EM algorithm converges has the worst performance, achieving a larger minimum MISE at a larger window size. This result is perhaps not all that surprising given that $\lambda$ is quite non-linear. In cases where $\lambda$ is linear the improvements
in MISE for the local-\textsc{EM} estimator are not as dramatic.

\begin{figure}\centering
\includegraphics[trim = 0mm 4mm 10mm 20mm, clip, width=0.75\textwidth]{mise_vs_bw_30sub_77prob.pdf}
\caption{Mean integrated squared error (MISE) as a function of bandwidth for the kernel intensity estimator using exact observations (---), smoothed EM estimator using left-end points ({\color{red} $- -$}), smoothed EM estimator using midpoints ({\color{green} $- -$}), smoothed EM estimator using right-end points ({\color{blue} $- -$}), local-EM estimator using constant case ($\cdots$), and local-EM estimator using linear case ($- -$). The kernel intensity estimator has the lowest MISE of 0.0609 at a bandwidth of 1.00. The proposed local-EM estimator achieves the lowest MISE of 0.0725 at a bandwidth of 0.90 for panel count data.} 
\label{fig:pc:mise_bw}
\end{figure}

Figure 2 shows MISE as a function of the proportion of missing visits and the number of subjects in the sample.  For each of 500 simulations, the lowest MISE over all possible bandwidths $h$ is retained.  Figure \ref{fig:pc:mise_prob} fixes the number of subjects at 30 and varies the proportion of missing visits.  The difference between the methods increases as the amount of missing visits (and hence aggregation) increases, with local linear EM performing nearly as well as using the complete data when the proportion of missing visits is under 60\%.  Figure \ref{fig:pc:mise_sub} fixes the proportion of missing visits at 77\% and varies the number of subjects.  All methods provide essentially perfect estimates with 1000 subjects, with the differences between estimators increasing as the number of subjects decreases.  Local-\textsc{EM} in the linear case consistently outperforms local-\textsc{EM} in the constant case, which in turn outperforms smoothing after the EM algorithm has converged.

\begin{figure}\centering
\subfigure[Minimum MISE as a function of average probability of missing a visit.]{
\includegraphics[trim = 0mm 4mm 10mm 20mm, clip, width=0.45\textwidth]{mise_vs_prob_30sub.pdf}
\label{fig:pc:mise_prob}
}
\subfigure[Minimum MISE as a function of number of subjects.]{
\includegraphics[trim = 0mm 4mm 10mm 20mm, clip, width=0.45\textwidth]{mise_vs_sub_77prob.pdf}
\label{fig:pc:mise_sub}
}
\caption{Mean integrated squared error (MISE) was computed with a kernel intensity estimator using exact observations (---), smoothed EM estimator using midpoints ($\cdot\cdot\cdot$), locally-constant EM estimator  ($\cdot - $), and locally-linear EM estimator ($- -$).} 
\end{figure}


\subsection{The Spatial Structure of Lupus in Toronto, Canada}

In this example, we investigate the spatial structure of female Lupus incidence  in the Greater Toronto Area. The lupus clinic at the Toronto Western Hospital records the census tracts where individuals with lupus reside, and has data from 1965 to 2007. If lupus is affected by a spatially varying environmental or social risk factor, it should result in a spatially smooth relative risk surface $\lambda(s)$.

Population data is available from the censuses of 1971, 1981, 1991, 1996, and 2001, 
and the boundaries of the census tracts for 1971 and 2001 are shown in Figure 
\ref{f:lupus_map}a. 
Within each census period $i$ the offset surface $\off_i(s)$ is calculated as a function of the population data (assumed to be constant within census tracts), and estimated rates by age group and a time effect.  These offsets are in turn used in the iteration (\ref{EMSic}) to estimate $\lambda(x)$ from the case data aggregated to census tracts.   To simplify the calculations, the regions $Q_\ell$ on which the algorithm operates form a grid of square cells covering the region and the census tract boundaries are adjusted to align with the grid.
Figure \ref{f:lupus_map}b shows the estimated intensity surface using the local-EM algorithm with locally constant risks and bandwidth of 1.35km chosen by cross-validation.  The risk surface is fairly flat throughout most of the region, with an area of elevated risk in the downtown area of the city.  The area of elevated risk may be due to a factor not accounted for, such as ethnicity, or reporting bias due to the proximity of the clinic.

Finally, note that it is the differing boundaries of these two maps, as well as those for the other years, that necessitate the use of the local-\textsc{EM} algorithm. However, if the boundaries of regions had been the same for all the maps, that is $S_{ij}=S_j,~ J_i=J~\forall ~i$, then we can set ${\cal Q}\equiv \{S_j\}$ and the algorithm (\ref{eq:efficientEMS}) iterates once, reducing to methods advocated by \cite{brillinger1990st, brillinger1991si, brillinger1994esp} in a series of papers concerning spatial smoothing where data are aggregated to regions within a map.


\begin{figure}[ht]
\centering
\subfigure[Boundaries for 1971 (grey) and 2001 (black)]{
\includegraphics[width=0.45\textwidth,clip=true,trim=3cm 3.6cm 1.5cm 4cm]{torCT.pdf}
}
\subfigure[Estimated risk surface]{
\includegraphics[width=0.45\textwidth, angle=0]{Relative_Risk2.png}
}
\caption{Census tract boundaries and estimated risk surface for female lupus using EMS with a bandwidth of 1350 m.}
 
\label{f:lupus_map}
\end{figure}

\subsubsection{Spatial Simulation Study}


A simulation study of 500 samples is conducted to assess the performance of the proposed local EM relative risk estimator in a simplified scenario consisting of two maps on a square measuring 5 by 5.  The first map consists of five equally sized vertical rectangles with offset $O_1(s)$ being piecewise constant taking values 18, 28, 38, 28 and 18 in regions $S_{11}$ to $S_{15}$.  The second map consists of 5 horizontal rectangles $S_{21}$ to $S_{25}$ with offsets defined in a similar way.
  Thus overlaying the two maps gives a partition of $Q_\ell$ consisting of 25 unit squares.  

The true relative intensity surface $\lambda(s)$ is the product of two Gamma density functions (one in each spatial dimension) with shape and scale parameters 1.5 and 0.5 respectively.  This function attains its maximum of $\lambda[(0.25, 0.25)] = 100$.  Individual case locations for each map are simulated as an inhomogeneous Poisson process with intensity $O_i(s)\lambda(s)$ and aggregated to regions $S_{ij}$.  Gaussian kernels with bandwidth varying between 0.01 and 1 were used to estimate $\hat\lambda$ from the aggregated data and offsets, and the MISE was calculated.  MISE as a function of bandwidth is shown in Figure \ref{fig:spat:mise_bw} for the local-\textsc{em} method, the gold standard (a kernel smoother using the exact locations), and a smoothed EM estimator (smoothing after the EM has converged).   Local-\textsc{em} has a minimum MISE which is competitive with the gold standard, with local-\textsc{em} achieving an MISE of 0.00226 at a bandwidth of 0.19 vs the gold standard's MISE of  0.00226 at a bandwidth of 0.19.  Smoothed EM has considerably higher MISE.  

\begin{figure}\centering
\includegraphics[trim = 0mm 4mm 10mm 20mm, clip, width=0.75\textwidth]{mise_vs_bw_spatial.pdf}
\caption{Mean integrated squared error (MISE) as a function of bandwidth for the kernel intensity estimator using exact observations (---), smoothed EM estimator using centroids ($\cdot\cdot\cdot$) and local-EM estimator using constant case ($- -$).} 
\label{fig:spat:mise_bw}
\end{figure}

\section{Discussion}

Local likelihood can be seen as a semi-parametric method, providing a compromise between the power and theoretical rigour of parametric methods and the flexibility of kernel smoothing algorithms.  Local-EM provides a method for applying local likelihood in situations involving interval or area censoring with irregularly observed regions.  By demonstrating that local-EM and the EMS algorithm are related, it is hoped that the computational advantages offered by EMS will lead to greater adoption of local-EM methods.  Formulating EMS problems in the context of local likelihood allows for a natural and rigorous method of incorporating offsets.

A final comparison of local-EM to \cite{silvermanems} permits further insights beyond what has already been discussed in the paper. In \cite{silvermanems} quantities analogous to $S_{ij}$ and $Q_\ell$ are referred to as observation and reconstruction bins respectively and the context concerns image reconstruction involving a single image rather than multiple maps say. As a result, example \S 5.3 could well be thought of as an extension of the image reconstruction techniques of \cite{silvermanems} to an epidemiological setting. Furthermore, noting there are no offsets in \cite{silvermanems}, the expression (2.2) given there and ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ are related. For example, their weights $p_{st}$ simplify in our setting to the indicator variables ${\cal I}_{ij\ell}$ because we have assumed the locations of events have been measured without error. This observation provides an avenue for extending the local-EM toolbox to settings where data are mismeasured, but this is beyond the scope of this paper. Finally, we note that in our context ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ is an extension of \cite{vardi1985pet} to multiple maps where data are not mismeasured.


%\vspace{15pt}
%\noindent
%{\it should we mention Vardi anywhere, or do we need to demonstrate that ${\mathfrak M}(\hat{\bf \Lambda}^{r})$ is an \textsc{em} step}
\section*{Acknowledgements}
We are grateful to Drs.\ Paul Fortin and Mustafa al-Maini for the provision of the lupus data. We would also like to acknowledge the Natural Sciences and Engineering Research Council of Canada for supporting this research through individual operating grants.

\bibliography{llems}

\appendix
\section{Proof of Theorem~\ref{t:L1_convergence}}
The proof relies on $\mathcal{H}_x(\lambda)$ being a bounded linear functional as well as some other basic results in operator theory stated as lemmas below. These lemmas may be found in \cite{royden1988ra}.

\begin{lemma}\label{lemma:piece_approx}
Let $\lambda \in \mathcal{L}^1$. Then the $\mathcal{Q}_L$-approximant of $\lambda$ converges in ${\cal L}^1$ to $\lambda$ on $\mathcal{M}$ as $L \to \infty$; that is, $\bar{\lambda} \stackrel{\mathcal{L}^1}{\longrightarrow} \lambda$.
\end{lemma}
%
\begin{lemma}\label{lemma:bl_functional}
If $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$, then
$\mathcal{H}_x$ is a bounded linear functional for all $f \in
\mathcal{L}^1$. That is, for all $x, a, b \in \Re$,
$\mathcal{H}_x(af+b) = a\mathcal{H}_x(f)+b$, and there exists a real
number $M_h$ such that $\mathcal{H}_x (f) \leq M_h \norm{f}$.
\end{lemma}

{\bf Proof of Theorem~\ref{t:L1_convergence}:} Consider a fixed $h$ and $n$ throughout the entire proof. Let $r=1$. 
Note that $\int_{S_{ij}} \bar{\lambda}_{0}(u) \dee{u} = \int_{S_{ij}} \hat{\lambda}_0(u) \dee{u}$ by the definition of $\bar{\lambda}_{0}$. Repeated use of the triangle
inequality gives
\begin{equation*}
\begin{split}
\norm{\hat{\lambda}_{L}^{1} - \hat{\lambda}_{\infty}^{1}} 
&\leq \sum_{ij} Y_{ij} \left( \int_{S_{ij}} \hat{\lambda}^0(u)\dee{u} \right)^{-1} \int_{\mathcal{M}} \int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u)
K_h(u-x)\dee{u}}\left| \bar{\lambda}^{0}(u) - \hat{\lambda}^0(u) \right| \dee{u} \dee{x} \\
%line 2
&\leq \sum_{ij} Y_{ij} \left( \int_{S_{ij}} \hat{\lambda}^0(u) \dee{u} \right)^{-1} \int_{\mathcal{M}} \int_{\mathcal{M}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u)
K_h(u-x)\dee{u}} \left| \bar{\lambda}^{0}(u) - \hat{\lambda}^0(u)\right| \dee{u} \dee{x} \\
&\leq \area{\map} \sum_{ij} Y_{ij} \left( \int_{S_{ij}} \hat{\lambda}^0(u) \dee{u} \right)^{-1} M_h \norm{\bar{\lambda}^{0} - \hat{\lambda}^0}.
\end{split}
\end{equation*}
Here the last inequality is due to the finiteness of $\mathcal{S} \subset \mathcal{M}$ and Lemma \ref{lemma:bl_functional}. By Lemma \ref{lemma:piece_approx}, $\bar{\lambda}^{0} \xrightarrow{{\cal L}^1} \hat{\lambda}^0$, thus $\hat{\lambda}_{L}^{1} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{1}$. Moreover, Lemma \ref{lemma:bl_functional} also ensures that $\hat{\lambda}_{L}^{1}$ and $\hat{\lambda}_{\infty}^{1}$ both belong to the class $\mathcal{F}_1$.\\[20pt]
%
\noindent {\it Induction Step:}
%
Assume that $\hat{\lambda}_{L}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r}$ and $\hat{\lambda}_{L}^{r}$,
$\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_1$. Let $b_{ij}^{r} = \dfrac{\int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)\dee{v}}{\int_{S_{ij}} \bar{\lambda}_{L}^{r}(v) \dee{v}}.$ With the repeated use of the triangle inequality, we have
\begin{align*}
%line 1
& \area{\hat{\lambda}_{L}^{r+1} - \hat{\lambda}_{\infty}^{r+1}} \\
&\hspace{.15in} \leq \sum_{ij} Y_{ij}
\int_{\mathcal{M}}\int_{S_{ij}} 
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}} 
\left| \left( \dfrac{\bar{\lambda}_{L}^{r}(u)}{\int_{S_{ij}}
\bar{\lambda}_{L}^{r}(v)\dee{v}} - \dfrac{\hat{\lambda}_{\infty}^{r}(u)}{\int_{S_{ij}}
\hat{\lambda}_{\infty}^{r}(v)\,\mathrm{d}v}\right)\right| \dee{u}\dee{x}\\
%line 3
&\hspace{.15in} \leq \sum_{ij} Y_{ij} 
\left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)\,\mathrm{d}v \right)^{-1}
\int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\left | b_{ij}^{r} \bar{\lambda}_{L}^{r}(u)-\hat{\lambda}_{\infty}^{r}(u) \right
|\dee{u} \,\mathrm{d}x \\
%line 5
&\hspace{.15in} \leq \area{\map} \sum_{ij} Y_{ij}
\left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)\,\mathrm{d}v \right)^{-1} 
M_h \norm{b_{ij}^{r} \bar{\lambda}_{L}^{r} - \hat{\lambda}_{\infty}^{r}}.
\end{align*}
The last inequality is again due to Lemma \ref{lemma:bl_functional}. Now since the induction assumption implies $\bar{\lambda}_{L}^{r} \xrightarrow{{\cal L}^1} \hat{\lambda}_{\infty}^{r}$ and
$b_{ij}^{r} \to 1$, we have, for all $i, j$,
\begin{equation*}
\Big|\!\Big| b_{ij}^{r} \bar{\lambda}_{L}^{r} -
\hat{\lambda}_{\infty}^{r} \Big|\!\Big|_1 \leq \Big|\!\Big|
b_{ij}^{r} \, \bar{\lambda}_{L}^{r} - \bar{\lambda}_{L}^{r}
\Big|\!\Big|_1 + \Big|\!\Big| \bar{\lambda}_{L}^{r} -
\hat{\lambda}_{\infty}^{r} \Big|\!\Big|_1 \to 0.
\end{equation*}%
In addition, it is evident that $\hat{\lambda}_{L}^{r+1}$ and $\hat{\lambda}_{\infty}^{r+1}$ belong to $\mathcal{F}_1$ provided that $\hat{\lambda}_{L}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_1$. Hence, we have (\textbf{A}) $\hat{\lambda}_{L}^{r+1} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{r+1}$ on $\mathcal{S}$, and ({\bf B}) $\hat{\lambda}_{L}^{r+1}$, $\hat{\lambda}_{\infty}^{r+1} \in \mathcal{F}_1$ by induction.\\[20pt]

Technically, the assumption, $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$, ensures that the ratio is bounded, which in turn facilitates the repeated use of the triangle inequality.
In practice, this assumption requires the data to provide sufficient information that permits proper estimation of $\lambda$ by excluding the time intervals or regions in which the data are noninformative.
For example, in the setting of panel count data in which $\off(x)$ indicates the number of at-risk subjects at time $x$, the proposed method can be employed to estimate an intensity 
only when there is at least one at-risk subject in a nearby time interval or element of the partition under consideration. This is certainly the case in most applications.
In the other setting of area-censored data in which $\off(x)$ represents the expected number of events at location $x$ when the spatial information is ignored, 
the assumption will allow the proposed method to properly estimate a risk surface only when there is some indication of occurrence of the event in the neighbourhood of $x$. This effectively excludes regions, such as lakes or airports, where the probability of occurrence of the event equals 0.
Finally note the above result may be immediately extended beyond the locally constant case to local linear, quadratic and so on, provided one can give conditions for $\mathcal{H}_x(\lambda)$ to be a bounded linear functional.

The mode of convergence can be further strengthened to uniform convergence should the kernel function be continuous and the initial estimate $\norm{\hat{\lambda}_0}$ be bounded on $\mathcal{M}$.  To see this, note that the mode of the convergence in (\textbf{A}) can be \emph{pointwise}, i.e.\  $\hat{\lambda}_{L}^{r}(x) \longrightarrow \hat{\lambda}_{\infty}^{r}(x)$ for all $x \in \mathcal{M}$. 
The continuity of the kernel implies that, for each $\epsilon >0$, there exists a $\delta >0$ such that, for all $x, y \in \map$,
$$
\norm{ \int \lambda(u) K_h(u-x)\dee{u} - \int \lambda(u) K_h(u-y)\dee{u} } \leq \epsilon \norm{\lambda} \quad\mbox{whenever $\norm{x-y} < \delta$.} 
$$
This statement holds for all $\lambda \in \mathcal{F}_1$ by the boundedness of $\lambda$. 
According to the definition of equicontinuity, the functional class defined by $\mathcal{H}_x(\lambda)$, where $\lambda \in \mathcal{F}_1$, forms a class of \emph{equicontinuous} functions.
In this case, pointwise convergence implies uniform convergence by the Arzel\`{a}--Ascoli Theorem \cite[page 169]{royden1988ra}. We state this property as a corollary without proof since the proof is very similar to the one above.

\begin{corollary}
Assume the conditions in Theorem~\ref{t:L1_convergence}.  Furthermore, if $K$ is continuous and $|\!| \hat{\lambda}_0 |\!|_1$ is bounded on $\mathcal{M}$, then $\hat{\lambda}_{L}^{r} \longrightarrow \hat{\lambda}_{\infty}^{r}$ uniformly.
\end{corollary}

\noindent {\bf Remark:} The above results may be immediately extended beyond the locally constant case to local linear, quadratic and so on, provided one can give conditions for $\mathcal{H}_x(\lambda)$ to be a bounded linear functional.

\section{Convergence Conditions}

{\tt Density estimation:}
Consider the local-\textsc{em} method for density estimation.   In this context, our aim is twofold. First, we show that, for a fixed $\boldsymbol{\Lambda}$, the upper bound can be made as small as possible by increasing $h$.  Second, given a bounded parameter space, the local-\textsc{em} algorithm is globally convergent when $h$ is sufficiently large.  

By expressing (\ref{e:spectral_upper}) in terms of $p_\ell = \int_{Q_\ell} f(u) \dee{u}$, we have
\begin{align}
\gamma(\Lambda) \leq \sum_k \left| \sum_{i} \dfrac{1}{\area{Q_k}} \dfrac{\sum_{t
\ne k} \mathcal{I}_{ik} \mathcal{I}_{it} (p_{t} -
p_{k})}{\left(\sum_{\ell} \mathcal{I}_{i\ell}
p_\ell \right)^2} \right|  \max_s \int_{Q_s} \int_{Q_k}K_h(u-x) \dee{u} \dee{x}.
\label{e:spectral_upper_den}
\end{align}
Since the kernel is symmetrical, 
\begin{align}
\max_s \int_{Q_s} \int_{Q_k} K_h(u-x) \dee{u} \dee{x} &= \int_{Q_k} \int_{Q_k} K_h(u-x) \dee{u} \dee{x} \notag \\
&= \area{Q_k} \int_{(t_{k-1} - x^{\ast})/h }^{(t_k - x^{\ast})/h} K(z) \dee{z} \quad \text{with some $x^{\ast} \in Q_k$}
\label{e:kernel_limit_1}
\end{align}
We obtain the following by taking the limit:
\begin{align*}
\lim_{h \to \infty} \area{Q_k} \int_{(t_{k-1} -x^{\ast})/h }^{(t_k - x^{\ast})/h} K(z) \dee{z} 
&= \lim_{h \to \infty} \area{Q_k} \Big(\int_{-\infty}^{(t_k - x^{\ast})/h} K(z) \dee{z} - \int_{-\infty}^{(t_{k-1} -x^{\ast})/h } K(z) \dee{z} \Big) \\
&= \area{Q_k}\Big(\int_{-\infty}^{0} K(z) \dee{z} - \int_{-\infty}^{0} K(z) \dee{z} \Big) = 0 
\end{align*}
Thereby, the upper bound shrinks toward 0 as $h$ increases.  The shrinking spectral radius, in turn, accelerates the convergence of the local-\textsc{em} algorithm. Moreover, the upper bound (\ref{e:spectral_upper_den}) is a decreasing function of $h$. To see this, suppose $0 < h_1 < h_2$. Then
\begin{align}
&\int_{J_s} \int_{J_k}K_{h_1}(u-x) \dee{u} \dee{x} - \int_{J_s} \int_{J_k}K_{h_2}(u-x) \dee{u} \dee{x} \notag \\
&\hspace{1in}= \int_{J_s} \Bigg( \int_{(t_{k-1} -x)/h_1 }^{(t_k - x)/h_1} K(z) \dee{z}  - \int_{(t_{k-1} -x)/h_2 }^{(t_k - x)/h_2} K(z) \dee{z} \Bigg) \dee{x}  \notag \\
&\hspace{1in} = \int_{J_s} \Bigg( \int_{(t_{k} -x)/h_2 }^{(t_k - x)/h_1} K(z) \dee{z} + \int_{(t_{k-1} -x)/h_1 }^{(t_{k-1} - x)/h_2} K(z) \dee{z} \Bigg) \dee{x}  \geq 0 \notag 
\end{align}
This implies that, provided that the parameter space is bounded, there exists an $H>0$ such that, for all $h>H$, the upper bound (\ref{e:spectral_upper_den}) is less than one for all $p_\ell$.    In this case, the \textsc{ems} mapping is a contraction mapping, and the local-\textsc{em} algorithm is globally convergent in the parameter space, as conjectured in \cite{braun2005lld}.

\vspace{20pt}
\noindent
{\tt Intensity estimation:}
In the context of intensity estimation, the convergence of the local-\textsc{em} algorithm is more complicated, and we can only demonstrate that the local-\textsc{em} algorithm is locally convergent based on the result given by \cite{Green:1990rt}. Let $\gamma$ denote the spectral radius of the \textsc{ems} mapping at the fixed point $\hat{\boldsymbol{\Lambda}}$. In this case, \cite{Green:1990rt} shows that $\gamma < 1$ for all $h>0$. This implies that local-\textsc{em} iterations converge at least in the neighbourhood of $\hat{\boldsymbol{\Lambda}}$. Our experience suggests that, when $h$ is sufficiently large, the iteration of the local-\textsc{em} algorithm never fails to converge. In addition, the local-\textsc{em} requires fewer steps than the \textsc{em} algorithm to meet convergence criteria. 

\section{Proof of Proposition \ref{t:L1_convergence_equivalent_kernel}}

The proof is very similar to the one for Theorem~\ref{t:L1_convergence}.  However, it requires another useful result from integration theory, which is stated as the following lemma:
\begin{lemma} \label{lemma:integral_op1}
Let $\gamma_h(u, x) = g(x) K_h(u-x) f(u)$, where $f$, $g \in \mathcal{L}^1$ and $K_h \in \mathcal{L}^\infty$. Then $\gamma_h(u, x)$ is an
$\mathcal{L}^1$ function on $\Re^2$ with
\[
\iint \left| \gamma_h(u, x) \right| \, \mathrm{d}u \, \mathrm{d}x \leq
M_h \cdot \norm{g} \cdot \norm{f} .
\]
\end{lemma}

\vspace{20pt}
\noindent \textbf{Proof of Proposition~\ref{t:L1_convergence_equivalent_kernel}:} Let $r=1$. Assume $\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u} \geq c > 0$. By the triangle inequality and Lemma \ref{lemma:integral_op1}, we have
%
\begin{align*}
&\norm{\hat{\lambda}_{J}^{1} - \hat{\lambda}_{\infty}^{1}} %= n^{-1} %% line2
\leq \sum_{ij} Y_{ij} \int_{\mathcal{M}}\int_{S_{ij}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\left| \dfrac{\bar{\lambda}_{0}^{1/2}(u) \,
\bar{\lambda}_{0}^{1/2}(x) - \hat{\lambda}_{0}^{1/2}(u) \,
\hat{\lambda}_{0}^{1/2}(x) }{\int_{S_{ij}} \hat{\lambda}_{0}(v) \,
\mathrm{d}v} \,\right| \dee{u} \dee{x} \\
&\\
% line 3
&\hspace{.15in} \leq \sum_{ij} Y_{ij} \left( \int_{S_{ij}} \hat{\lambda}_{0}(v) \dee{v} \right)^{-1} \left\{
\int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\bar{\lambda}_{0}^{1/2}(x) \left| \bar{\lambda}_{0}^{1/2}(u) -
\hat{\lambda}_{0}^{1/2}(u) \right| \, \mathrm{d}u \, \mathrm{d}x \right. \\
%
& \hspace{2in} + \left. \int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}}
\hat{\lambda}_{0}^{1/2}(u) \left| \bar{\lambda}_{0}^{1/2}(x) -
\hat{\lambda}_{0}^{1/2}(x) \right| \, \mathrm{d}u \, \mathrm{d}x \right\} \\
%%
&\\
%%
&\hspace{.15in} \leq \area{\map} \sum_{ij} Y_{ij} \left( \int_{R_{ij}} \hat{\lambda}^{0}(v) \dee{v} \right)^{-1} M_h \left( \norm{(\bar{\lambda}^{0})^{1/2}} + \norm{(\hat{\lambda}^{0})^{1/2}} \right) \norm{(\bar{\lambda}^{0})^{1/2} - (\hat{\lambda}^{0})^{1/2}} 
\end{align*}
By Lemma \ref{lemma:piece_approx} and the continuous mapping theorem (\textsc{cmt}),  $\norm{\bar{\lambda}_{0}^{1/2} - \hat{\lambda}_{0}^{1/2}} \to 0$. Therefore, $\hat{\lambda}_{J}^{1} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{1}$. Furthermore, if we choose
$\hat{\lambda}^0$ to be bounded above, then $\hat{\lambda}_{J}^{1}$ and $\hat{\lambda}_{\infty}^{1}$ will be also bounded. This, in turn, ensures that $\hat{\lambda}_{J}^{1},\ \hat{\lambda}_{\infty}^{1} \in \mathcal{F}_2$.\\[20pt]
%
\noindent\textit{Induction Step:}
%
Assume that $\hat{\lambda}_{J}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{r}$, and that $\hat{\lambda}_{J}^{r}, \hat{\lambda}_{\infty}^{r} \in \mathcal{F}_2$ are bounded. Then the induction assumption immediately implies that
\[
c_{ij}^{r} = \frac{\int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(v)
\, \mathrm{d}v}{\int_{S_{ij}} \hat{\lambda}_{J}^{r}(v) \dee{v}} \to 1\ \mbox{for all $i, j$ and }
(\bar{\lambda}_{J}^{r})^{1/2} \stackrel{\mathcal{L}^1}{\longrightarrow}
(\hat{\lambda}_{\infty}^{r})^{1/2}\ \mbox{on $\mathcal{M}$.}
\]
As in part (I), Lemmas~\ref{lemma:piece_approx} and~\ref{lemma:integral_op1} imply that
%
\begin{align*}
& \norm{\hat{\lambda}_{J}^{r+1} - \hat{\lambda}_{\infty}^{r+1}}\\
% line 2
&\hspace{.15in} \leq \sum_{ij} Y_{ij} \left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(x) \dee{x} \right)^{-1} \int_{\mathcal{M}} \int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}} \left| c_{ij}^{r} \left[\bar{\lambda}_{J}^{r}(u) \, \bar{\lambda}_{J}^{r}(x)\right]^{1/2} \right.\\
& \hspace{4.5in}- \left.\left[\hat{\lambda}_{\infty}^{r}(u) \, \hat{\lambda}_{\infty}^{r}(x)\right]^{1/2} \right| \, \mathrm{d}u \, \mathrm{d}x \\
&\\
% line 3
&\hspace{.15in} \leq \sum_{ij} Y_{ij} \left(\int_{S_{ij}} \hat{\lambda}_{\infty}^{r}(x) \, \mathrm{d}x \right)^{-1}
\int_{\mathcal{M}} \int_{\mathcal{M}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} \off(u) K_h(u-x)\dee{u}} \left\{ c_{ij}^{r}
\left[\bar{\lambda}_{J}^{r}(x)\right]^{1/2} \left| \left[\bar{\lambda}_{J}^{r}(u)\right]^{1/2} - \left[\hat{\lambda}_{\infty}^{r}(u)\right]^{1/2} \right| \right. \\
% line 4
& \hspace{.9in} + \left. \left| c_{ij}^{r} - 1 \right|
\left[\hat{\lambda}_{\infty}^{r}(u) \, \bar{\lambda}_{J}^{r}(x)\right]^{1/2} + \left[\hat{\lambda}_{\infty}^{r}(u)\right]^{1/2} \left|
\left[\bar{\lambda}_{J}^{r}(x)\right]^{1/2} - \left[\hat{\lambda}_{\infty}^{r}(x)\right]^{1/2} \right| \right\} \dee{u} \dee{x} \\
&\\
% line 5
&\hspace{.15in} \leq  \area{\map} \sum_{ij} Y_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_{\infty}^{r}(x) \, \mathrm{d}x \right)^{-1} M_h\Bigg[
|c_{ij}^{r}| \norm{(\bar{\lambda}_{J}^{r})^{1/2}} \cdot 
\norm{(\bar{\lambda}_{J}^{r})^{1/2} - (\hat{\lambda}_{\infty}^{r})^{1/2}} \\
% line 6
& \hspace{.9in} + |c_{ij}^{r} - 1| 
\norm{(\bar{\lambda}_{J}^{r})^{1/2}} \cdot \norm{(\hat{\lambda}_{\infty}^{r})^{1/2}} + \norm{(\hat{\lambda}_{\infty}^{r})^{1/2}} \cdot \norm{(\bar{\lambda}_{J}^{r})^{1/2} - (\hat{\lambda}_{\infty}^{r})^{1/2}}
\Bigg] \to 0.
\end{align*}
%
Provided that $\hat{\lambda}_{J}^{r}$ and $\hat{\lambda}_{\infty}^{r}$ are bounded, $\hat{\lambda}_{J}^{r+1}$ and $\hat{\lambda}_{\infty}^{r+1}$ are bounded, implying that $\int_{\mathcal{M}} (\hat{\lambda}_{J}^{r+1}(u))^{1/2} \dee{u} < \infty$ and $\int_{\mathcal{M}} (\hat{\lambda}_{\infty}^{r+1}(u))^{1/2} \dee{u} < \infty$.
%
%
It follows by induction that, for all $r$, ({\bf A}) $\hat{\lambda}_{J}^{r} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{r}$, and ({\bf B}) $\hat{\lambda}_{J}^{r}$, $\hat{\lambda}_{\infty}^{r} \in \mathcal{F}_2$.

\end{document}