\documentclass[12pt]{article}
\usepackage{layout,pdfsync,latexsym, array, enumerate, amsmath, amsthm,amssymb, amsfonts,natbib, subfigure}
\usepackage[mathscr]{eucal}
\usepackage{epsf,epsfig}

\bibliographystyle{apalike}

\textwidth 6.5in \textheight 9.00in \oddsidemargin -0.15in
\evensidemargin -0.15in \topmargin -0.25in
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}{Corollary}[theorem]
\newtheorem{example}{Example}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{defn}{Definition}[section]
\newcommand{\lowtilde}[1]{\mathop{#1}\limits_{\textstyle\tilde{}}}
%\renewcommand{\baselinestretch}{1.4}


\newcommand{\off}{O}
\newcommand{\poly}{{\cal P}(u-x;\mathbf{a})}
\newcommand{\lik}{\ensuremath{\mathcal{L}}}
\newcommand{\map}{\ensuremath{\mathcal{M}}}

\begin{document}
\title{Local Likelihood and the EMS Algorithm}
\author{Chun-Po Steve Fan\footnote{Chun-Po Steve Fan is a doctoral student in the Department of Public Health Sciences at the University of Toronto}, Jamie Stafford\footnote{University of Toronto} and Patrick E. Brown\footnote{University of Toronto and Cancer Care Ontario} }
\thispagestyle{empty}
\date{}
\maketitle
%\renewcommand{\baselinestretch}{1.4}
\begin{abstract}
The use of local likelihood methods \cite{tibshirani1987ll,
loader1996lld} in the presence of data that is either interval/area
censored, or has been aggregated into bins, leads naturally to the
consideration of EM-type strategies, or rather local-EM algorithms.
We begin by exploring a class of local-EM algorithms for density
estimation~\cite{ braun2005lld} where one member of this class
retains the simplicity and interpretive appeal of the usual kernel
density estimate. We demonstrate that using a particular conditional
distribution at the E-step results in the algorithm collapsing
explicitly into an EMS algorithm of the type considered by \cite{
silvermanems}. This is true for the entire local likelihood class
and may be extended to other classes. In particular we consider the
local likelihood class for intensity estimation and generalize the
self-consistency algorithm of \cite{ Hu2008gls} for panel count
data.

The advantage of identifying a relationship between local
likelihood and the EMS algorithm is that the former provides a
natural context for the latter, which is often referred to as ad hoc
in the literature, while the latter provides a set of tools to guide
the use, and implementation, of local-EM algorithms. For example, we
expose a previously unknown connection between local-EM algorithms
and penalized likelihood that is analogous to the more familiar
pairing of EM and likelihood.
\end{abstract}

\vspace{10pt} \small \it Keywords: density estimation; intensity
estimation; interval/area censoring; local-EM; panel counts; penalized
likelihood; self-consistency \rm \normalsize



\section*{Notes}

\begin{itemize}
    \item $i = 1 \ldots n$ denotes map, or individual

\item $j = 1 \ldots J_i$ denotes cell $S_{ij}$ in a tessellation of map $i$ or panels for individual $i$.

\item $k = 1 \ldots K_i$ indexes the observed events $X_{ik}$ or $T_{ik}$ for individual $i$ or in map $i$

\item $\ell$ is an age-sex group

\item $m = 1 \ldots M$ denotes cell $Q_m$ in a partition over all $i$

\item $\lambda(t)$ is the true intensity or density, and $\hat\lambda(t)$ is its estimate.  $\Lambda(t)$ is the cumulative intensity or CDF.

\item $Y$ denotes a case count

\item $Z(t)$ is a dropout indicator
\end{itemize}

I've added a lot of section headings, just to keep things clear, we can remove some of them later.

\section{Introduction}
In this paper we consider extending the methods of \cite{
braun2005lld} for density estimation to situations where an
intensity function is the object of interest. Developments
eventually lead to a generalization of the self-consistency
algorithm of \cite{ Hu2008gls}. Here data may be interval censored,
it may be temporal and come in the form of panels counts, or it may
be spatial and area censored. Whatever form the censoring takes, the
use of local likelihood techniques naturally leads to the
consideration of local-EM algorithms.

In situations where data are interval censored, \cite{ braun2005lld}
proposed a naive kernel density estimator, embedded it in a local
likelihood class where it was recognized as a local-EM algorithm,
and demonstrated that it was a generalization of the
self-consistency algorithm of Li \emph{et al.}\ (1997). However,
despite these interesting developments, convergence of the local-EM
algorithm was difficult to demonstrate and it was not clear whether
its fixed point maximized any particular criterion.

In this paper we also consider simplifying the E-step of a local-EM
algorithm by approximating conditional expectations using a
piecewise constant density function. This results in the local-EM
algorithm collapsing explicitly into an EMS algorithm where a
smoothing step is added to the expectation and maximization steps of
the usual EM algorithm \cite{ silvermanems}. This has two
advantages. First it embeds the EMS algorithm in the local
likelihood context where it is seen to arise naturally from the
consideration of EM-type strategies. \cite{silvermanems} had
originally referred to the EMS algorithm as ad hoc but its
relationship to local likelihood suggests
otherwise\footnote[1]{\cite{nychka1990spa} demonstrates that a
modified EMS algorithm is related to penalized likelihood. As a
result he also suggests that the EMS algorithm is not ad-hoc.}.
Secondly, the EMS algorithm has been extensively studied, and much
is known about its convergence (Latham, 1994, 1995, 1996) and its
relationship to penalized likelihood (Nychka, 1990). The latter
suggests a previously unknown connection between local-EM algorithms
and penalized likelihood analogous to the more familiar pairing of
EM and likelihood.

For the sake of clarity we initially separate the
treatment of density and intensity estimation, although \S 2
provides the notation used for both cases. In \S 3 we return to the
setting of Braun, Duchesne and Stafford (2005) and demonstrate that
simplifying the E-step in their local EM algorithm results in an
EMS, or adaptive EMS algorithm. Here the relationship with the EM
algorithm of Turnbull (1976) becomes explicit and we explore
convergence properties using the results of Latham (1994, 1995, 1996). The
treatment of intensity estimation in \S 4 resembles the details of
\S 3 due to similarities in local likelihoods, however the context
is much broader. Here we focus on the temporal context, where data
comes in the form of panel counts, but the algorithms derived are virtually
identical to what would be used in the spatial context where data
may be area censored. A full treatment of the spatial context in an
epidemiological setting may be found in Brown, Fan and Stafford
(2008). A brief simulation study is given in \S 5.

In \S 6 we present results meant to strengthen the suggestion that,
for the context considered in this paper, local-EM and penalized
likelihood may be paired in a manner analogous to the pairing of EM
and likelihood. In \S 6.1 we demonstrate that the use of an equivalent
kernel in a local-EM algorithm leads to the modification necessary
to maximize of a penalized likelihood (Nychka 1990). In \S 6.2 we prove
the uniform
convergence of the EMS iterate to its local-EM counterpart. This,
and the developments of \S 3 \& 4, suggest local-EM and EMS
techniques may be thought of synonymously. Finally, in \S 6.3 we study
the penalty of \S 6.1
under the conditions of \S 6.2. Final remarks are given in \S 7.

\section{Some initial details}

\subsection{Processes in time}

In the context of this paper we assume a study consists of $n$
independent subjects where the time of an event, or a recurring
event, is of interest. Each subject is observed at a set of times
${\cal T}_i=\{\tau_{ik}, k = 1 \ldots K_i\}$ determined by a visit
process assumed to be independent of the event time process. The
event time process is assumed to be either a failure time process or
a non-homogeneous Poisson process. In either case, the counting
process $N_i(t)$ gives the number of events up to time $t$ for the
$i$th subject. Interest lies in the expected value
$$
\Lambda(t)=E[N_i(t)]
$$
which is assumed to be common to each subject in the study. When the
event time process is a failure time process, $N_i(t)$ is an
indicator process for the failure time $T_i$, and $\Lambda(t)$ is
the cumulative distribution function for $T_i$ with density $\lambda(t)$.
Otherwise $\Lambda(t)$ is a cumulative intensity function for the
Poisson process with intensity $\lambda(t)$.

{\tt can we avoid defining the cumulative density or intensity?  we don't use it after this I don't think}

\subsubsection{Failure time processes}

In the case of a failure time process the event time $T_i$ is
assumed to either fall between two adjacent elements of ${\cal T}_i$
or to be right censored. In either case $T_i$ is interval censored
where we denote the relevant interval as $S_i=[L_i,R_i]$ where
$L_i,R_i\in{\cal T}_i\cup \{\infty\}$ and of course $T_i\in S_i$.
Note that if the event time for the $i$th subject is right censored
we set $R_i=\infty$. The observed data is then a sequence of
independent intervals $S_1 \ldots S_n$ some of which may overlap. We
let ${\cal Q}=\{Q_m; m=1,\ldots,M \}$ denote the partition of the data
defined by the collection of endpoints $\{L_i,R_i;i=1,\ldots,n\}$.
For example, if $n=2$ and $S_1=[0,3],~S_2=[1,2]$ then we would have
${\cal Q}=\{[0,1],[1,2],[2,3]\}$.

The goal is to estimate the density function $\lambda(t)$ of the failure times $T_i$.  To reduce the estimation to a finite dimensional problem, we focus on estimating the discretization
\[
\bar\lambda(t) = \int_{Q_m} \lambda(u)\,\mathrm{d}u / ||Q_m||;\  t \in Q_m.
\]
Here $\bar\lambda(t)$ may
be formally referred to as the ${\cal Q}$-approximant of $\lambda(t)$
\citep{royden1988ra}.
%(Royden, 1988).


\subsubsection{Poisson processes in time}

In the case of a Poisson process event times are still interval
censored but we may have multiple events in each interval. For
subject $i$ we denote the times of these events as $T_{ik}$ where
$k=1,\ldots, K_i$ and $K_i$ denotes the number of events observed
for the $i$th subject. In this setting
$S_{ij}=[\tau_{ij},\tau_{ij+1}]$ is referred to as the $j$th panel
for the $i$th individual and we denote the number of events in the
interval $S_{ij}$ by $Y_{ij}=||k;T_{ik} \in S_{ij}||$.
Following the setup of \cite{Hu2008gls} we let
$${\cal T}=\cup_i^n {\cal T}_i=\{\tau_m; m= 0 \ldots M\}$$
and again
let ${\cal Q}=\{Q_m; m=1 \ldots M\}$ denote a partition of the data
where now $Q_m=[\tau_{m-1},\tau_m]$.


The $\tau_{ij}$ are free to differ
indicating the potential for subjects to have different dropout
times. To accommodate this we define $Z_i(t)$ as an indicator of
dropout for the $i$th subject and set $Z(t)=\sum_i Z_i(t)$. We
assume $Z_i(t)$ is a monotone process for all $i$ and make the
further assumption that dropout can only occur at a time $\tau_m \in {\cal
T}$.

The aim here is to estimate the intensity function $\lambda(t)$ of the $T_{ik}$.  Again, we estimate the ${\cal Q}$-approximant $\bar\lambda(t)$ which is constant within each $Q_m$.


\subsection{Spatial Poisson point processes}
\label{sec:spatialIntro}

Consider a series of inhomogeneous Poisson point processes
$X_i=\{x_{ik} ; k=1 \ldots K_i\}$ in  overlapping regions $M_i
\subset \Re^d$.  The intensity surface of $X_i$ takes the form
$$
\rho_i(s) = O_i(s) \lambda(s)
$$
where $O_i(s)$ is an offset surface known \emph{a priori} and
$\lambda(s)$ is assumed to be smooth.  In disease mapping
applications, the $X_i$ are the residential locations of individual
cases during the $i$th census period, $O_i(s)$ is the population
density in the region during this period (which is not smooth), and
$\lambda(s)$ is the smoothly-varying risk surface due perhaps to
spatially varying social or environmental conditions. The
log-likelihood function given the locations $X$ is
\begin{equation}\label{eq:fulllikelihood}
\lik(\lambda; X) = \sum_{i=1}^n \left\{\sum_{k=1}^{K_i} \log[\lambda(x_{ik})] - \int_{M_i} O_i(s) \lambda(s) ds \right\}+C,
\end{equation}
where the constant term is  $C=\sum_{ik} \log[O_i(x_{ik})]$.

The motivation of this paper is estimating $\lambda(x)$ when the
locations $x_{ik}$ are area-censored, with the data consisting of
sets $R_{ik}$ and $x_{ik} \in R_{ik}$. An important special case is
where $M_i$ is composed of a tessellation of sets $\{S_{ij},
j=1\ldots J_i\}$.  For the disease mapping application, the $M_i$
correspond to a city boundary in census period $i$, the $S_{ij}$
are the census regions within the city (which vary between census
periods), and $Y_{ij}$ is the number of cases in $S_{ij}$.

Again the intensity $\lambda(x)$ is approximated by a piecewise constant function $\bar\lambda(x)$ within a prediction tessellation ${\cal Q}$, with
\[
\bar\lambda(x) = \int_{Q_m} \lambda(u)\,\mathrm{d}u / ||Q_m||;\  x \in Q_m.
\]
The $Q_m$ must be disjoint, the boundaries of the $Q_m$ must not cross those of the $S_{ij}$, and each of the maps $M_i$ must be entirely covered by ${\cal Q}$.  One way of accomplishing this is for ${\cal Q}$ to be the regions within a partition obtained by overlaying the boundaries of each of the $S_{ij}$.  When the boundaries of the $S_{ij}$ correspond to edges on a lattice (or the $S_{ij}$ are approximated by forcing them to lie on a lattice), the $Q_m$ can be the cells within the lattice.


The censoring leads to the consideration of local-EM algorithms
that, upon implementation, result in explicit EMS algorithms of the
type considered in \citet{silvermanems}. This observation not only
provides a formal motivation for the EMS algorithm, but it also
allows us to pair local-EM with penalized likelihood in a manner
analogous to the pairing of EM and likelihood. Much of the detail
for density and intensity estimation has a common structure which we
exploit by presenting a general treatment in this section. The
distinctiveness of each case is exposed in the examples of the next
section.



\section{Local-EM for density and intensity estimation}

\subsection{Local-EM and the EMS algorithm}\label{sec:llems_alg}
Denote by $\lambda$ the density or intensity function of interest.
Local likelihoods suitable for the flexible estimation of $\lambda$ at a fixed location $x$ have the general form
\begin{eqnarray}\label{llge}
{\cal L}[\lambda(x)]=\sum_{ik} K_h(X_{ik}-x) \log\{\lambda(X_{ik})\}
- \sum_{i}\int_{\cal M} \off_i(u)K_h(u-x)\lambda(u) \,\mathrm{d}u.
\end{eqnarray}
$K_h(z)= K(z/h)/h$ is a symmetric positive kernel function with
$\int K(z) \,\mathrm{d}z = 1$, and $O_i(x)$ is a function that
increases the flexibility of (\ref{llge}) as described in Section \ref{sec:spatialIntro}. Following \cite{loader1996lld} we consider the
polynomial approximation with coefficients $\mathbf{
a}=\{{a}_{0},{a}_{1},\ldots,{a}_{p}\}$ centred around $x$ with
\begin{eqnarray*}
\log\{\tilde\lambda(u;x, \mathbf{a})\}={\cal P}(u-x;\mathbf{a})=\sum_{j=0}^p a_j(u - x)^j.
\end{eqnarray*}
Substituting $\tilde\lambda(u;x,\mathbf{a})$ for $\lambda(\cdot)$ in (\ref{llge}) and maximizing the likelihood with respect to  $\mathbf{a}$ yields the estimate
$\hat\lambda(x) = \tilde\lambda(x;x,\hat{\mathbf{a}} )=  \exp(\hat{a}_0)$.

When the data are interval or area censored with $X_{ik} \in R_{ik}$, one may consider
replacing (\ref{llge}) with
\begin{multline}\label{llicdp}
{\cal L}({\bf a};x)=\sum_{ik}E_\lambda [K_h(X_{ik}-x){\cal P}(X_{ik}-x;\mathbf{a})|X_{ik}\in R_{ik}]\\
 - \sum_{i}\int_{\cal M}O_i(u) K_h(u-x)
\exp\{{\cal P}(u - x;\mathbf{a}) \} \, \mathrm{d}u
\end{multline}
leading to a local-EM algorithm that cycles through two steps at
each iteration:
\begin{quote}
\begin{description}
\item[E-step:] compute the relevant expectations using the current
estimate  $\hat\lambda(\cdot)$
\item[M-step:] maximize ${\cal L}({\bf a};x)$ to get updated estimates of
$\mathbf{a}$ and hence $\hat\lambda(u) = \exp\{{\cal P}(u-x;\mathbf{a})\}$.
\end{description}
\end{quote}
The algorithm differs from a typical EM algorithm because, at the
E-step, expectation is computed with respect to an estimate of the
infinite dimensional parameter $\lambda$ while, at the M-step, we only
estimate this parameter locally at $x$. As such the typical
arguments concerning convergence of the EM algorithm cannot be
brought to bear. Furthermore, if the local-EM algorithm converges to
a fixed point $\hat{\lambda}$, it isn't clear what criterion this fixed point
optimizes.

When the $R_{ik}$ are elements of a tessellation $S_{ij}$ of ${\cal M}_i$, write $Y_{ij} = |k; R_{ik} = S_{ij}|$ and the local-EM algorithm can be written as
\begin{equation}
\label{eq:lambdarp1}
\hat{\lambda}^{(r+1)}(x)=\sum_{ij}
Y_{ij}\mbox{E}_{\hat{\lambda}^{(r)}}\left[ \left. K_h(X -x ) \right| X
\in S_{ij}\right]/\Psi_h(x; \hat{\bf a}^{(r)})
\end{equation}
where
\begin{eqnarray*}
\Psi_h(x; \bf a)&=&\sum_{i}\int_{ {\cal M}_i} O_i(u)K_h(u-x)\exp\left\{ \poly -a_0\right\}\, \mathrm{d}u
\end{eqnarray*}
and ${\bf \hat{a}}^{(r)}$ solves the local likelihood equations based
on ${\cal L}({\bf a};x)$ with $\lambda$ replaced by
$\hat{\lambda}_r$.  The algorithm is then iterated until $\hat\lambda^{(r)}$ converges.


To evaluate the expectation in (\ref{eq:lambdarp1}), the conditional probability of an event being in a region $Q_m$
\[
p_{ijm} = pr(X \in Q_m | X \in S_{ij})
\]
is needed.  Clearly $p_{ijm} = 0$ unless $Q_m \subset S_{ij}$.
Recall that the discretized version $\bar\lambda(x)$ of $\lambda(x)$ is estimated, which is equivalent to estimating the quantities $\Lambda_m$ defined below.
 A given event's location has density proportional to the intensity function $\rho_i(x) = O_i(x) \lambda(x)$.  As will be explained in Section {\tt on ems},  the assumption that the offsets $O_i(x)$ are constant within regions $S_{ij}$ is necessary.
 Writing
 \[
\Lambda_m = \int_{Q_m} \lambda(x) dx,
\]
this assumption results in  the conditional probability reducing to
\[
p_{ijm} = \Lambda_m\left/\sum_{n;Q_n \subset S_{ij}}\Lambda_n \right. .
\]
Finally note that the distribution of an event within $Q_m$ is, under the piecewise constant intensity $\bar\lambda(x)$, uniform with density $1/||Q_m||$.

Splitting the expectation into the distribution of $X$ within a cell $Q_m$ and the distribution of the $Q_m$ containing $X$ within the censoring region $S_{ij}$ gives:
\begin{align*}
\mbox{E}_{\bar{\lambda}}\left[ \left. K_h(X -x ) \right| X
\in S_{ij}\right] &=
\sum_{m;Q_m \subset S_{ij} } p_{ijm} \mbox{E}_{\bar{\lambda}}\left[ \left. K_h(X -x ) \right| X
\in Q_m \right]
\\
&=
\sum_{m;Q_m \subset S_{ij} } p_{ijm} \int_{Q_m} K_h(u -x ) / \|Q_m\| du
\\ &=
\left. \sum_{m;Q_m \subset S_{ij} } \frac{\Lambda_m}{||Q_m||} \int_{Q_m} K_h(u -x ) du \right/ \sum_{m;Q_m \subset S_{ij}}\Lambda_m.
\end{align*}
Returning to (\ref{eq:lambdarp1}),  at the next iteration we are required to compute
$\hat{\Lambda}^{(r+1)}_m$.  This leads to the simple iteration
\begin{equation}\label{EMS}
\hat\Lambda^{(r+1)}_m =
\sum_{ij} \sum_{n;Q_n \subset S_{ij} }
\frac{Y_{ij} \hat\Lambda_n^{(r)} }{\sum_{n';Q_{n'} \subset S_{ij}} \hat\Lambda_{n'}^{(r)} }
 \left(
  \left(\|Q_n\| \right)^{-1}
   \int_{Q_m}
    \frac{\int_{Q_n} K_h(u -x ) du }{ \Psi_h(x; \hat{\mathbf{a}}^{(r)}) } dx
 \right).
\end{equation}
This may be expressed in terms of matrices as
\begin{equation}\label{EMSic}
\hat{\bf \Lambda}^{(r+1)}={\cal M}(\hat{\bf \Lambda}^{(r)}) {\cal K}_h(\hat{\bf \Lambda}^{(r)}),
\end{equation}
where ${\cal K}_h$ is a $k \times k$ smoothing matrix with
entries

{\tt where did the offsets come from here?}
\[
{\cal K}_{mn}=
\frac{\sum_i\off_{i}(Q_m)}{|\!|Q_m|\!|}
\int_{J_n}
\frac{\int_{J_m}K_h\left({u-x}\right)\,
\mathrm{d}u}{\Psi_h(x; \hat{\mathbf{a}}^{(r)})}\, \mathrm{d}x,
\]
 and ${\cal M}(\hat{\bf \Lambda}^{(r)})$ is a $k$ dimensional row vector whose
$m$th entry is
\[
{\cal M}(\hat{\bf \Lambda}^{(r)})_m = \left[\sum_i O_i(Q_m)\right]^{-1} \sum_{ij} \dfrac{Y_{ij} \hat\Lambda^{(r)}_m}{\sum_{n;Q_n \subset S_{ij}}  \hat{\Lambda}^{(r)}_n }  .
\]


The latter is recognized as a step in an EM algorithm, and hence the
iteration (\ref{EMSic}) is seen to explicitly involve an
expectation, maximization \emph{and} smoothing step. That is, our
implementation of the local-EM algorithm has resulted explicitly in
an EMS algorithm. Note that ${\bf \hat{a}}^{(r)}$ depends on $\hat{\bf
\Lambda}^{(r)}$, and so does the smoothing step. Hence we refer to the
algorithm as adaptive. However, in the locally constant case in
which the polynomial $\cal P$ is truncated at its leading term,
$\Psi_h$ simplifies to
\[
\Psi_h(x;\mathbf{a})=\sum_{i} \int_{M_i} O_i(u) K_h(u-x)\,
\mathrm{d}u,
\]
and the algorithm is no longer adaptive.

{\tt previous equations}
\begin{align}
\hat{\Lambda}_{r+1 s}&=\sum_{ijl}n_{ij} \dfrac{\hat{\Lambda}_{rl} {\cal
I}_{ijl}}{|\!|J_l|\!|\sum_m \hat{\Lambda}_{rm} {\cal
I}_{ijm}} \int_{J_s} \frac{\int_{J_l}K_h\left({u-x}\right)\,
\mathrm{d}u}{\Psi_h(x;\, \hat{ \bf a}) }\, \mathrm{d}x.
\end{align}


$${\cal K}_{\ell s}=
\frac{\sum_i\off_{i\ell}}{|\!|J_\ell|\!|}\int_{J_s}\frac{\int_{J_\ell}K_h\left({u-x}\right)\,
\mathrm{d}u}{\Psi_h(\hat{\bf a}_{r})}\, \mathrm{d}x,$$

$$
\sum_{ij}n_{ij} \frac{\hat{\Lambda}_{r\ell} {\cal
I}_{ij\ell}}{(\sum_i\off_{i\ell}) \sum_m \hat{\Lambda}_{rm} {\cal
I}_{ijm}}.
$$


\vspace{20pt} \noindent {\bf General Remark:} \bigskip

\noindent As $h\downarrow 0$ the integral $
\int_{J_l}K_h\left({u-x}\right)\, \mathrm{d}u$ becomes the indicator
function $1_{J_l}(t)$ for $J_l$, $\int_0^{\tau_{k_i}} K_h(u-t)\,
\mathrm{d}u \rightarrow Y_i(t)$ and $\int_0^{\infty}Y(u)K_h(u-t)\,
\mathrm{d}u\rightarrow Y(t)$. As a result we have
\[
\lim_{h\downarrow 0}{\cal K}_{ls}=
\lim_{h\downarrow0}\frac{Y(\tau_l)}{||J_l||}
\int_{J_s}\frac{\int_{J_l}K_h\left({u-t}\right)\,
\mathrm{d}u}{\int_0^{\infty}Y(u)K_h(u-t)\, \mathrm{d}u}\,
\mathrm{d}t =\frac{Y(\tau_l)}{||J_l||}\int_{J_s}
\frac{1_{J_l}(t)}{Y(t)}\, \mathrm{d}t =\delta_{ls}.
\]
Consequently ${\cal K}_h$ converges to the identity matrix and the
iteration (\ref{EMSic}) becomes the EM algorithm of
\cite{Hu2008gls}.

\bigskip
%
\noindent {\bf Remark on an Adaptive EMS Algorithm:} In cases where
local-EM results in an adaptive EMS algorithm the equivalent kernel
remains the same. However, some care is required and we only sketch
details relying to some extent on those given in
\cite{green1990uap}. In the adaptive case, the smoothing matrix $S$
is now replaced by ${\cal K}_h(\boldsymbol{\theta})$ which depends
on $\boldsymbol{\theta}$. This occurs because ${\cal
K}_h(\boldsymbol{\theta})$ involves the solution, $\hat{\bf
a}_{\boldsymbol{\theta}}$, to a set of local likelihood equations
that in turn involve $E_\theta$, or rather an E-step. In reference
to \cite{green1990uap} this implies that $J(\boldsymbol{\theta}')$
of expression (2) is then replaced by
$J(\boldsymbol{\theta}'|\boldsymbol{\theta})$, indicating that both
terms of (2) now involve an E-step. What we claim is that use of the
proposed equivalent kernel yields the one step late algorithm for
expression (2) of Green with $J(\boldsymbol{\theta}')$ replaced with
$J(\boldsymbol{\theta}'|\boldsymbol{\theta})$. What we do not show
is that this algorithm maximizes expression (1) of
\cite{green1990uap}, but we suspect this is true.

\subsubsection{An Upper Bound for the Convergence Rate}

In the locally constant case in which the smoothing matrix
$\mathcal{K}_h$ does not depend on $\boldsymbol{\Lambda}$, we might
demonstrate the convergence of the EMS mapping by showing its
spectral radius at an EMS solution is less than 1. Here, an EMS
solution refers to any value that satisfies
$\boldsymbol{\Lambda} = \mathcal{M}(\boldsymbol{\Lambda})
\mathcal{K}_h$.  Since \cite{latham1995ems} shows the uniqueness of
the EMS solution in the region where $\Lambda_k > 0$ for all $k$,
the iteration will converge to the unique solution if it converges
at all.

\cite{green1990uap} shows that the spectral radius of the
$\mathcal{M}(\Lambda)$ at an EM solution is less than 1, and
\cite{silvermanems} claims that a smoothing matrix reduces the
spectral radius of $\mathcal{M}(\Lambda)$. Nonetheless, these two
properties are not sufficient conditions for the algorithmic
convergence because the spectral radii are evaluated at different
values of $\boldsymbol{\Lambda}$. Empirical evidence suggests that
the EMS iteration usually converges.  It is also observed that the
EMS converges faster than EM when $h$ is sufficiently large, but
there exists some $h >0$ such that the convergence rate of EMS is
lower than that of EM.

Let $\hat{\boldsymbol{\Lambda}}$ denote the unique EMS solution and
 $\gamma$ be the spectral radius of $\partial
\mathcal{M}=\partial
\mathcal{M}(\hat{\boldsymbol{\Lambda}})/\partial
\boldsymbol{\Lambda}$. Here, we derive an upper bound for $\gamma$
and show that this upper bound increases with the
``roughness'' of $\boldsymbol{\Lambda}$ and decreases as the
bandwidth increases. By the Perron--Frobenius theorem,
$$
\gamma \leq \max_s \sum_t \left[ \partial \mathcal{M}\,
\mathcal{K}_h \right]_{ts},
$$where $\partial \mathcal{M}$ is a $k \times k$ matrix with
\begin{align*}
[\partial \mathcal{M}]_{ts} &\equiv \dfrac{\partial
\mathcal{M}_s}{\partial \Lambda_t} =\begin{cases} \sum_{ij} Y_{ij}
\dfrac{\sum_{\ell \ne s} \mathcal{I}_{ijs} \mathcal{I}_{ij\ell}
\Lambda_\ell
}{\left(\sum_{\ell} \mathcal{I}_{ij\ell} \Lambda_\ell \right)^2} & \mbox{when $s=t$}\\
& \\
\sum_{ij} Y_{ij} \dfrac{-\mathcal{I}_{ijt} \mathcal{I}_{ijs}
\Lambda_s}{\left(\sum_{\ell} \mathcal{I}_{ij\ell} \Lambda_\ell
\right)^2} & \mbox{otherwise}
\end{cases}
\end{align*}
After some algebraic manipulations, it can be shown that
\begin{align}
\gamma & \le \max_s \sum_k \left( \sum_{ij} Y_{ij} \dfrac{\sum_{\ell
\ne k} \mathcal{I}_{ijk} \mathcal{I}_{ij\ell} (\hat{\Lambda}_\ell -
\hat{\Lambda}_k) }{\left(\sum_{\ell} \mathcal{I}_{ij\ell}
\hat{\Lambda}_\ell \right)^2} \right) \mathcal{K}_{ks}.
\label{e:spectral_upper}
\end{align}
%Note that $\hat{\boldsymbol{\Lambda}}$ is a function of the
%bandwidth value $h$.

\noindent When the $Q_m$ are of equal length, not only does increasing
$h$ narrow the spread of $\hat{\boldsymbol{\Lambda}}$, but it also
makes $\mathcal{K}_{\ell s}$ more uniform within a column. That is,
$$
(\hat{\Lambda}_\ell - \hat{\Lambda}_k) \to 0 \ \forall \ \ell, k
\quad \mbox{and}\quad \mathcal{K}_{\ell s} \to \frac{\sum_i
\off_{i}(Q_\ell)}{\sum_{ik} \off_{i}(Q_k)} \ \forall\ s.
$$
%$$
%(\hat{\Lambda}_\ell - \hat{\Lambda}_k) \to 0 \ \forall \ \ell, k
%\quad \mbox{and}\quad \mathcal{K}_{\ell s} \to \frac{\sum_i
%\off_{i\ell}}{\sum_{ik} \off_{ik}} \ \forall\ s.
%$$
As a result, increasing the bandwidth pushes the upper bound
toward 0, thus accelerating the algorithmic convergence.


\subsection{The Role of local-EM} \label{sec:llem_role}
Thus far we have exposed an interesting relationship between classes
of algorithms that demonstrates the EMS algorithm arises naturally
from local likelihood considerations. This occurs because of the way
we have chosen to implement the local-EM algorithm. However, we
could have instead chosen to implement this algorithm through
multiple imputation, or by using MCEM, or through some other
favorite techniques. So why EMS?

In this section, we summarize results meant to strengthen the
suggestion that, for the context considered in this paper, local-EM
and penalized likelihood may be paired in a manner analogous to the
pairing of EM and likelihood. Details are given in the appendix.

\vspace{20pt}
%
\noindent {\bf Local-EM and the Modified EMS algorithm:}
\cite{nychka1990spa} identified a relationship between EMS and
penalized likelihood by demonstrating that a modified EMS algorithm
maximizes a penalized likelihood function.  Assume an equally spaced
partition ${\cal Q}$ and let $\theta_{im}^2 = \int_{J_m}
\rho_i(x)\,\mathrm{d}x$, where $\rho_i(x) = \off_i(x)\lambda(x)$.
Omitting parts that do not depend on $\lambda$, the nonparametric
likelihood is given by
\begin{equation} \label{e:npllk}
\mathcal{L}(\boldsymbol{\theta}) = \sum_{ij} Y_{ij} \log\left(
\sum_{k; Q_k \subseteq S_{ij}}\theta_{ik}^2 \right) - \sum_{ik}
\theta_{ik}^2
\end{equation} with the penalty equal to $\boldsymbol{\Theta}^{T} \mathbf{R} \boldsymbol{\Theta}$.

%Moreover, this modified EMS algorithm was very much similar to the one step late algorithm of \cite{green1990uap}.
In Appendix \ref{appx_ems} we demonstrate that, with the appropriate choice of kernel, the local-EM algorithm may be used to maximize a penalized likelihood function. This occurs because the equivalent kernel leads to Nychka's modification of the EMS algorithm.

\bigskip
\noindent {\bf $\mathbf{\mathcal{L}}^1$-Convergence of EMS to local-EM:} In
\S\ref{sec:llems_alg} the discretization over the partition ${\cal
Q}$ resulted in a local-EM algorithm collapsing explicitly into an
EMS algorithm. In appendix \ref{appx_convergence}, we consider this
discretization as $k\to \infty$ and $\max_k |\!|Q_k|\!| \downarrow
0$. We demonstrate that the EMS iteration converges to its
local-EM counterpart in the $\mathcal{L}^1$ norm. This result suggests local-EM and EMS
techniques may be thought of synonymously.

\bigskip
\noindent {\bf Local-EM and Penalized Likelihood:} We study the penalized likelihood of \ref{appx_ems} under the conditions of \ref{appx_convergence} where $\max_k |\!|J_k|\!| \downarrow 0$.  In particular, we interpret the penalty in terms of a class of functions %and consider this class
under limiting conditions.

This ultimately allows us to speculate that the role of local-EM is to penalize the usual nonparametric likelihood for departures of the target function from the class $${\cal Z}=\left\{f \ \Bigg | \, f^{1/2}(x)=\int_{\cal
M} K_h(u-x)f^{1/2}(u)\, \mathrm{d}u \ \mbox{for all $x \in {\cal S}$,}
\right\}$$ identified by the eigenfunction of the kernel.

\section{Examples}

\subsection{Density Estimation}

In studies of the AIDS epidemic the time of infection with the HIV
virus is often of central interest. As infections are only revealed
through repeated testing the event times are interval censored and
only known to fall between two consecutive clinic visits, one where
the patient tests negative for the presence of the virus and a
followup visit where they test positive. Density estimation in this
context may be facilitated by (\ref{llicdp}) which now simplifies to
\begin{align*}
{\cal L}({\bf a},x) &= \sum_{i=1}^n E_\lambda[K_h(X_i-x){\cal
P}(X_i-x)|I_i]-n \int_{\cal M} K_h(u-x)\exp\{{\cal P}(u-x)\}\,
\mathrm{d}u.
\end{align*}
Here ${\cal M}=\Re$ and $\mathcal{O}_i(u) = 1$ for all $u \in \Re$.
$X_i$ is the event time for the $i$th individual which is only known
to fall in the interval $I_i$. In addition, for the EMS iteration
(\ref{EMSic}) we recognize ${\cal M}(\hat{\bf \Lambda}_{r})$ as a
step in the EM algorithm of \cite{turnbull1976edf}.
\cite{braun2005lld} propose a local-EM algorithm based on ${\cal
L}({\bf a},t)$ and, without being aware of it, develop an EMS
implementation.

If both the time of infection with the HIV virus and time of AIDS
onset are interval censored density estimation for the joint
distribution can still be facilitated by ${\cal L}({\bf a},t)$. Here
$X_i$ would be bivariate and ${\cal M}=\Re^2$. Typically for doubly
interval censored data estimation of the NPMLE for the joint
distribution is complex and a treatment of some of the issues can be
found in \cite{maathuis2005ran}. In the rest of this example we
consider a simple case that suggests local-EM in this context may
ease these complexities and thus deserves further exploration. For
example, use of a local-EM algorithm does not require identifying
maximal intersection (or inner most intervals in the univariate
case) and thus does require use of the height map algorithm of
\cite{maathuis2005ran}. Also the solution is unique while the NPMLE
is not. A Bayesian interpretation of local-EM provides insight into
the latter.

%Bivariate interval-censored data arise when times of two events of interest are both interval censored; for example, events of interest could be HIV infection and the manifestation of acquired immune deficiency syndrome. Here, we are interested in estimating the joint distribution of two related event times.

We give an example of hypothetical bivariate interval-censored data
to better illustrate the effect of this penalty functional on the
density estimation. As shown in Figure (??), this dataset consists
of eight observations, represented by four horizontal and four
vertical rectangles. Overlaying these observations forms a partition
of 81 unit squares, and intersections of these rectangles are
referred to as maximal intersections, which are analogous to the
innermost intervals. Similarly, a density estimate that places
positive weights anywhere other than these maximal intersections
cannot be an NPMLE, and there are multiple NPMLE's. For example, a
uniform weight of 1/16 on all 16 maximal intersections, a weight of
1/4 on the positive diagonal maximal intersections, and a weight of
1/4 on the negative diagonal maximal intersections all maximize the
nonparametric likelihood. Consequently, the EM iteration will
converge to one of the solutions depending on the initial value.
However, the empirical evidence suggests otherwise for the EMS
algorithm.

When a radially symmetrical kernel is used, the EMS iteration will
always converge to the solution that favors the uniform weight of
1/16 on all maximal intersections,
regardless of the starting value. However, if the kernel with
bandwidth values of 1.5 and .15 in the x- and y-direction is rotated
by 45 degrees, the EMS iteration will converge to the solution that
favors the weight of 1/4 on the positive diagonal. Likewise,
the iteration converges to the solution that favors the weights on the
negative diagonal if the elliptical kernel is rotated by $-45$
degrees. This phenomenon can be explained by the penalty induced by
the kernel. When the kernel is radially symmetrical, any deviations
from the maximal eigenfunction are equally penalized. However, as
the kernel becomes more elliptical, deviations in the direction of
the major axis of the elliptical contour are penalized less than
those in the direction of the minor axis.
%\begin{figure} \centering
%\includegraphics[width=2in, height=2in, angle=0]{bivariate_example}
%\includegraphics[scale=.5]{bivariate_example_penality.png}
%\caption{??}
%\end{figure}

\begin{figure}
\vspace{-.05in} \centering
%
 \subfigure{\includegraphics[width=1.5in, height=1.5in, clip=true, trim=.8in 0.45in 1in .75in]{bivariate_example.pdf}}\\
% \subfigure{\includegraphics[width=1in, height=1in, clip=true, trim=.8in 0.45in 1in .75in]{bivariate_example.pdf}}\\
%
% \subfigure[EMS Estimate with Different Kernels]{\includegraphics[scale=.5, clip=true, trim=.8in 3in 1in 2.75in]{bivariate_interval_censored_30jan2009.pdf}}
\subfigure{\includegraphics[scale=.15, clip=true, trim=1.75in 0.45in
2in .55in]{ems_1_08mar2009.pdf}}
\subfigure{\includegraphics[scale=.15, clip=true, trim=2.25in 0.45in
2in .55in]{ems_2_08mar2009.pdf}}
\subfigure{\includegraphics[scale=.15, clip=true, trim=2.25in 0.45in
2in .55in]{ems_3_08mar2009.pdf}} \caption{EMS Estimates with
Different Kernels} \label{f:bivariate_kernels}
\end{figure}


\subsection{Panel Count Data}
We consider a study in which individuals are allowed to have
multiple events. The $X_{ij}$'s in the local likelihood (\ref{llge})
are interpreted as the $j$th event time of the $i$th individual. Here,
each individual is subject to periodic assessments, and exact
event times are interval-censored.  Let $N_i(t)$ be the
\emph{observed} counting process for individual $i$ and $\tau_{ij}$
denote the $j$th assessment time of the $i$th individual.  What are
observed are the increments $Y_{ij} = N_i(\tau_{ij}) -
N_i(\tau_{i,j-1})$.
%Following the setup of
%\cite{Hu2008gls}, we let $\mathcal{T}_i$ denote the collection of
%the $i$th individual's assessment times and
%$$
%\mathcal{T} = \bigcup_i \mathcal{T}_i = \left\{ \tau_0, \tau_1, \ldots, \tau_k\right\} \mbox{ with $\tau_0=0$.}
%$$ %
%A partition $\mathcal{J}=\{ J_1,\ldots, J_k\}$ can be constructed
%with $J_j=[\tau_{j-1}, \tau_j]$. Moreover,

Individuals are allowed to drop out of the study at different time
points. We use $Z_i(t)$ to indicate whether the $i$th individual is in
the study at and after time $t$, and denote the number of
at-risk individuals by $Z(t)=\sum_i Z_i(t)$. Moreover, an
individual's event process is observable only before he/she drops
out; consequently, the intensity of the observed process equals
$Z_i(t) \lambda(t)$ (see \cite{andersen1993smb} for details).

We assume that the event, assessment and drop-out processes are
independent of one another, and the drop-out process is monotone and
discrete. Under this setting, the local likelihood (\ref{llge})
becomes
\begin{align}
 \mathcal{L}(\lambda, t) &= \sum_{ij} Z_i(X_{ij}) K_h(X_{ij}-x) \log\lambda(X_{ij}) -
\sum_{i} \int_{0}^{\infty} Z_i(u) K_h(u-x) \lambda(u) \,
\mathrm{d}u.
\end{align} Here $X_{ij}$ is the $j$th event time of the $i$th individual.
The corresponding EM mapping takes the form of
$$
[\mathcal{M}(\hat{\boldsymbol{\Lambda}}_{r})]_{l} = \sum_{ij} Y_{ij}
\frac{Z_i(\tau_l)\hat{\Lambda}_{rl} {\cal I}_{ijl}}{Z(\tau_l)\sum_m
\hat{\Lambda}_{rm} {\cal I}_{ijm}}
$$
and $$[\mathcal{K}_h]_{ls}= \frac{C(\tau_l)}{|\!|Q_l|\!|}\int_{J_s}
\dfrac{\int_{J_l}K_h\left({u-t}\right)\,
\mathrm{d}u}{\Psi_h[\mathbf{a}(t; \mathbf{\hat{\Lambda}}_r)]}\,
\mathrm{d}t.$$

%\noindent Note that $\int_{J_l} K_h( u-t)\, \mathrm{d}u \rightarrow \mathcal{I}_{J_l}(t)$ and $\Psi_h[\mathbf{a}(t;\mathbf{\hat{\Lambda}}_r)] \rightarrow Y(t)$ as $h \searrow 0$. It follows
%$$\lim_{h\downarrow 0} [\mathcal{K}_h]_{ls} =
%\lim_{h\downarrow
%0}\frac{Y(\tau_l)}{||J_l||}\int_{J_s}\dfrac{\int_{J_l}K_h\left({u-t}\right)\,
%\mathrm{d}u}{\Psi_h[\mathbf{a}(t; \mathbf{\hat{\Lambda}}_r)]}\,
%\mathrm{d}t
%=\dfrac{Y(\tau_l)}{||J_l||}\int_{J_s}\frac{1_{J_l}(t)}{Y(t)}\,
%\mathrm{d}t
%=\delta_{sl}.$$ Consequently,
\noindent As $h \downarrow 0$, %the smoothing matrix ${\cal K}_h$
%converges to the identity matrix and %
the iteration (\ref{EMSic}) becomes the self-consistent
algorithm of \cite{Hu2008gls}.

Simulations presented in this paper focus on improvements when using
local-EM due to the reduction of mean integrated squared error
(MISE). Simulations were conducted for both density and intensity
estimation but we only report those for the intensity case. Results
for the density case were extremely similar.

In the case of intensity estimation, we simulate an inhomogeneous
Poisson process with intensity $\lambda(t)$ equal to a re-scaled
gamma density function (shape $=9$ and rate $=3/4$). A subject's event
times are simulated by thinning a unit-intensity Poisson process,
where event times are either accepted or rejected with a probability
equal to $\lambda(t)$. Each subject is assumed to have a sequence of
predetermined observation times $t_1, t_2, \ldots, t_K$, where $t_i =
i$ and $K=20$. However, subjects miss a visit with increasing
probability; specifically, the probability of missing a visit equals
$(t_i/20)^{1/4} - 0.05$. Finally, a subject's panel counts are
obtained by aggregating event times among consecutive observed
visits. Note that each subject is assumed to have no event at time
0.

For each sample and for a fixed window $h$, we compute several
intensity estimates with a Gaussian kernel. The first,
$\hat{\lambda}_{ke}$, assumes no interval censoring has taken place
and uses the event times themselves, rather than the panel counts.
Using the data-dependent partition, we then compute the local-EM
estimator in both constant and linear cases and several other
estimators that are all based on smoothing the self-consistent
estimator, $\hat{\Lambda}_{HLL}$, in \cite{Hu2008gls} after their
algorithm has converged. The distinction between these latter
estimators depends on whether weights associated with
$\hat{\Lambda}_{HLL}$, are placed at the left-end, the middle, or
the right-end point of each $J_j$. For each window size $h$, $K$
samples were generated, and these were used to approximate each
estimator's MISE. We use the average ISE, defined as $K^{-1} \sum_k
\int (\hat{\lambda}_k(u) - \lambda(u))^2 \, \mathrm{d}u$, as an
estimate of MISE. This was performed for 40 different values of $h$
between $0.05$ and $3.95$ with $K=300$. The resulting MISEs for
each estimator are plotted in Figure~\ref{f: sim_mise}.

The results favor the local-EM estimator considerably. Not only does
it track the ideal estimator, $\hat{\lambda}_{ke}$ quite closely,
but it also attains the smallest MISE for all estimators based on
the censored data ($\hat{\lambda}_{ke}$ achieves a smaller MISE but
that's because it's based on the non-interval-censored data). This
is perhaps not all that surprising given $\lambda(t)$ is quite
non-linear. In cases where $\lambda(t)$ is linear the improvements
in MISE for the local-EM estimator are not as dramatic and care is
required otherwise the local-EM estimator can perform poorly.

\begin{figure}\centering
\includegraphics[scale=.6, angle=0]{mise_intensity}
\caption{The proposed local EM intensity estimate achieves the
lowest overall MISE with a small bandwidth of 0.195, compared with
the smoothed EM estimate obtained by placing expected increments at the
centres of pixels.} \label{f: sim_mise}
\end{figure}




\subsection{Spatial example: Lupus in Toronto}

The lupus clinic at Toronto Western Hospital has the location of residence of individuals with lupus in Toronto, Canada for the period from 1965 to 2007 (Give reference).    Lupus may have an environmental risk factor (cite some papers) which might be expected to result in lupus cases having a spatially structured risk surface.


The disease incidence locations and times $(x_{\ell m}, t_{\ell m})$ for individual $m$ in age and sex group $\ell$ are assumed to arise from an inhomogeneous Poisson process in space and time, with intensity
\[
\rho_\ell(s,t) = \lambda(s) \beta(t) \theta_\ell P_\ell(s,t)
\]
Here $P_\ell(s,t)$ is the population intensity (in persons per km square) of the $\ell$th age-sex group,  $\theta_\ell$ is the incidence rate for this age-sex group, and $\beta(t)$ is the time trend.  Using regionally aggregated case counts to estimate spatially varying relative risk surface $\lambda(s)$ is the objective.  The main complication is that the census regions used to aggregate the data have changed repeatedly over the study period.

The population data available are the population counts $P_{ij\ell}$ of group $\ell$ in region $S_{ij}$ for the $j$th census region in census period $i$.  The census periods are defined as beginning and ending at the mid-points between census years before and after the given census, with period $i$ covering the years $t_{i-1}$ to $t_i$.  The census regions $S_{ij}$ vary between census periods.  The data available are case counts are of the form $N_{ij\ell}$ of individuals in group $\ell$ who were diagnosed with lupus during census period $i$ while living in region $S_{ij}$.

The model is fit with a two-stage process where first  $\hat\beta(t)$ and $\hat\theta_\ell$ are estimated ignoring spatial variation in $\lambda(s)$, and these estimates are then used to construct offsets for use in the estimation of $\hat\lambda(x)$.  For simplicity we assume $\beta(t)$ and the population $P_\ell(s,t)$ are constant within census periods, reducing the parameters to $\beta_i, i=1\ldots T$.  The case counts $N_{ij\ell}$ are distributed as:
\[
N_{ij\ell} \sim \text{Poisson}(\theta_\ell \beta_i (t_{i}-t_{i-1}) P_{ij\ell}).
\]
The $\hat\beta_i$ and $\hat\theta_\ell$ can then be estimated from this generalised linear model.

Write $X_i=\{x_{ik}, k = 1 \ldots K_i\}$ to be locations of all the cases observed in census period $i$.  Treating the $\beta_i$ and $\theta_\ell$ as known, the $X_i$ are  inhomogeneous Poisson processes with intensity surfaces  $\rho_i(s) = \lambda(s)\off_i(s)$ and a likelihood function given by (\ref{eq:fulllikelihood}).
The ``offset'' surface $\off_i(s)$ is constructed assuming the population is constant within census regions, with
\[
P_{i\ell}(s) = P_{ij\ell}/ |R_{ij}|; s \in R_{ij}.
\]
As census region boundaries are chosen to make regions as homogeneous as possible, this necessary assumption is not entirely unreasonable.  The offset surface is then written as
\[
\off_i(s) = \sum_\ell \hat\theta_\ell \hat\beta_i (t_i - t_{i-1}) P_{i\ell}(s).
\]
When the true locations $X_{ik}$ are  unknown, the total case counts $Y_{ij}$ in region $S_{ij}$  can be used to estimate the relative risk $\lambda(s)$ via an EMS algorithm.



\section{I don't know where this belongs}

These last two expressions permit a comparison between the local-EM
algorithm and methods in the literature. Note that if we only have a
single map ($j=1$) then $J_l$ \& $R_{jl}$ coincide so that
$I_{ijl}=0$ for all $j\neq l$. As a result the kernel weight
simplifies to $\int_{J_l}K_h\left({u-t}\right)\,
\mathrm{d}u/||J_l||$, the algorithm (\ref{localEM}) iterates once
and the local-EM estimator simply becomes the Nadaraya-Watson
estimator advocated by Brillinger (1990, 1991, 1994) in a series of
papers concerning spatial smoothing where data is aggregated to
regions within a map.

Also note (\ref{EMS}) may be written as
\begin{eqnarray}
\hat{\bf \Lambda}_{r+1}={\cal M}(\hat{\bf \Lambda}_{r}) {\cal K}_h,
\end{eqnarray}
where ${\cal K}_h$ is a $k \times k$ smoothing matrix with
entries
$$
{\cal K}_{ls} = \frac{1}{||J_l||} \int_{J_s}{{\int_{J_l}K_h\left({u-t}\right)\,
\mathrm{d}u}\over{\int_{\cal M}\tilde{K}_h(u-t)\, \mathrm{d}u}}\,
\mathrm{d}t,$$ and ${\cal M}(\hat{\bf \Lambda}^{r})$ is a $k$
dimensional row vector whose $l^{th}$ entry is
$$
\sum_{ij}n_{ij}{{\hat{\Lambda}_{rl} {\cal
I}_{ijl}}\over{\sum_m \hat{\Lambda}_{rm} {\cal I}_{ijm}}}\cdot
$$
In other words (\ref{EMS}) may be written explicitly  as an EMS
algorithm of the type advocated by \cite{silvermanems} although here
it is formally motivated by an EM-type strategy applied to local
likelihood. Some detailed comparison of (\ref{EMS}) to
\cite{silvermanems} provides further insight. The latter refers to
quantities analogous to $R_{jl}$ \& $J_l$ as observation and
reconstruction bins respectively. In particular, the context of
\cite{silvermanems} concerns image reconstruction centered on a
single image rather than multiple maps. Nevertheless what is
proposed in this paper could well be thought of as an extension of
the image reconstruction techniques of \cite{silvermanems} to an
epidemiological setting. Furthermore the expression (2.2) of
\cite{silvermanems} and ${\cal M}(\hat{\bf \Lambda}^{r})$ are
related where, for example, their weights $p_{st}$ simplify to our
indicator variables ${\cal I}_{ijl}$ because we assume the locations
$S_{ij}$ have been measured without error. This observation provides
an avenue for extending the local-EM toolbox to settings where data
is mis-measured but this is beyond the scope of this paper.

\bibliography{llems.bib}

\appendix
\section{Appendix}
\subsection{Local-EM and the Modified EMS algorithm}\label{appx_ems}
\cite{nychka1990spa} identified a relationship between EMS and a
penalized likelihood by demonstrating that a modified EMS algorithm
solves the system of score equations of the penalized likelihood. In
this section, we demonstrate that the local-EM algorithm may be used
to maximize a penalized likelihood with an appropriate choice of
kernel.  We refer to this kernel as an equivalent kernel. This
occurs because the equivalent kernel leads to Nychka's modification
of the EMS algorithm.

We begin by first considering the following nonparametric penalized
likelihood with $\off_i(Q_k) = \off_{k}$ and $\map_i = \map$:
%$$
%\mathcal{L}(\boldsymbol{\theta}) = \sum_{ij} Y_{ij} \log\left(
%\sum_k \mathcal{I}_{ijk} \theta_k^2 \right) - n \sum_{k} \theta_k^2.
%$$
%$$
%\mathcal{L}(\boldsymbol{\theta}) = \sum_{ij} Y_{ij} \log\left(
%\sum_{k;Q_k \subseteq S_{ij}} \theta_k^2 \right) - n \sum_{k}
%\theta_k^2.
%$$
%The inclusion of the roughness penalty yields
$$
\mathcal{L}_{p}(\boldsymbol{\Theta}) =
\mathcal{L}(\boldsymbol{\Theta}) - \boldsymbol{\Theta}^{T}
\mathbf{R} \boldsymbol{\Theta},
$$ where $\mathcal{L}(\boldsymbol{\Theta})$ is the nonparametric likelihood in (\ref{e:npllk}), $\mathbf{R} = n (\mathbf{S}^{-1} - \mathbf{I})$ and $\mathbf{S}$ is
any symmetrical smoothing matrix.  Note that the nonparametric
likelihood for density estimation in \cite{turnbull1976edf} is
equivalent to likelihood in (\ref{e:npllk}) with $\theta_k^2 = p_k$
and $\sum_k p_k = 1$.  Likewise, the likelihood for intensity
estimation in \cite{wellner2000npmle} is also equivalent to
(\ref{e:npllk}) with $\off_k = 1$ for all $k$.

We explore the relationship between this penalized likelihood and
the local-EM algorithm in the locally constant case by first
considering the following function
\begin{equation} \label{e:equivalent_kernel}
(1/\rho(u))^{1/2} K_h(u-x),
\end{equation}
where $\rho$ is the true density or intensity, and $K_h(u-x)$ is any symmetric positive kernel with compact support. Renormalization of (\ref{e:equivalent_kernel}) gives
$$
K_{h}^{\ast}(u - x) = (\rho(x)/\rho(u))^{1/2} K_h(u-x),
$$
and $\int K_{h}^{\ast}(u - x) \, \mathrm{d}u = 1+o(h)$ for any
interior point, $x$. We refer to the kernel $K_h^{\ast}$ as an
equivalent kernel.  Next, consider the use of the equivalent kernel
with the $\cal Q$-approximant $\bar{\lambda}_r$ in our local-EM
algorithm. This combination results in the conditional expectation
$\mbox{E}_{\hat{\lambda}_{r}}\left[ K_h^{\ast}(X-x) \mid X \in
S_{ij} \right]$ being approximated by
\begin{eqnarray*}
\mbox{E}_{\bar{\lambda}_{r}}\left[K_h^{\ast}(X-x) \mid S_{ij}
\right]&=& \sum_{k; Q_k \subseteq S_{ij}} \left( \dfrac{\off_\ell
\hat{\Lambda}_{\ell}^{(r)}}{\off_k \hat{\Lambda}_{k}^{(r)}}
\right)^{1/2} \int_{Q_{k}} K_h(u-x)\,\mathrm{d}u
\dfrac{\hat{\Lambda}_{k}^{(r)}}{ \sum_{k; Q_k \subseteq S_{ij}}
\hat{\Lambda}_{k}^{(r)}}
\end{eqnarray*}
for $x \in J_\ell$. This in turn gives the following iteration for
$\boldsymbol{\Lambda}$:
\begin{align}
\hat{\Lambda}_{\ell}^{(r+1)} &= n^{-1} \sum_{ij} \sum_{k; Q_k \subseteq S_{ij}} \left( \dfrac{\off_\ell
\hat{\Lambda}_{\ell}^{(r)}}{\off_k \hat{\Lambda}_{k}^{(r)}} \right)^{1/2} |\!| J |\!|^{-1} \int_{J_\ell} \dfrac{\int_{J_k} K_h(u-x) \, \mathrm{d}u}{\int_{\mathcal{M}} K_h^{\ast}(u-x) \, \mathrm{d}u} \, \mathrm{d}x \dfrac{ \mathcal{I}_{ijk} \hat{\Lambda}_{k}^{(r)}}{\sum_m \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{(r)}} \notag \\
& \approxeq  n^{-1} \sum_{ij} \sum_{k; Q_k \subseteq S_{ij}} \left( \dfrac{\off_\ell
\hat{\Lambda}_{\ell}^{(r)}}{\off_k \hat{\Lambda}_{k}^{(r)}}
\right)^{1/2} |\!| J |\!|^{-1} \int_{J_\ell} \int_{J_k} K_h(u-x) \,\mathrm{d}u \, \mathrm{d}x \dfrac{ \mathcal{I}_{ijk} \hat{\Lambda}_{k}^{(r)}}{\sum_m \mathcal{I}_{ijm} \hat{\Lambda}_{m}^{(r)}} \label{e:modified_EMS}
\end{align}
The expression (\ref{e:modified_EMS}) can be re-expressed in the following matrix form:
\begin{equation} \label{modified_EMSpl}
\hat{\boldsymbol{\Lambda}}^{(r+1)}={\cal
M}(\hat{\boldsymbol{\Lambda}}^{(r)}) {\cal
K}_h^{\ast}(\hat{\boldsymbol{\Lambda}}^{(r)}).
\end{equation}
Here ${\cal K}_h^{\ast}\left(\hat{\boldsymbol{\Lambda}}^{(r)} \right) =
\hat{\boldsymbol{\Theta}}^{(r)} \, \mathcal{K}_h \,
\left(\hat{\boldsymbol{\Theta}}^{(r)}\right)^{-1}$, where
$\hat{\boldsymbol{\Theta}}^{(r)} = \text{diag}(\hat{\theta}_{k}^{(r)})$. Note
that $\off_k = 1$ in the context considered by \cite{silvermanems}
and \cite{nychka1990spa}. The iteration (\ref{modified_EMSpl}) is
recognized as Nychka's modified EMS algorithm with ${\bf S}={\cal
K}_h$.

In summary, the use of the equivalent kernel in the local-EM algorithm
leads to an EMS iteration equivalent to Nychka's modified EMS algorithm, which
maximizes the penalized likelihood
$\mathcal{L}_{p}(\boldsymbol{\Theta})$.

\subsection{Convergence of EMS to Local-EM}\label{appx_convergence}
%
Although the choice of the partition $\mathcal{Q}$ has depended on
the data in a natural way, it is quite arbitrary. For example, we
could consider a partition based on a set of $k$ equally spaced grid
points over a finite region $\mathcal{M}$. Without loss of
generality, we consider the partition whose elements are squares
centered at these grid points, and demonstrate that an EMS iteration
will converge to its local-EM counterpart as $k \rightarrow \infty$
in $\mathcal{L}^1$. For the sake of clarity, we restrict our
attention to the locally constant case.
% 
% We begin by first considering the density estimation, a special case
% in which $\off_i(x)=1$ for all $x \in \mathcal{M} = \Re$. 

Denote the EMS and local-EM iterates as $\hat{\lambda}_{k}^{(r)}$ and $\hat{\lambda}_{\infty}^{(r)}$, respectively. In addition, we assume $S_{ij} \subseteq \mathcal{M}$ for all $i, j$ and $|\!| \mathcal{M} |\!| < \infty$. $K(z)$ is a symmetric positive kernel with compact support and $\int K(z) \, \mathrm{d}z =1$. Finally, define a norm on $\mathcal{M}$ to be $|\!|\lambda|\!|_1 = \int_{\cal M} | \lambda(u) |\, \mathrm{d}u$ and interpret the convergence of the function $f$ to the function $g$ to mean that $|\!|f - g|\!|_1 \rightarrow 0$ as $k \to \infty$. This we denote as $f \xrightarrow{{\cal L}^1} g$. These details permit the statement of the following theorem:
%
\begin{theorem} \label{t:uniform_convergence} \end{theorem}
%\vspace{-5pt}
\begin{description}
\item{\bf I.} Define $\mathcal{F}_1 = \left\{ \lambda \in \mathcal{L}^1 \mid
\text{$\lambda$ is nonnegative with $\lambda(x) > 0$ for all $x \in
\mathcal{M}$} \right\}.$ For a common initial value $\hat{\lambda}_0
\in \mathcal{F}_1$, we have, for all $r = 1, 2, \ldots$,
\begin{description}
\item{A.} $\hat{\lambda}_{k}^{(r)} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{(r)}$, and
\item{B.} $\hat{\lambda}_{k}^{(r)}$, $\hat{\lambda}_{\infty}^{(r)} \in \mathcal{F}_1$.
\end{description} \label{e: ems_iterate_v1} %%
\item{\bf II.} When the equivalent kernel of \S\ref{appx_ems} is used, we
instead define
$$
\mathcal{F}_2 = \left\{ \lambda \in \mathcal{L}^1 {\Big |} \text{$\lambda$ is
nonnegative with $\lambda(x) > 0$ for all $x \in \mathcal{M}$ and
$\int_\mathcal{M} \lambda^{1/2} < \infty$}\right\}.
$$
For a common initial value $\hat{\lambda}_0 \in \mathcal{F}_2$,
results A and B still hold for all $r$, where ${\cal F}_1$ is
replaced with ${\cal F}_2$ in B. \label{e:ems_iterate_v2}
\end{description}

\noindent Let the square root of a function $\lambda$ be denoted by
$\lambda^{1/2}$ and $K(\cdot)$ be a symmetric positive kernel with
$K_h(\cdot)=K(\cdot/h)/h$ for some $h>0$. Define $\mathcal{H}_x$ to be
%\begin{enumerate}
%\item
$\mathcal{H}_x: \mathcal{L}^1 \mapsto \mathcal{L}^1$ such that
$$\mathcal{H}_x(\lambda) = \int \dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u)
K_h(u-x)\,\mathrm{d}u} \lambda(u)\, \mathrm{d}u,\ \mbox{and}$$
%\item $\mathcal{G}_x: \mathcal{L}^1 \times \mathcal{L}^1 \mapsto \mathcal{L}^1$ such that $\mathcal{G}_x(\lambda, g) = \int g^{1/2}(x) K_h (u-x) \lambda^{1/2}(u) \, \mathrm{d}u$.
%\end{enumerate}
% Note that if an equivalent kernel is used, then
% \[
% \int K_h^{\ast}(u-x) \lambda(u) \, \mathrm{d}u = \int
% \lambda^{1/2}(t) K_h(u-x) \lambda^{1/2}(u) \, \mathrm{d}u =
% \mathcal{G}_x(\lambda, \lambda)
% \]
The proof relies on $\mathcal{H}_x(\lambda)$ being a bounded linear functional as well as some other basic results in operator theory stated as lemmas below.  These lemmas may be found in \cite{royden1988ra}.

\begin{lemma}\label{lemma:piece_approx}
Let $\lambda \in \mathcal{L}^1$. Then the
$\mathcal{Q}_k$-approximant $g$ of $\lambda$ converges in ${\cal
L}^1$ to $\lambda$ on $\mathcal{M}$ as $k \to \infty$; that is, $g
\stackrel{\mathcal{L}^1}{\longrightarrow} \lambda$.
\end{lemma}
%
\begin{lemma}\label{lemma:bl_functional}
If $\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u \geq c > 0$, then
$\mathcal{H}_x$ is a bounded linear functional for all $f \in
\mathcal{L}^1$. That is, for all $x, a, b \in \Re$,
$\mathcal{H}_x(af+b) = a\mathcal{H}_x(f)+b$, and there exists a real
number $M_h$ such that $\mathcal{H}_x (f) \leq M_h |\!| f |\!|$.
\end{lemma}

\begin{lemma} \label{lemma:integral_op1}
Let $\gamma_h(u, x) = g(x) K_h(u-x) f(u)$, where $f$, $g \in \mathcal{L}^1$ and $K_h \in \mathcal{L}^\infty$.  Then $\gamma_h(u, x)$ is an
$\mathcal{L}^1$ function on $\Re^2$ with
\[
\iint \left| \gamma_h(u, x) \right| \, \mathrm{d}u \, \mathrm{d}x \leq
M_h \cdot \left(\int |g| \right) \cdot \left(\int |f| \right) .
\]
\end{lemma}


\noindent \textbf{Main Result:} Consider a fixed $h$ and $n$ throughout the entire proof.
\begin{enumerate}
\item[I.]
%\begin{enumerate}
%\item[A.]
%
Let $r=1$. Assume $\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u
\geq c > 0$. Note that $\int_{S_{ij}} \bar{\lambda}_{0}(u)
\,\mathrm{d}u = \int_{S_{ij}} \hat{\lambda}_0(u) \,\mathrm{d}u$ by
the definition of $\bar{\lambda}_{0}$. Repeated use of the triangle
inequality gives
\begin{equation*}
\begin{split}
\left|\!\left| \hat{\lambda}_{k}^{(1)} -
\hat{\lambda}_{\infty}^{(1)} \right|\!\right|_1
%
%= n^{-1} \int_{\mathcal{M}}\left | \sum_{ij}
%\dfrac{\int_{A_{ij}} \dfrac{K_h(u-x)}{\int O(u) K_h(u-x)\,\mathrm{d}u} (\bar{\lambda}_{0}(u) - \hat{\lambda}_0(u)) \,
%\mathrm{d}u}{\int_{A_{ij}} \hat{\lambda}_0(u) \,\mathrm{d}u}\right | \,\mathrm{d}x\\
%
%& \hspace{.25in}
&\leq n^{-1} \sum_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_0(u)\,\mathrm{d}u \right)^{-1} \int_{\mathcal{M}}
\int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u)
K_h(u-x)\,\mathrm{d}u}\left| \bar{\lambda}_{0}(u) -
\hat{\lambda}_0(u)\right|\, \mathrm{d}u \, \mathrm{d}x \\
%line 2
&\leq n^{-1} \sum_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_0(u)\,\mathrm{d}u \right)^{-1} \int_{\mathcal{M}}
\int_{\mathcal{M}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u)
K_h(u-x)\,\mathrm{d}u} \left| \bar{\lambda}_{0}(u) -
\hat{\lambda}_0(u)\right| \,\mathrm{d}u \,\mathrm{d}x \\
%line 3
%& \hspace{.25in} \leq n^{-1} \sum_{ij} \left( \int_{S_{ij}}
%\hat{\lambda}_0(u)\,\mathrm{d}u \right)^{-1} M_h |\!|
%\bar{\lambda}_{0} - \hat{\lambda}_0 |\!|_1 \int_{\mathcal{M}}\,\mathrm{d}x, \\
%line 4
&\leq n^{-1} M_h |\!| \mathcal{S}|\!| \sum_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_0(u)\,\mathrm{d}u \right)^{-1} |\!| \bar{\lambda}_{0}
- \hat{\lambda}_0 |\!|_1.
\end{split}
\end{equation*}
Here the last inequality is due to the finiteness of $\mathcal{S}
\subset \mathcal{M}$ and Lemma \ref{lemma:bl_functional}. By Lemma
\ref{lemma:piece_approx}, $\bar{\lambda}_{0}\xrightarrow{{\cal L}^1}
\hat{\lambda}_0$, thus $\hat{\lambda}_{k}^{(1)}
\stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{(1)}$. Moreover, Lemma
\ref{lemma:bl_functional} also ensures that
$\hat{\lambda}_{k}^{(1)}$ and $\hat{\lambda}_{\infty}^{(1)}$ both
belong to the class
$\mathcal{F}_1$.\\[20pt]
%
\noindent {\it Induction Step:}
%
Let $b_{ij}^{(r)} = \int_{S_{ij}}
\hat{\lambda}_{\infty}^{(r)}(v)\,\mathrm{d}v/ \int_{S_{ij}}
\bar{\lambda}^{(r)}(v)\,\mathrm{d}v$. Assume that
$\hat{\lambda}_{k}^{(r)} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{(r)}$ and $\hat{\lambda}_{k}^{(r)}$,
$\hat{\lambda}_{\infty}^{(r)} \in \mathcal{F}_1$.  With the repeated
use of the triangle inequality, we have
\begin{align*}
%line 1
& \left|\!\left| \hat{\lambda}_{k}^{(r+1)} - \hat{\lambda}_{\infty}^{(r+1)} \right|\!\right|_1 \\
%= n^{-1} \int_{\mathcal{S}}\left | \sum_{ij}
%\int_{A_{ij}} \dfrac{K_h(u-x)}{\int O(u) K_h(u-x)\,\mathrm{d}u} \left(\dfrac{\bar{\lambda}_{r}(u)}{\int_{A_{ij}}
%\bar{\lambda}_{r}(v)\,\mathrm{d}v}-\dfrac{\hat{\lambda}_{\infty\,
%r}(u)}{\int_{A_{ij}} \hat{\lambda}_{\infty\, r}(v) \,
%\mathrm{d}v}\,\mathrm{d}u\right)\right | \,\mathrm{d}x\\
%%line 2
&\hspace{.15in} \leq n^{-1} \sum_{ij}
\int_{\mathcal{M}}\int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}}
O(u) K_h(u-x)\,\mathrm{d}u}\left|
\left(\dfrac{\bar{\lambda}^{(r)}(u)}{\int_{S_{ij}}
\bar{\lambda}^{(r)}(v)\,\mathrm{d}v}-\dfrac{\hat{\lambda}_{\infty}^{(r)}(u)}{\int_{S_{ij}}
\hat{\lambda}_{\infty}^{(r)}(v)\,\mathrm{d}v}\right)\right |\,\mathrm{d}u\,\mathrm{d}x\\
%line 3
&\hspace{.15in}  \leq n^{-1} \sum_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_{\infty}^{(r)}(v)\,\mathrm{d}v\right)^{-1}
\int_{\mathcal{M}}\int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u}
\left | b_{ij}^{(r)} \,
\bar{\lambda}^{(r)}(u)-\hat{\lambda}_{\infty}^{(r)}(u) \right
|\,\mathrm{d}u \,\mathrm{d}x \\
%line 4
%&\hspace{-.25in} \leq n^{-1} \sum_{ij} \left( \int_{A_{ij}}
%\hat{\lambda}_{\infty\, r}(v)\,\mathrm{d}v\right)^{-1} M_h
%\left|\!\left| b_{ri} \, \bar{\lambda}_{r} -\hat{\lambda}_{\infty\, r}
%\right|\!\right|_{1} \int_{\mathcal{M}} \,\mathrm{d}x \\
%line 5
&\hspace{.15in}  \leq n^{-1} M_h |\!| \mathcal{S}|\!| \sum_{ij}
\left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{(r)}(v)\,\mathrm{d}v
\right)^{-1} \left|\!\left| b_{ij}^{(r)} \, \bar{\lambda}^{(r)} -
\hat{\lambda}_{\infty}^{(r)} \right|\!\right|_{1}.
\end{align*}
The last inequality is again due to Lemma \ref{lemma:bl_functional}.
Now since the induction assumption implies $\bar{\lambda}^{(r)}
\xrightarrow{{\cal L}^1} \hat{\lambda}_{\infty}^{(r)}$ and
$b_{ij}^{(r)} \to 1$, we have, for all $i, j$,
\begin{equation*}
\Big|\!\Big|b_{ij}^{(r)} \, \bar{\lambda}^{(r)} -
\hat{\lambda}_{\infty}^{(r)} \Big|\!\Big|_1 \leq \Big|\!\Big|
b_{ij}^{(r)} \, \bar{\lambda}^{(r)} - \bar{\lambda}^{(r)}
\Big|\!\Big|_1 + \Big|\!\Big| \bar{\lambda}^{(r)} -
\hat{\lambda}_{\infty}^{(r)} \Big|\!\Big|_1 \to 0.
\end{equation*}%
In addition, it is evident that $\hat{\lambda}_{k}^{(r+1)}$ and
$\hat{\lambda}_{\infty}^{(r+1)}$ belong to $\mathcal{F}_1$ provided
that $\hat{\lambda}_{k}^{(r)}$, $\hat{\lambda}_{\infty}^{(r)} \in
\mathcal{F}_1$. Hence, we have (\textbf{A})
$\hat{\lambda}_{k}^{(r+1)} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{(r+1)}$ on $\mathcal{S}$, and ({\bf B})
$\hat{\lambda}_{k}^{(r+1)}$, $\hat{\lambda}_{\infty}^{(r+1)} \in
\mathcal{F}_1$ by induction.
%
%
% Equivalent Kernel
%
%
\item[II.]
%\begin{enumerate}
%\item[A.]
Let $r=1$. Assume $\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u
\geq c > 0$.  By the triangle inequality and Lemma
\ref{lemma:integral_op1}, we have
%
\begin{align*}
&\left|\!\left| \hat{\lambda}_{k}^{(1)} -
\hat{\lambda}_{\infty}^{(1)}
\right|\!\right|_1 %= n^{-1} \int_{\mathcal{M}} \left| \sum_{ij}
%\dfrac{\int_{A_{ij}} K_h(u-t) \left( \bar{\lambda}_{0}^{1/2}(u) \,
%\bar{\lambda}_{0}^{1/2}(t) - \hat{\lambda}_{0}^{1/2}(u) \,
%\hat{\lambda}_{0}^{1/2}(t) \right) \, \mathrm{d}u }{\int_{A_{ij}}
%\hat{\lambda}_{0}(t) \, \mathrm{d}t} \,
%\right| \mathrm{d}t \\
%%
%% line2
\leq n^{-1} \sum_{ij} \int_{\mathcal{M}}\int_{S_{ij}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u}
\left| \dfrac{\bar{\lambda}_{0}^{1/2}(u) \,
\bar{\lambda}_{0}^{1/2}(x) - \hat{\lambda}_{0}^{1/2}(u) \,
\hat{\lambda}_{0}^{1/2}(x) }{\int_{S_{ij}} \hat{\lambda}_{0}(v) \,
\mathrm{d}v} \,\right| \,\mathrm{d}u \,\mathrm{d}x \\
%%
&\\
% line 3
&\hspace{.15in} \leq n^{-1} \sum_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_{0}(v) \, \mathrm{d}v \right)^{-1} \left\{
\int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u}
\bar{\lambda}_{0}^{1/2}(x) \left| \bar{\lambda}_{0}^{1/2}(u) -
\hat{\lambda}_{0}^{1/2}(u) \right| \, \mathrm{d}u \, \mathrm{d}x \right. \\
%
& \hspace{2in} + \left. \int_{\mathcal{M}} \int_{\mathcal{M}}
\dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u}
\hat{\lambda}_{0}^{1/2}(u) \left| \bar{\lambda}_{0}^{1/2}(x) -
\hat{\lambda}_{0}^{1/2}(x) \right| \, \mathrm{d}u \, \mathrm{d}x \right\} \\
%%
&\\
%%
&\hspace{.15in} \leq n^{-1} M_h \sum_{ij} \dfrac{ \left|\!\left|
\bar{\lambda}_{0}^{1/2} \right|\!\right|_{1} \cdot \left|\!\left|
\bar{\lambda}_{0}^{1/2} - \hat{\lambda}_{0}^{1/2}
\right|\!\right|_{1} + \left|\!\left| \hat{\lambda}_{0}^{1/2}
\right|\!\right|_{1} \cdot \left|\!\left| \bar{\lambda}_{0}^{1/2} -
\hat{\lambda}_{0}^{1/2} \right|\!\right|_{1} }{\int_{A_{ij}}
\hat{\lambda}_{0}(v) \, \mathrm{d}v}
\end{align*}
%
%Because $\bar{\lambda}_{0} \stackrel{\mathcal{L}^1}{\longrightarrow}
%\hat{\lambda}_0$ by Lemma \ref{lemma:piece_approx},  $\left|\!\left|
%\bar{\lambda}_{0}^{1/2} - \hat{\lambda}_{0}^{1/2}
%\right|\!\right|_{1} \to 0$ by the continuous mapping theorem
%(\textsc{cmt}).
By Lemma \ref{lemma:piece_approx} and the continuous mapping theorem
(\textsc{cmt}), $\left|\!\left| \bar{\lambda}_{0}^{1/2} -
\hat{\lambda}_{0}^{1/2} \right|\!\right|_{1} \to 0$. Therefore,
$\hat{\lambda}_{k}^{(1)} \stackrel{\mathcal{L}^1}{\longrightarrow}
\hat{\lambda}_{\infty}^{(1)}$. Furthermore, if we choose
$\hat{\lambda}_0^{1/2}$ to be bounded above, then
$\hat{\lambda}_{k}^{(1)}$ and $\hat{\lambda}_{\infty}^{(1)}$ will be
also bounded. This, in turn, ensures
that $\hat{\lambda}_{k}^{(1)}, \ \hat{\lambda}_{\infty}^{(1)} \in \mathcal{F}_2$.\\[20pt]
%
\noindent{\it Induction Step:}
%
Assume that $\hat{\lambda}_{k}^{(r)} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{(r)}$, $\hat{\lambda}_{k}^{(r)}$, $\hat{\lambda}_{\infty}^{(r)} \in \mathcal{F}_2$, and bounded. %Let $c_{ri} = \int_{A_{ij}} \hat{\lambda}_{\infty\, r}(v) \, \mathrm{d}v /\int_{A_{ij}} \bar{\lambda}_{r}(v) \, \mathrm{d}v$.
Then the induction assumption immediately implies that
$$
c_{ij}^{(r)} = \frac{\int_{S_{ij}} \hat{\lambda}_{\infty}^{(r)}(v)
\, \mathrm{d}v}{\int_{S_{ij}} \hat{\lambda}_{k}^{(r)}(v) \,
\mathrm{d}v} \to 1\ \mbox{for all $i, j$ and }
(\bar{\lambda}^{(r)})^{1/2}\stackrel{\mathcal{L}^1}{\longrightarrow}
(\hat{\lambda}_{\infty}^{(r)})^{1/2}\ \mbox{on $\mathcal{M}$.}$$
As in part (I), Lemmas \ref{lemma:piece_approx} and
\ref{lemma:integral_op1} imply that
%
\begin{align*}
& \left|\!\left| \hat{\lambda}_{k}^{(r+1)} - \hat{\lambda}_{\infty}^{(r+1)} \right|\!\right|_1 \\
% line 2
&\hspace{.1in} \leq n^{-1} \sum_{ij} \left( \int_{S_{ij}} \hat{\lambda}_{\infty}^{(r)}(x) \, \mathrm{d}x \right)^{-1} \int_{\mathcal{M}} \int_{S_{ij}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u} \left| c_{ij}^{(r)} \left[\bar{\lambda}^{(r)}(u) \, \bar{\lambda}^{(r)}(x)\right]^{1/2} \right.\\
& \hspace{4.5in}- \left.\left[\hat{\lambda}_{\infty}^{(r)}(u) \, \hat{\lambda}_{\infty}^{(r)}(x)\right]^{1/2} \right| \, \mathrm{d}u \, \mathrm{d}x \\
% line 3
&\hspace{.15in} \leq n^{-1} \sum_{ij} \left(\int_{S_{ij}} \hat{\lambda}_{\infty}^{(r)}(x) \, \mathrm{d}x \right)^{-1}
\int_{\mathcal{M}} \int_{\mathcal{M}} \dfrac{K_h(u-x)}{\int_{\mathcal{M}} O(u) K_h(u-x)\,\mathrm{d}u} \left\{ c_{ij}^{(r)}
\left( \bar{\lambda}^{(r)}(x) \left| \bar{\lambda}^{(r)}(u) - \hat{\lambda}_{\infty}^{(r)}(u) \right|\right)^{1/2} \right. \\
% line 4
& \hspace{.9in} + \left. \left| c_{ij}^{(r)} - 1 \right|
\left(\hat{\lambda}_{\infty}^{(r)}(u) \bar{\lambda}^{(r)}(x)
\right)^{1/2} + \left(\hat{\lambda}_{\infty}^{(r)}(u) \left|
\bar{\lambda}^{(r)}(x)
- \hat{\lambda}_{\infty}^{(r)}(x) \right| \right)^{1/2} \right\} \, \mathrm{d}u \, \mathrm{d}x \\
% line 5
&\hspace{.15in} \leq n^{-1} M_h \sum_{ij} \left( \int_{S_{ij}}
\hat{\lambda}_{\infty}^{(r)}(x) \, \mathrm{d}x\right)^{-1} \Bigg[ 
|c_{ij}^{(r)}| \left|\!\left| (\bar{\lambda}^{(r)})^{1/2}
\right|\!\right|_{1} \cdot \left|\!\left|
(\bar{\lambda}^{(r)})^{1/2} - (\hat{\lambda}_{\infty}^{(r)})^{1/2}
\right|\!\right|_{1}  \\
% line 6
& \hspace{.9in} + |c_{ij}^{(r)} - 1| \left|\!\left|
(\bar{\lambda}^{(r)})^{1/2} \right|\!\right|_{1} \cdot
\left|\!\left| (\hat{\lambda}_{\infty}^{(r)})^{1/2}
\right|\!\right|_{1} + \left|\!\left|
(\hat{\lambda}_{\infty}^{(r)})^{1/2} \right|\!\right|_{1} \cdot
\left|\!\left| (\bar{\lambda}^{(r)})^{1/2} -
(\hat{\lambda}_{\infty}^{(r)})^{1/2} \right|\!\right|_{1} 
\Bigg] \to 0.
\end{align*}
%
% Next, define two functions, $\hat{\zeta}_{k\, r}$ and
% $\hat{\zeta}_{\infty\, r}$, to be
% $$
% \hat{\zeta}_{k\, r}(x) = \int_{\mathcal{M}} K_h(u-x) \bar{\lambda}_{r}^{1/2}(u) \,
% \mathrm{d}u \text{ and } \hat{\zeta}_{\infty\, r}(x) = \int_{\mathcal{M}}
% K_h(u-x) \hat{\lambda}_{\infty\, r}^{1/2}(u) \, \mathrm{d}u.
% $$
%By Lemma \ref{lemma:integral_op2}, $\hat{\zeta}_{k\, r}\ \hat{\zeta}_{\infty\, r} \in \mathcal{F}_2$.
Provided that $\hat{\lambda}_{k}^{(r)}$ and $\hat{\lambda}_{\infty}^{(r)}$ are bounded, $\hat{\lambda}_{k}^{(r+1)}$ and $\hat{\lambda}_{\infty}^{(r+1)}$ are bounded, implying that $\int_{\mathcal{M}} (\hat{\lambda}_{k}^{(r+1)})^{1/2} < \infty$ and $\int_{\mathcal{M}} (\hat{\lambda}_{\infty}^{(r+1)})^{1/2} < \infty$.
%
%
It follows by induction that (\textbf{A}) $\hat{\lambda}_{k}^{(r)} \stackrel{\mathcal{L}^1}{\longrightarrow} \hat{\lambda}_{\infty}^{(r)}$, and (\textbf{B}) $\hat{\lambda}_{k}^{(r)}$, $\hat{\lambda}_{\infty}^{(r)} \in \mathcal{F}_2$ for all $r$.

\end{enumerate}




\subsection{Local-EM and the Penalized Likelihood} \label{appx_penalized}

%In this section we study the penalized likelihood of \S\ref{appx_ems} under the conditions of \S\ref{appx_convergence} in which $k\rightarrow \infty$.
We begin by considering those values of $\boldsymbol{\theta}$ for which the penalty $\boldsymbol{\theta}^{T} \mathbf{R} \boldsymbol{\theta}$ is minimized. For such $\boldsymbol{\theta}$, we have
\begin{eqnarray*}
\mathbf{R} \boldsymbol{\theta}&=&(\mathbf{\cal K}_h^{-1}
-\mathbf{I})\boldsymbol{\theta}=0
\end{eqnarray*}
or rather
\begin{eqnarray}\label{eigen}
\boldsymbol{\theta}&=&\mathbf{\cal K}_h\ \boldsymbol{\theta}.
\end{eqnarray}
This permits an interpretation of ${\cal L}_p(\boldsymbol{\theta})$
as penalizing the nonparametric likelihood on the basis of the
proximity of $\boldsymbol{\theta}$ to the maximal eigenvector of
the smoothing matrix $\mathbf{\cal K}_h$. To see this, let
$\varrho_{(\ell)}$ denote the $\ell$th largest eigenvalue of
$\mathcal{K}_h$ with its corresponding eigenvector
$\boldsymbol{\gamma}_{(\ell)}$. Let
$\boldsymbol{\Gamma}=\begin{bmatrix} \boldsymbol{\gamma}_{(k)} &
\boldsymbol{\gamma}_{(k-1)}& \cdots & \boldsymbol{\gamma}_{(1)}
\end{bmatrix}$. Then the spectral decomposition of $\mathbf{R}$ is
$ \boldsymbol{\Gamma} \mathbf{D} \boldsymbol{\Gamma}^{T},$ where
$\mathbf{D}=\text{diag}\left( \varrho_{(k)}^{-1}, \ \varrho_{(k-1)}^{-1},
\ \ldots, \ \varrho_{(1)}^{-1}\right) - \mathbf{I}$.
%
Since $\varrho_{(1)} \leq 1$, $\mathbf{R}$ penalizes eigenvectors with small eigenvalues more than
those with large ones.

We note that for $\boldsymbol{\theta}$ satisfying the condition
(\ref{eigen})
\begin{equation}
\boldsymbol{\theta}^T\boldsymbol{\theta}-\boldsymbol{\theta}^T{\cal
K}_h\ \boldsymbol{\theta}=0 \label{eigen_2}.
\end{equation}
We may consider the limit of the left-hand side as $k
\rightarrow \infty$. Let $\Delta = \Delta_u \Delta_x$.  ${\cal K}_{jk}$ and $\theta_j$ may be
approximated as
\begin{eqnarray*}
{\cal K}_{jk}&=&\Delta^{-1} \int_{J_j}\int_{J_k} K_h(u-x)\,
\mathrm{d}u\, \mathrm{d}x
\approx K_h(x_j-x_k) \, \Delta \\
\theta_j^2 &=& \left( \int_{J_j} \rho(u)\, \mathrm{d}u \right)^{1/2} \left( \int_{J_j} \rho(x)\, \mathrm{d}x \right)^{1/2} =
\rho(x_j) \Delta
\end{eqnarray*}
and so the left-hand side of (\ref{eigen_2}) becomes
$$
\sum_j \rho(x_j)\Delta - \sum_{jk} \rho^{1/2}(x_j)K_h(x_j-x_k) \rho^{1/2}(x_k)\, \Delta^2.
$$
If we let $\Delta \downarrow 0$, the above expression becomes
$$
\int_{\cal T}\rho(u)\, \mathrm{d}u-\int_{\cal T}\int_{\cal
T} \rho^{1/2}(x) K_h(u-x) \rho^{1/2}(u)\, \mathrm{d}u\,\mathrm{d}x.
$$
As a result, the penalty will equal 0 for any function $f$ belonging
to the following class
$$
{\cal Z}=\left\{f \ \Bigg | \, f^{1/2}(x)=\int_{\cal
T}K_h(u-x)f^{1/2}(u)\, \mathrm{d}u \mbox{ for all $x \in {\cal S}$}
\right\}.
$$
Given this and the results of \S\ref{appx_convergence}, we speculate that the local-EM
algorithm maximizes the likelihood
\begin{eqnarray*}
\mathcal{L}(\rho)=\sum_{ij} n_{ij} \log\left(\int_{A_{ij}} \rho(u)\, \mathrm{d}u \right)
- \sum_i \int_{\mathcal{M}_i} \rho(u)\, \mathrm{d}u
\end{eqnarray*}
on the basis of the proximity of the function $\rho$ to the class of
maximal eigenfunctions $\cal Z$.
\end{document}
