\documentclass[12pt]{article}
\usepackage{layout,latexsym, array, enumerate, amsmath, amsthm,amssymb, amsfonts, natbib, subfigure, color, float}
\usepackage[mathscr]{eucal}
\usepackage{epsf,epsfig}
%\usepackage{epsf,epsfig,eufrak,dsfont}
\bibliographystyle{apalike}

\definecolor{grey}{RGB}{190,190,190}

\textwidth 6.5in \textheight 9.00in \oddsidemargin -0.15in
\evensidemargin -0.15in \topmargin -0.25in
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}{Proposition}[section]
\newtheorem{corollary}{Corollary}[section]
\newtheorem{example}{Example}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{defn}{Definition}[section]
\newcommand{\lowtilde}[1]{\mathop{#1}\limits_{\textstyle\tilde{}}}
%\renewcommand{\baselinestretch}{1.4}

\newcommand{\off}{\mathcal{O}}
\newcommand{\poly}{{\cal P}(u-s;\mathbf{a})}
\newcommand{\coef}{\mathbf{a}}
\newcommand{\lik}{\ensuremath{\mathcal{L}}}
\newcommand{\map}{\ensuremath{\mathcal{M}}}
\newcommand{\area}[1]{\ensuremath{|\!|#1|\!|}}
\newcommand{\norm}[1]{\ensuremath{\left|\!\left|#1\right|\!\right|_1}}
\newcommand{\dee}[1]{\ensuremath{\,\mathrm{d}#1}}
\newcommand{\Steve}[1]{{\tt (#1)}}

\newcommand{\comment}[1]{}

\begin{document}
\title{A Local-\textsc{EM} Algorithm for Mismeasured Data}
\author{Patrick E. Brown\footnote{University of Toronto and Cancer Care Ontario}, Paul Nguyen\footnote{University of Toronto} and Jamie Stafford\footnote{University of Toronto}}
\thispagestyle{empty}
\date{}
\maketitle

\section{Introduction}

In many biomedical studies, it is common that observations cannot be measured accurately for several reasons, such as the nature of the observations of interest, monitoring costs or the protection of confidentiality. For example, a person who has been exposed to HIV will likely test negative within a few months after being infected, but will be properly diagnosed at future follow-up tests. This is due to the fact that the body may take six weeks to a year to develop antibodies to this virus [cite]. Standard analysis of these mismeasured data generally leads to biased estimates and poor inference \citep{fuller1987, carroll2006}. 

Mismeasured data can be linked to the data of real interest using the integral operator given by
\begin{equation}\label{operator}
f(x_j) = \int K(x_j,y_j) g(y_j) \dee{y_j}, \quad j = 1,\ldots,n.
\end{equation}
Here, the mismeasured observations $x_j$ are drawn from the density $f$, but the density $g$ and observations $y_j$ are of interest. The nonnegative kernel function $K(x_j,y_j)$ is the conditional density of $X_j$ given $y_j$ and assumed to be known. 

One fundamental problem that involves mismeasured data assumes the error is additive; specifically, we estimate $g$ when $X_j = Y_j + U_j$ is observed, where the density of $U_j$ is assumed to be known \citep{carroll2006}. In this situation, we can estimate $g$ using deconvolution, which requires no parametric assumptions. In this paper, we extend the deconvolution method to analyze mismeasured observations that are interval-censored. We first show the relationship between the deconvolution method and the \textsc{EM} algorithm. Then, we discuss a local-\textsc{EM} algorithm to estimate $g$ from such mismeasured data. We implement the \textsc{E}-step of this algorithm by approximating the conditional expectations using a piecewise constant density function. The results of this local-\textsc{EM} algorithm collapse explicitly into the \textsc{EMS} algorithm proposed by \citet{silvermanems}. 

\section{Non-Censored Data}

\subsection{Deconvolution Method versus \textsc{EM} Algorithm}

Suppose the mismeasured data has the form $X_j = Y_j + U_j$, where $Y_j$ is of interest and the density of $U_j$, denoted $k$, is known. Then, the kernel function in (\ref{operator}) becomes $K(x_j,y_j) = k(x_j - y_j)$. So, $f$ is represented as a convolution of $g$ and $k$ given by
\begin{equation}\label{convolution}
f(x_j) = \int g(y_j) k(x_j - y_j) \dee{y_j} = g * k(x_j).
\end{equation}

Deconvoluting (\ref{convolution}) yields the kernel density estimator, $\hat{g}$, given by
\begin{equation}\label{deconvoluting_est}
\hat{g}(y) = \frac{1}{n}\sum\limits_{j = 1}^n K_h^* (x_j - y)
\end{equation}
where
\begin{equation}\label{deconvoluting_kernel}
K_h^*(t) = \frac{1}{2\pi h} \int \exp(itw/h) \frac{\phi_X (w)}{\phi_U (w/h)} \dee{w}.
\end{equation}
Here, $h$ is the bandwidth and $\phi_X$ and $\phi_U$ are the characteristic functions of $X_j$ and $U_j$, respectively. 

Another solution to this problem uses the density estimate for $g$ given by
\begin{equation}\label{density_est}
\hat{g}(y) = \frac{1}{n}\sum\limits_{j = 1}^n K_h (y_j - y)
\end{equation}
where $K_h$ is the kernel. The choice of this kernel is relatively unimportant; common choices are the standard normal density or a density with bounded support.

Conditional on $Y_j$, applying the \textsc{EM} algorithm to (\ref{density_est}) yields
\begin{align}\label{expected_est}
\hat{g}(y) & = \frac{1}{n}\sum\limits_{j = 1}^n \text{E} \left[K_h (Y_j - y) | X_j \right] \nonumber \\
& = \frac{1}{n}\sum\limits_j \int K_h (y_j - y) \Pr(Y_j = y_j|X_j = x_j) \dee{y_j} \nonumber \\
& = \frac{1}{n}\sum\limits_j \int K_h (y_j - y) \frac{\Pr(X_j = x_j|Y_j = y_j)\Pr(Y_j = y_j)}{\Pr(X_j = x_j)} \dee{y_j} \nonumber \\
& = \frac{1}{n}\sum\limits_j \int K_h (y_j - y) \frac{k(x_j - y_j)\Pr(Y_j = y_j)}{\Pr(X_j = x_j)} \dee{y_j}.
\end{align}
We can define $\Pr(Y_j = y_j)$ as
\[
\Pr(Y_j = y_j) = \frac{g(y_j)}{\int g(w) \dee{w}}
\]
and $\Pr(X_j = x_j)$ as
\begin{align*}
\Pr(X_j = x_j) & = \int \Pr(X_j = x_j|V_j = v_j) \Pr(V_j = v_j) \dee{v_j} \\
& = \int k(x_j - v_j) \frac{g(v_j)}{\int g(w) \dee{w}} \dee{v_j} \\
& = \frac{g * k(x_j)}{\int g(w) \dee{w}}.
\end{align*}
The expectation in (\ref{expected_est}) depends on $g$, so the procedure is iterative. That is, at iteration $r + 1$, $\hat{g}^{(r)}$ is used to take the expectation of $\hat{g}$. Then, the \textsc{EM} recursion at iteration $r+1$ becomes
\begin{equation}\label{em_est}
\hat{g}^{(r+1)}(y) = \frac{1}{n}\sum\limits_j \int K_h (y_j - y) k(x_j - y_j) \frac{\hat{g}^{(r)}(y_j)}{\hat{g}^{(r)} * k(x_j)} \dee{y_j}.
\end{equation}
If we set $\hat{g}^{(0)}(y)$ as a constant and substitute $z_j = y_j - y$, then $\hat{g}^{(0)}(y_j)/\{\hat{g}^{(0)} * k(x_j)\} = 1$ and the first iteration of (\ref{em_est}) becomes
\begin{align}\label{first_em_est}
\hat{g}^{(1)}(y) & = \frac{1}{n}\sum\limits_j \int K_h (z_j) k(x_j - y - z_j) \dee{z_j} \nonumber \\
& = \frac{1}{n}\sum\limits_j K_h * k(x_j - y).
\end{align}

Under these settings, the deconvoluting kernel density estimator (\ref{deconvoluting_est}) is a representation of (\ref{first_em_est}). The \textsc{EM} algorithm allows more flexibility in the choice of the kernel function. In specific situations, $K_h * k(\cdot)$ in (\ref{first_em_est}) can have the same form as $K_h^*(\cdot)$ in (\ref{deconvoluting_kernel}). 

However, the \textsc{EM} algorithm may not converge at the first iteration with these assumptions. Also, it is not necessarily true that both methods yield similar results.

\section{Interval Censored Data}

Suppose the mismeasured data are interval censored; that is, we observe the interval $I_j$ such that $x_j \in I_j$. The observations of interest may or may not be interval-censored; so, we define the data of interest to be $S_j$ such that $y_j\in S_j$, where $S_j$ may contain only the single observation $y_j$. To deal with this problem, local likelihood and kernel smoothing methods are used.

We start with the likelihood of $g$ given by
\begin{equation}\label{likelihood}
L(g; \{x_j,y_j\}) = \prod\limits_{j = 1}^n g(y_j).
\end{equation}

Local likelihood computes a nonparametric estimate of $g(y)$ by giving more weight to the data close to $y$ when estimating at $y$. Applying a kernel $K_h$ with bandwidth $h$ to the log of the likelihood in (\ref{likelihood}) and bringing the kernel inside the sum, yields
\begin{equation}\label{locallikelihood}
\ell(g; y) = \sum\limits_{j=1}^n K_h(y_j - y) \log(g(y_j)).
\end{equation}

Maximizing (\ref{locallikelihood}) directly is statistically and numerically difficult. So, $g$ is approximated with exponentiated polynomials of \citet{loader1999lra}. The simplest model is the locally constant model with $g(y_j) = \exp(a_0)$, which will be used here. 

\subsection{Local-\textsc{EM} Algorithm}

The local-\textsc{EM} algorithm maximizes the expected value of (\ref{locallikelihood}), to account for the interval censoring $x_j\in I_j$ and $y_j\in S_j$. As the expectation depends on $g$, the procedure is iterative. At iteration $r + 1$, the density $\hat{g}^{(r)}$ is used to take the expectation of $\ell(g; y)$, conditioning on $x_j\in I_j$ and $y_j\in S_j$:
\begin{equation}\label{localEM}
\ell^{(r+1)}(g; y) = \sum\limits_{j=1}^n \text{E}[K_h(y_j - y) a_0|x_j\in I_j, y_j\in S_j; \hat{g}^{(r)}].
\end{equation}
Differentiating (\ref{localEM}) with respect to $a_0$ gives
\begin{equation}\label{formalEMS}
\hat{g}(y) = \frac{1}{n}\sum\limits_{j=1}^n \text{E}[K_h(y_j - y)|x_j\in I_j; y_j\in S_j]
\end{equation}
\verb"Where does this 1/n come from?"

Letting $h(x_j,y_j)$ denote the joint density of $(X_j,Y_j)$ (not to be confused with the bandwidth $h$), the kernel function in (\ref{operator}) becomes $K(x_j,y_j) = h(x_j|y_j)$ and 
\begin{equation}\label{expectedEM}
\text{E}[K_h(y_j - y)|x_j\in I_j, y_j\in S_j] = \frac{\int_{S_j} \int_{I_j} K_h(y_j - y) h(x_j,y_j) \dee{x_j}\dee{y_j}}{\int_{S_j} \int_{I_j} h(x_j,y_j) \dee{x_j}\dee{y_j}}.
\end{equation}

We discretize $g$ and $K_h$ as piecewise constant functions given by
\[
g(u) \simeq \frac{g_j}{||J_j||}; u \in J_j \mbox{ and } K_h(u - y) \simeq \frac{K_j(y)}{||J_j||}; u \in J_j
\]
where
\[
g_j = \int_{J_j} g(w) \dee{w}
\]
and
\[
K_j(y) = \int_{J_j} K_h(w - y) \dee{w}.
\]

Then, the numerator of (\ref{expectedEM}) is given by
\begin{align}\label{numer}
\int_{S_j} \int_{I_j} K_h(y_j - y) h(x_j,y_j) \dee{x_j}\dee{y_j} & = \int_{S_j} K_h(y_j - y) g(y_j) \int_{I_j}  h(x_j|y_j) \dee{x_j}\dee{y_j} \nonumber \\
& = \sum\limits_j \frac{K_j(y)}{||J_j||} \frac{g_j}{||J_j||} \int_{J_j} \int_{I_j} h(x_j|y_j) \dee{x_j}\dee{y_j}
\end{align}
and the denominator of (\ref{expectedEM}) is given by
\begin{align}\label{denom}
\int_{S_j} \int_{I_j} h(x_j,y_j) \dee{x_j}\dee{y_j} & = \int_{S_j} g(y_j) \int_{I_j}  h(x_j|y_j) \dee{x_j}\dee{y_j} \nonumber \\
& = \sum\limits_j \frac{g_j}{||J_j||} \int_{J_j} \int_{I_j} h(x_j|y_j) \dee{x_j}\dee{y_j}.
\end{align}

Substituting (\ref{numer}) and (\ref{denom}) into (\ref{formalEMS}) leads to the iteration
\begin{equation}\label{finalEMS}
\hat{g}(y) = \sum\limits_{ij} \frac{g_j q_{ij}}{\sum\limits_k g_k q_{ik}} \frac{K_j(y)}{||J_j||}
\end{equation}
where $i$ indexes the observations, $j$ and $k$ index the bins, and
\[
q_{ij} = \frac{1}{||J_j||} \int_{J_j} \int_{I_i} h(x|y) \dee{x}\dee{y}.
\]


\section{Discussion}

\bibliography{llems}

\end{document}
