%!TEX root = paper.tex

\section{Object Selection}
\label{sec:select}
In this section, we discuss how $\Obj^+$ and $\Obj^-$ are
selected in Algorithm~\ref{algo:sample} (line~\ref{line:sample:select}).  

At a high level, this process is analogous to the problem of computing
a small \emph{coreset} of a large point set that can be used for
approximating various ``extent measures'' of the full point
set~\cite{ccg05-AgarwalHarPeledVaradarajan-coreset_survey}.  More
specifically, for the exploration query evaluation problem, we would
like to approximate two types of extent measures---the sparse points
$\RePre \subseteq \Result$, and a density estimate for all other
points $\Result \setminus \RePre$.  The challenge here is that we do
not have direct access to the full point set $\Result$.  For example,
as we treat the evaluation function $f$ as a blackbox, there is no way
to sample points uniformly and independently from $\Result$ without
computing it first, i.e., performing full evaluation on all objects.
In other words, with an $f$ whose behavior is unknown to
Algorithm~\ref{algo:sample}, the coreset we construct cannot be an
arbitrary subset of $\Result$.  It has to be the result point set
obtained by evaluating $f$ for a subset of the objects in $\Obj$.

Overall, given a budget $\eta$ on the number of tuples in
$\{\R_i\}_{i \in \Obj^+ \cup \Obj^-}$, we first select $\Obj^+$ with
budget $\eta^+$, and then use whatever is left of the budget to select
$\Obj^-$.

\subsection{Selecting $\Obj^+$}
\label{sec:select:pos}
The object selection step in Algorithm~\ref{algo:sample} (especially
the choice of $\Obj^+$) is crucial to the efficiency of the algorithm
and to the quality of approximation ($\RePreProx$, $\ReSke$) to the
actual solution ($\RePre$, $\Result\setminus\RePre$).  A large
$\Obj^+$ will give $\RePreProx$ a good coverage of $\RePre$.  However,
a large $\Obj^+$ also leads to slower evaluation.  Given a fixed
budget $\eta^+$ on the number of tuples allowed to be accessed,
efficiency would not be an issue anymore, as the number of tuples to
be evaluated is limited, so the question becomes how to choose
$\Obj^+$ of limited size in order to maximize the recall of
$\RePreProx$ with respect to $\RePre$.

Observe that for any point $p \in \RePre$ where $p \in f(\R_i)$ (i.e.,
$p$ comes from object $i$), it is a necessary condition that
$i \in \Obj^+$ for $p$ to possibly appear in $\RePreProx$.  We assume
that if an object produces outlier points of $\Result$ (when $f$ is
evaluated on the full data), it is likely to also produce outlier
points when the same query $f$ is evaluated on a sample of its data.
Therefore, the prefetched sample serves as a good guide for choosing
$\Obj^+$.  We take a partition-based approach to address this
problem.

Recall that $\Result\Sample$ denotes the result of executing $f$ on
the prefetched sample for all objects (Algorithm~\ref{algo:sample},
line~\ref{line:sample:exe}).  To select objects for $\Obj^+$, we first
partition $\Result\Sample$ into a set $\Partition\Sample$ of grid cells,
each of size $r_x\Sample \times r_y\Sample$.  More precisely,
$\Partition\Sample = \bigcup_{i,j\in\Z} \Partition \Sample_{ij}$,
where
$\Partition\Sample_{ij} = \Result\Sample \cap \square\Sample_{ij}$,
$\square\Sample_{ij} = [i\cdot r_x\Sample, (i+1)\cdot
r_x\Sample)\times[j\cdot r_y\Sample, (j+1)\cdot r_y \Sample)$.
Based on the partitioning $\Partition\Sample$ and the point-object
ownership relation, we propose two strategies, namely
sparsest-grid-cell (Section~\ref{sec:select:pos:grid}) and
sparsest-object (Section~\ref{sec:select:pos:object}) for choosing
objects for $\Obj^+$, given an upper bound $\eta^+$ on the data size (in
number of tuples).

\subsubsection{Sparsest Grid Cell}
\label{sec:select:pos:grid}
The sparsest-grid-cell strategy works as follows.  Examine the
partitions in non-descending order of their sizes.  For a partition
$\Partition\Sample_{ij}$, include in $\Obj^+$ all objects that
contribute at least one point in $\Partition\Sample_{ij}$, i.e.,
$\Obj^+ = \Obj^+ \cup \{k \mid \Result\Sample_k \cap \Partition
  \Sample_{ij} \ne \emptyset\}$.  Terminate when the budget $\eta^+$
is reached.

We illustrate the idea behind this strategy using the \emph{projection query}. 
The projection query simply projects the high-dimensional tuples onto a plane defined
by two given attributes.  Thus, the result on the sample, $\Result\Sample$,
is a random sample of the full result $\Result$ of size $\zeta
 \|\DataSet\|$.

It is known~\cite{tpa71-VapnikChervonenkis-rel_freq,arxiv12-Phillips-chernoff}
that for a $d$-dimensional point set $P$ of size $n$, and
a random sample $S$ of $P$ of size $k = (d / \epsilon^2) \log (2n /
  \delta)$, with probability at least $1-\delta$, for all
$d$-dimensional axis-aligned rectangle $R$:
\begin{equation*}
 \left|\frac{|P \cap R|}{|P|} - \frac{|S \cap R|}{|S|}\right|
   \le \epsilon.
\end{equation*}
For the projection query, letting $P = \Result$, $k = \zeta \|\DataSet\|$,
$r_x\Sample = r_x$, and $r_y\Sample = r_y$, it follows that with
probability at least $1 - 2\|\DataSet\| \cdot \exp(-\zeta \|\DataSet\|
 \epsilon^2 / 2)$, for all $q \in \Result\Sample$:
\begin{equation*}
 \left|\frac{|\Neighbor_{\Result}(q; r_x, r_y)|}{\|\DataSet\|} -
   \frac{|\Neighbor_{\Result\Sample}(q; r_x\Sample, r_y\Sample)|}
   {\zeta\|\DataSet\|}\right| \le \epsilon.
\end{equation*}

This means that, with high probability, for every point of
$\Result\Sample$, its neighborhood density in $\Result\Sample$ is close
to its neighborhood density in $\Result$.  Therefore, a point of
$\Result\Sample$ with a sparser neighborhood has a higher probability of
being present in $\RePre$.

Similar analysis can be conducted for the other query types as well.
While the sparsest-grid-cell strategy is oblivious to the behavior
of the query evaluation function $f$, it follows the idea behind the
analysis above by choosing points with the sparsest neighborhoods.

% We illustrate the idea behind this strategy using one of the three
% types of queries, namely the \emph{count query}.  For a given
% attribute $A$ of the schema, the count query evaluted on the data
% $\R_i$ of object $i$ returns $(v, c)$ pairs for each possible value
% $v$ under attribute $A$ and $c$ being the number of tuples whose
% value under $A$ is at least $v$.
% 
% For simplicity, let $r_x = r_x\Sample = 1$.  Fix a value $v$ and
% consider the counts of miniminum value $c_1, \dots, c_N$ for all
% objects.  Let $X_i$ denote the number of tuples whose value is at
% least $v$ in the sample $\R_i\Sample$.  Given that $\R_i\Sample$ is
% sampled uniformly and independently at random with replacement from
% $\R_i$, at rate $\zeta$, it is clear that $X_i\sim Bin(\lfloor \zeta
%   n_i \rfloor, \rho_i)$, where $\rho_i = \tfrac{c_i}{n_i}$.  Let
% $\Neighbor_i \Sample = \{j \mid |X_i-X_j| \le r_y\Sample\}$ denote
% the number of neighbors of point $(v, X_i)$ in the sample result
% space.  We would like to bound $|\Neighbor_i\Sample|$ for each object
% $i$.
% 
% Consider $X_i$ known, so that events $X_i-X_j \le r_y\Sample$ are
% independent of each other for all $j\ne i$.  By Chernoff's bound,
% for $\tau' \in (0, \mu_{|\Neighbor_i\Sample|})$, we have
% \begin{align}
%   \mathbf{Pr}\left[|\Neighbor_i\Sample|\le \tau' \mid X_i \right]
%   &\le \exp\left[-\left(1-\frac{\tau'}{\mu_{|\Neighbor_i\Sample| \mid
%     X_i}}\right)^2 \mu_{|\Neighbor_i\Sample| \mid X_i} / 2\right] \\
%   &= \exp\left[-\frac{1}{2}\left(\mu_{|\Neighbor_i\Sample| \mid X_i}
%     + \frac{\tau'^2}{\mu_{|\Neighbor_i\Sample| \mid X_i}}\right) +
%     \tau'\right]\label{eq:nn-bound}
% \end{align}
% 
% Consider $X_i - X_j \mid X_i$, we have
% \begin{align*}
%  \mu_{X_i-X_j \mid X_i}      &= X_i - \zeta c_j \\
%  \sigma^2_{X_i-X_j \mid X_i} &= \zeta c_j\left(1-\frac{c_j}{n_j}\right)
% \end{align*}
% 
% For object $j\ne i$, we can bound the probability of $(v, X_j)$ being
% a neighbor of $(v, X_i)$ using Chebyshev's inequality as follows.
% \begin{equation*}
%  \mathbf{Pr}\left[|X_i-X_j| \le r_y\Sample \mid X_i\right] \ge
%    1 - \frac{\sigma^2_{X_i-X_j \mid X_i} + \mu^2_{X_i-X_j \mid X_i}}
%    {(r_y\Sample)^2}
% \end{equation*}
% 
% Then $\mu_{|\Neighbor_i\Sample| \mid X_i}$ in Eq.~\ref{eq:nn-bound}
% can be bounded as follows.
% \begin{align*}
%  \mu_{|\Neighbor_i\Sample| \mid X_i}
%  =   &\mathbb{E}\left[|\{j \mid |X_i - X_j|\le r_y\Sample \mid X_i\}|\le \tau'\right] \\
%  =   &1 + \sum_{j\ne i}\mathbf{Pr}\left[|X_i - X_j|\le r_y\Sample \mid X_i \right] \\
%  \ge &1 + \sum_{j\ne i}\max\{1 - \frac{\sigma^2_{X_i-X_j \mid X_i} +
%       \mu^2_{X_i-X_j \mid X_i}}{(r_y\Sample)^2}, 0\}
% \end{align*}
% 
% Summing over all values of $X_i$, we get
% \begin{equation*}
%  \mathbf{Pr}\left[|\Neighbor_i\Sample|\le \tau'\right] = \\
%  \sum_{k=0}^{\lfloor \zeta n_i \rfloor}\rho_i^k(1-\rho_i)
%    ^{\lfloor \zeta n_i \rfloor-k} \textbf{Pr}
%    \left[|\Neighbor_i\Sample| \le \tau' \mid X_i = k\right]
% \end{equation*}
% where $\textbf{Pr} \left[|\Neighbor_i\Sample| \le \tau' \mid X_i = k
%   \right]$ can be bounded according to Eq.~\ref{eq:nn-bound}
% 
% Intuitively, what the bounds above suggest is that, for a point
% $(v, c_i)$ in a dense neighborhood of $\Result$, $(v, X_i)$ would
% tend to have many neighbors in expectation.  The probability of
% having as few as $\tau'$ neighbors in $\Result\Sample$ is small, and
% that probability decreases exponentially as the expected number of
% neighbors $\mu_{|\Neighbor_i\Sample|}$ increases.
% The sparsest-grid-cell strategy follows this idea by choosing points
% from the sparest neighborhood.

By Corollary~\ref{corollary:count}, for any point $p$ in a partition
$\Partition\Sample_{ij}$, its number of neighbors $|\Neighbor_{\Result
  \Sample}(p; r_x\Sample, r_y\Sample)|$ is bounded from below by
$|\Partition\Sample_{ij}|$.  We use this lower bound instead of
counting the exact number of neighbors for each point of $\Result
  \Sample$ for efficiency reasons.


\subsubsection{Sparsest Object}
\label{sec:select:pos:object}
An object that contributes to $\RePre$ might not be selected by the
sparsest-grid-cell strategy due to an ``unfortunate'' draw of sample.
However, if the ``overall quality'' of the object is good, we can
hope to reduce the role of luck in this process by considering the
overall sparsity of points produced by this object in $\Result\Sample$.

For each object $i$, we define its overall sparsity as
\begin{equation*}
 \mu_i = \textsf{mean}_{p\in \Result\Sample \cap f(\R_i\Sample)}
   \{|\Partition\Sample_{kl}| \mid p\in\Partition\Sample_{kl}\}.
\end{equation*}
In other words, for each object $i$, we consider the mean neighborhood
sparsity of all points in $\Result\Sample$ that are produced by
object $i$.  Again, the partition size is used as an approximation
for the actual number of neighbors for efficiency reasons.

In fact, the sparsest-grid-cell strategy can be considered as a
special case of the sparsest-object strategy with $p=-\infty$ for
the power mean function
\begin{equation}\label{eq:power-mean}
  \EuScript{M}_p(x_1, x_2, \dots, x_n) = \left(\frac{1}{n}\sum_{i=1}^n
    x_i^p \right)^{1/p}.
\end{equation}
It is known that $\EuScript{M}_{-\infty}(x_1, x_2, \dots, x_n) =
  \min(x_1, x_2, \dots, x_n)$, representing the strategy deployed by
sparsest-grid-cell.

We experiment with this strategy using three other instantiations of the
power mean function $\EuScript{M}$, namely the \emph{arithmetic mean}
($p=1$), the \emph{geometric mean} ($p=0$), and the \emph{harmonic mean}
($p=-1$).

% We compare them head to head with the sparsest-grid-cell
% strategy, and show their performance in Section~\ref{sec:expr}.

\subsection{Selecting $\Obj^-$}
\label{sec:select:neg}

We adopt a simple strategy for selecting the object set $\Obj^-$---given $\Obj^+$, include each object of $\Obj \setminus \Obj^+$
independently with probability $p$.  The multiplier in
Algorithm~\ref{algo:sample} is set to $\lambda = 1 / p$ to maintain the
expectation, i.e., for any point in $\Result \setminus \Result^+$, its
expected frequency in $\Result^-$ is $\lambda \cdot p = 1$.

Another way to think of this strategy is to sample points from
$\Result$ with correlation.  For any object $i \in \Obj \setminus
  \Obj^+$, the sample point set $\Result^-$ either includes all
points of $f(\R_i)$, or none of it.

$\Obj^-$ affects the quality of output by Algorithm~\ref{algo:sample}
in several ways.  First, not all points of $\Result^+$ lead to points in
$\RePre$ after evaluating $f$ on $\Obj^+$.  It is up to $\Obj^-$
and $\Result^-$ to exclude false positives and include true positives
(line~\ref{line:sample:count1}).  Second, the quality of $\ReSke$ is
determined primarily by $\Result^-$ (line~\ref{line:sample:count2}).

\subsubsection{Budget Constraint}
\label{sec:select:neg:budget}
Note that we need to comply with the total budget constraint $\eta$.
Let $X_i\sim \mathrm{Ber}(p)$ be the Bernoulli random variable denoting
if object $i$ is chosen to be included in $\Obj^-$, for $i \in \Obj
  \setminus \Obj^+$.  All $X_i$'s are independent of each other.
Let $Y$ be the total number of tuples for objects in $\Obj^-$.
Let $\|\Obj^+\|$ denote the number of tuples for objects in $\Obj^+$.
Following notations from Section~\ref{sec:problem:prelim}, the
expected number of tuples for $\Obj^-$ is given by
% \begin{align*}
%  \mu_Y
%    &= \mathbb{E}\left[\sum_{i \in \Obj \setminus \Obj^+} X_i n_i\right] \\
%    &= \sum_{i \in \Obj \setminus \Obj^+} \mathbb{E}[X_i] n_i \\
%    &= p \cdot \sum_{i \in \Obj \setminus \Obj^+} n_i \\
%    &= p \cdot (\|\DataSet\| - \|\Obj^+\|)
% \end{align*}
\begin{equation*}
 \mu_Y = \mathbb{E}\left[\sum_{i \in \Obj \setminus \Obj^+} X_i n_i\right]
   = \sum_{i \in \Obj \setminus \Obj^+} \mathbb{E}[X_i] n_i
%    = p \cdot \sum_{i \in \Obj \setminus \Obj^+} n_i
   = p \cdot (\|\DataSet\| - \|\Obj^+\|).
\end{equation*}
The variance in the number of tuples in $\Obj^-$ is given by
% \begin{align*}
%  \sigma_Y^2
%    &= \text{Var}\left[\sum_{i \in \Obj \setminus \Obj^+} X_i n_i\right] \\
%    &= \sum_{i \in \Obj \setminus \Obj^+} n_i^2 \text{Var}[X_i] \\
%    &= p(1-p) \cdot \sum_{i \in \Obj \setminus \Obj^+} n_i^2
% \end{align*}
\begin{equation*}
 \sigma_Y^2
   = \text{Var}\left[\sum_{i \in \Obj \setminus \Obj^+} X_i n_i\right]
%    = \sum_{i \in \Obj \setminus \Obj^+} n_i^2 \text{Var}[X_i]
   = p(1-p) \cdot \sum_{i \in \Obj \setminus \Obj^+} n_i^2.
\end{equation*}
By (one-sided) Chebyshev's inequality, we have
\begin{equation*}
 \mathbf{Pr}\left[Y \ge (1 + \Delta) \cdot \mu_Y\right] \le
   \frac{1}{1 + (\Delta \mu_Y / \sigma_Y)^2}.
\end{equation*}
By setting $(1 + \Delta) \mu_Y = \eta\cdot\|\DataSet\| - \|\Obj^+\|$,
we have
\begin{equation*}
 \mathbf{Pr}\left[Y + \|\Obj^+\| \ge \eta\cdot\|\DataSet\|\right] \le
   \frac{1}{1 + (\Delta \mu_Y / \sigma_Y)^2},
\end{equation*}
where
\begin{equation*}
 \Delta = \frac{\eta\cdot\|\DataSet\| - \|\Obj^+\|}{\mu_Y} - 1.
\end{equation*}
Since $\mu_Y$ is increasing in $p$, choosing a smaller value of $p$
gives a better chance of complying with the budget constraint $\eta$.
%
\subsubsection{Quality of $\RePreProx$}
\label{sec:select:neg:precise}
We study how $\Obj^-$ affects the quality of $\RePreProx$ in two ways.
\begin{itemize}
 \item[1.] For a point in $\Result^+\cap\RePre$, what is the minimum
   probability that it is included in $\RePreProx$?
 \item[2.] For a point in $\Result^+\setminus\RePre$, what is the
   maximum probability that it is included in $\RePreProx$?
\end{itemize}
For a point $q \in \Result^+\cap f(\R_i)$, i.e., a point of $\Result^+$
coming from object $i$, let $C_j = |\Neighbor_\Result(q) \cap
  f(\R_j)|$ be the number of neighbors of $q$ in $\Result$ coming from
object $j\ne i$.  We attempt to provide a bound for the two questions
above in the worst case, where all $q$ have at least one neighbor in $\Result^+$.
Let random variable $Z$ denote the estimated
number of $q$'s neighbors.  Following the notation of random variable
$X_i$'s from Section~\ref{sec:select:neg:budget}, $Z$ can be written
as follows:
\begin{equation*}
 Z = 1 + \lambda \cdot \sum_{j\ne i} X_j C_j.
\end{equation*}
And we have
% \begin{align*}
%  \mu_Z      &= 1 + \sum_{j\ne i} C_j = |\Neighbor_\Result(q)|\\
%  \sigma_Z^2 &= \frac{1-p}{p} \cdot \sum_{j\ne i} C_j^2
% \end{align*}
\begin{equation*}
 \mu_Z      = 1 + \sum_{j\ne i} C_j = |\Neighbor_\Result(q)|,\quad
 \sigma_Z^2 = \frac{1-p}{p} \cdot \sum_{j\ne i} C_j^2.
\end{equation*}

\begin{itemize}
 \item[1.] If $\mu_Z\le\tau$, by Chebyshev's inequality, we have
   \begin{equation*}
    \mathbf{Pr}[Z > \tau] \le \frac{1}{1 + (\mu_Z - \tau)^2 /
     \sigma_Z^2}.
   \end{equation*}
 \item[2.] If $\mu_Z > \tau$, symmetrically, we have
   \begin{equation*}
    \mathbf{Pr}[Z \le \tau] \le \frac{1}{1 + (\mu_Z - \tau)^2 / 
     \sigma_Z^2}.
   \end{equation*}
\end{itemize}

These bounds suggest a couple of things.  First, the farther $\mu_Z$
deviates from $\tau$, the more confident we can be that
Algorithm~\ref{algo:sample} will make the right decision on whether to include $q$ in
$\RePreProx$.  In other words, it is harder to classify points
correctly whose actual neighborhood density in $\Result$ is close to
the sparsity threshold $\tau$.  Also, since $\tau$ is presumably
small, in general it is harder to classify points of $\RePre$
correctly than $\Result \setminus \RePre$, i.e., high recall is harder
to achieve than high precision.  Second, a larger budget for $\Obj^-$
would lead to a larger $p$, thus higher confidence in classification.

\subsubsection{Quality of $\ReSke$}
\label{sec:select:neg:sketch}
Recall that the sketch distance (defined in
Section~\ref{sec:problem:defn}) is used to measure the quality of a sketch
$\ReSke$ w.r.t.\ $\Result \setminus \RePre$.  While the exact sketch
distance is not easy to come by, an upper bound can be obtained as
follows.

Ignore $\RePre$ for now.  Partition the result set $\Result$ such that
any two points in the same partition are neighbors.  For example,
under $L_\infty$-norm neighborhood definition, partition $\Result$
into grid cells each of size $r_x \times r_y$.  Let $P_1, \dots, P_m$
denote the resulting partitions.  Let
$Z_j = \lambda \cdot |\Result^- \cap P_j|$ be the estimated number of
points in partition $P_j$ by the sketch.  Let $Z = \sum_j Z_j$ be the
estimated total number of points.

We have the following bound on $\delta(\Result, \ReSke)$, expressed in
terms of the means and variances of $Z_j$'s and $Z$.
\begin{multline}
     \mathbf{Pr}\left[\delta(\Result, \ReSke) \ge 1 -
      \frac{1 - \Delta^-}{1 + \Delta^+}\right] \\
 \le \sum_j \frac{1}{1 + (\Delta^-\mu_j / \sigma_j)^2} +
   \frac{1}{1 + (\Delta^+\mu_Z / \sigma_Z)^2}.
\end{multline}
% By linearity of expectation, we have
% \begin{align*}
%   \mu_j &= \mathbb{E}[Z_j] = |\Result \cap P_j| \\
%   \mu_Z &= \mathbb{E}[Z] = |\Result|
% \end{align*}

% It is easy to see the following bound on $\delta(\Result, \ReSke)$.
% \begin{align*}
%  \delta(\Result, \ReSke)
%    &\le 1 - \frac{\sum_j \min\{Z_j, |\Result \cap P_j|\}}
%      {\max\{Z, |\Result|\}}\\
%    &= 1 - \frac{\sum_j \min\{Z_j, \mu_j\}}{\max\{Z, \mu_Z\}}
% \end{align*}

% \begin{equation*}
%  \delta(\Result, \ReSke)
% %    &\le 1 - \frac{\sum_j \min\{Z_j, |\Result \cap P_j|\}}
% %      {\max\{Z, |\Result|\}}\\
%    \le 1 - \frac{\sum_j \min\{Z_j, \mu_j\}}{\max\{Z, \mu_Z\}}
% \end{equation*}

% Let $\sigma_j^2$ and $\sigma_Z^2$ denote the variance of $Z_j$ and
% $Z$, respectively.
% By the (one-sided) Chebyshev's inequality, we have
% \begin{align*}
%      &\mathbf{Pr}\left[\sum_j\min\{Z_j, \mu_j\} \ge
%       (1 - \Delta^-)\mu_Z\right]\\
%  \ge &\mathbf{Pr}\left[\bigcap_j Z_j \ge (1 - \Delta^-)\mu_j\right] \\
%  \ge &1 - \mathbf{Pr}\left[\bigcup_j Z_j \le (1 - \Delta^-)\mu_j\right] \\
%  \ge &1 - \sum_j \frac{1}{1 + (\Delta^-\mu_j / \sigma_j)^2}
% \end{align*}
% and
% \begin{align*}
%      &\mathbf{Pr}\left[\max\{Z, \mu_Z\} \ge (1 + \Delta^+)\mu_Z\right] \\
%  =   &\mathbf{Pr}\left[Z \ge (1 + \Delta^-)\mu_Z\right] \\
%  \le &\frac{1}{1 + (\Delta^+\mu_Z / \sigma_Z)^2}
% \end{align*}
% Combining the two inequalities above, we have
% \begin{align*}
%      &\mathbf{Pr}\left[\delta(\Result, \ReSke) \ge 1 -
%       \frac{1 - \Delta^-}{1 + \Delta^+}\right] \\
% %  \le &\mathbf{Pr}\left[1 - \frac{\sum_j \min\{Z_j, \mu_j\}}
% %    {\max\{Z, \mu_Z\}} \ge 1 - \frac{1 - \Delta^-}{1 + \Delta^+}
% %    \right]\\
%  \le &\mathbf{Pr}\left[\sum_j\min\{Z_j ,\mu_j\} \le (1 - \Delta^-)\mu_Z
%    \lor Z \ge (1 + \Delta^-)\mu_Z\right] \\
%  \le &\sum_j \frac{1}{1 + (\Delta^-\mu_j / \sigma_j)^2} +
%    \frac{1}{1 + (\Delta^+\mu_Z / \sigma_Z)^2}
% \end{align*}

% To get a more concrete understanding of the bound, further let
% $c_{ij}$ denote the number of points in partition $P_j$ that come
% from object $i$, i.e., $c_{ij} = |f(\R_i) \cap P_j|$, and $C_i =
%   \sum_j c_{ij} = |f(\R_i)|$.  It follows that $Z_j = \lambda \cdot
%   \sum_i X_i c_{ij}$. We have for each $i$,
% \begin{equation*}
%  \mu_i      = \sum_j c_{ij},~
%  \sigma_i^2 = \frac{1-p}{p} \cdot \sum_i c_{ij}^2
% \end{equation*}
% and for $Z$,
% \begin{equation*}
%  \mu_Z      = \sum C_j,~
%  \sigma_Z^2 = \frac{1-p}{p} \cdot \sum_i C_i^2
% \end{equation*}

% For any two partitions $j$ and $j'$, the covariance and correlation
% between $Z_j$ and $Z_{j'}$ are given by
% \begin{equation*}
%  \sigma_{jj'} = \frac{1-p}{p} \cdot \sum_i c_{ij}c_{ij'},~
%  \rho_{jj'}   = \frac{\sigma_{jj'}}{\sigma_j\sigma_{j'}}
% \end{equation*}

Tighter bounds can be obtained by taking into account the correlation
among $Z_j$'s.
% , using the dependent multi-variate Chebyshev's
% inequality as follows,
% \begin{align*}
%      &\mathbf{Pr}\left[\bigcap_j Z_j \ge (1 - \Delta^-)\mu_j\right] \\
%  \le &1 - \frac{1}{m^2}\left(\sqrt{u} + \sqrt{m-1} \sqrt{\frac{m}
%        {(\Delta^-)^2} \sum_j \frac{\sigma_j^2}{\mu_j^2} - u} \right)^2
% \end{align*}
% where
% \begin{align*}
%  u = \frac{1}{(\Delta^-)^2} \sum_j \sum_{j'} \frac{\rho_{jj'}}
%    {\mu_j \mu_{j'}}
% \end{align*}
When the distributions of the number of points contributed by the
objects are identical across all partitions, we have
\begin{equation}
\label{eq:bounds-identical-distn}
 \mathbf{Pr}\left[\delta(\Result, \ReSke) \ge 1 - \frac{1 - \Delta}
   {1 + \Delta}\right] \le \frac{2}{1 + (\Delta\mu_Z / \sigma_Z)^2}.
\end{equation}

If, on top of the identical distribution scenario, $C_i$'s are all
equal, we have $\frac{\mu_Z^2}{\sigma_Z^2} = \frac{Np}{1-p}$, and
\begin{equation}
\label{eq:bounds-identical-counts}
 \mathbf{Pr}\left[\delta(\Result, \ReSke) \ge 1 - \frac{1 - \Delta}
   {1 + \Delta}\right] \le \frac{2}{1 + \Delta^2Np/(1-p)}.
\end{equation}

On the other hand, in the worst case where all $Z_j$'s are independent
and $\mu_j = \sigma_j$, we would have
\begin{equation}
\label{eq:bounds-independent-distn}
 \mathbf{Pr}\left[\delta(\Result, \ReSke) \ge 1 - \frac{1 - \Delta}
   {1 + \Delta}\right] \le \frac{N + 1}{1 + \Delta^2p/(1-p)}.
\end{equation}

Taking $\RePre$ back into account, since the frequency counts in
$\Result^+$ are all precise (estimates with zero variance), including
$\RePre$ does not invalidate any of the results above.

Proofs of the above bounds and additional remarks can be found in the
appendix.
% Note that the bound on $\mathbf{Pr}\left[\sum_j\min\{Z_j, \mu_j\} \ge
%   (1 - \Delta^-)\mu_Z\right]$ is still quite loose.  
Note that these bounds may still be quite loose.  We will show
the quality of sketch produced by Algorithm~\ref{algo:sample} via
empirical results in Section~\ref{sec:expr}.

