\section{One-sample Test}
\label{sec:onesample}

% As mentioned earlier, the algorithms in this paper make use of quantile $\epsilon$-sketches to compute the KS-statistic for any data stream. In fact, the entire streaming algorithm consists of updating a quantile $\epsilon$-sketch with the entries in the stream while also keeping track of the length of the stream. The guarantee of the algorithm is not affected by the choice of sketch, as long as the sketch itself has an $\epsilon$-accuracy guarantee. The algorithm also inherits the space usage of the sketch (e.g., $O(\frac{1}{\epsilon}\log{(\epsilon n)})$ for the Greenwald-Khanna sketch). In addition, our algorithm can be generalized to other models (e.g., distributed computing) by using the appropriate sketch (e.g., the q-digest sketch). 

Our algorithm uses a quantile sketch (e.g., \cite{GK01}) to maintain the quantiles in a stream (in a single pass) with the following guarantee (for any fixed $\epsilon > 0$): For any given rank $r$, the quantile sketch will return an element whose rank is within the range $[r - \epsilon n, r + \epsilon n]$. Note that computation of the KS-statistic requires not the quantiles themselves but the ``inverse'' of the quantiles, and so the main technical challenge of the algorithm and analysis is to carefully compute these values at all the necessary points. In order to do this, we prove the following observations (needed in the analysis of our algorithms):

\begin{observation}
\label{obs:sketchvalues}
It is possible to extract from a quantile $\epsilon$-sketch a subset $\{X_{i_1}, \ldots, X_{i_k}\} \subseteq \{X_1, \ldots, X_n\}$ (where $X_1 \leq X_2 \leq X_3 \leq \ldots \leq X_n$) such that $i_1 < i_2 < i_3 < \ldots < i_k$ and, for all $1 \leq j < k$, $i_{j+1} - i_j \leq 2\epsilon n$.
\end{observation}

\begin{proof}
Since a quantile sketch is guaranteed to return only values from the original data stream, any values that the sketch contains can be extracted via querying for the $i$th largest element (for $1 \leq i \leq n$) from the sketch. (See Appendix~\ref{app:extract} for details.) Let $X_{i_1} \leq \ldots \leq X_{i_k}$ be the values that result from these queries.

Now, fix any $j \in \{1, \ldots, k-1\}$. If it is the case that $i_{j+1} - i_j > 2\epsilon n$, then there must be some $i'$, $i_j < i' < i_{j+1}$, such that querying the sketch for the $i'$th largest element will give a value with rank error greater than $\epsilon n$ (since the nearest extracted ranks, $i_j$ and $i_{j+1}$, are both more than $\epsilon n$ away), a contradiction. Hence, it must be that $i_{j+1} - i_j \leq 2\epsilon n$.
\end{proof}

\begin{observation}
\label{obs:sketcherror}
Given some value $X_i$ returned by a quantile $\epsilon$-sketch (where $X_i$ is the $i$th largest element in the input), it is possible to estimate $i$ to within $\epsilon n$ additive error.
\end{observation}

\begin{proof}
Performing a binary search among the indices $1, \ldots, n$ for the value $X_i$ will give the desired approximation to the index.
%an index $j$ such that $j \in [i - \epsilon n, i + \epsilon n]$.
A detailed description of this algorithm is given in Appendix~\ref{app:binsearch}.
\end{proof}


% We first show that using such a sketch gives a low-error approximation to the KS-statistic.

Recall that our goal is to compute the KS-statistic of the empirical distribution of a set of points $X_1, \ldots, X_n$ (where $X_1 \leq X_2 \leq X_3 \leq \ldots \leq X_n$), denoted by $F_n$, with respect to some arbitrary (known) distribution $F$ via the formula 
\[ D_n = \sup_x |F(x) - F_n(x)|. \]
We achieve an approximation $\hat{D}$ to this value $D_n$ by using a quantile $\epsilon$-sketch to summarize the data being streamed and then comparing the result to the fixed distribution $F$. The pseudocode for the comparison is given in Algorithm~\ref{alg:one-sample}. Note that the streaming part of the algorithm (the quantile sketch) is independent of the distribution $F$.

\begin{algorithm}[tb]
\caption{OneSample($Q$, $n$, $F$)} 
\label{alg:one-sample}

\textbf{Input:} Quantile $\epsilon$-sketch $Q$ of a stream of size $n$, and a distribution function $F$

\textbf{Output:} $\hat{D}$, an estimate of the KS-statistic $D_n$

\begin{algorithmic}[1]
\STATE Let $X_{i_1} \leq \ldots \leq X_{i_k}$ be the values in $Q$, as described in Observation~\ref{obs:sketchvalues}.
\STATE $\hat{D} = 0$
\FOR{each $x \in \{X_{i_1}, \ldots, X_{i_k}\}$}
\STATE Let $j = \max{\{p~|~X_{i_p} \leq x\}}$.
\STATE Let $\hat{i}_j$ be the approximate index of $X_{i_j}$, computed as described in Observation~\ref{obs:sketcherror}.
\STATE $\hat{E}_x = |\hat{i}_j/n - F(x)|$ 
\STATE $\hat{D} = \max{(\hat{D}, \hat{E}_x)}$
\ENDFOR
\STATE return $\hat{D}$
\end{algorithmic}
\end{algorithm}


\begin{theorem}
\label{thm:onesampleguarantee}
Algorithm~\ref{alg:one-sample} returns an estimate of the KS-statistic with at most $3\epsilon$ additive error.
\end{theorem}

\begin{proof}
For any $x$, let $E_x = |F(x) - F_n(x)|$. Recall that our goal is to compute $D_n = \max_x E_x$. Now, let $X_1 \leq \ldots \leq X_n$ be the data in the stream (in ascending order) and let $X_{i_1} \leq \ldots \leq X_{i_k}$ be the values in the sketch, as computed in line 1 of Algorithm~\ref{alg:one-sample}. We first show how to estimate $F_n(x)$ approximately using the sketch $Q$.

For any $x$, let $i$ be such that $X_i \leq x < X_{i+1}$, i.e., the largest index of the data that is at most $x$. Then, by definition, $F_n(x) = i/n$. Let $j$ be the largest index such that $X_{i_j} \leq x < X_{i_{j+1}}$. Note that this corresponds with the value $j$ computed on line 4 of Algorithm~\ref{alg:one-sample}. We now use the fact that $i$ was chosen to be the largest index such that $X_i \leq x < X_{i+1}$, and the fact that $\{X_{i_1}, \ldots, X_{i_k}\} \subseteq \{X_1, \ldots, X_n\}$ to get that
\begin{align}
\label{eq:squeeze1}
X_{i_j} \leq X_i \leq x < X_{i+1} \leq X_{i_{j+1}}.
\end{align}
This follows since $i$ was chosen to be the maximal such value and the sketch has a subset of the $\{X_i\}$'s.

We use the fact that Observation~\ref{obs:sketchvalues} tells us that $i_{j+1} - i_j \leq 2\epsilon n$. Combining this with the above inequalities, and the fact that the sequences $\{X_i\}$ and $\{X_{i_j}\}$ are monotonic, we get that 
\begin{align}
\label{eq:indexbound}
i - i_j \leq 2\epsilon n. 
\end{align}

Now, line 4 of Algorithm~\ref{alg:one-sample} gives the value of $X_{i_j}$, but not the value of the index $i_j$. To compute this, we make use of Observation~\ref{obs:sketcherror} to compute an estimate $\hat{i}_j$ in line 5. This estimate is guaranteed to have at most $\epsilon n$ additive error. Putting this together with Eq.~\ref{eq:indexbound}, using the triangle inequality, we get that 
\begin{align}
\label{eq:onesamplebound}
|i - \hat{i}_j| \leq 3\epsilon n.
\end{align}

We now have, for any given $x$, an estimate of $F_n(x)$, computed as $\hat{i}_j/n$, with at most $3\epsilon$ additive error from the actual value $i/n$. 

Lastly, instead of computing $\hat{E}_x$ (the estimate of $E_x$) for every $x$, we restrict the computation to just the values extracted from the sketch, since these are the critical values at which the empirical distribution function changes.
\end{proof}












\begin{comment}
Consider first the quantity $K^{+}_n$ defined as $K^{+}_n = \sqrt{n} \max_{1 \leq i \leq n} (i/n - F(X_i))$. To compute this quantity, we can simply query the quantile data structure for the $i$th largest element in the stream for each $1 \leq i \leq n$ and use this to approximate  $K^{+}_n$. Let us assume that for any $i$, the quantile data structure returns $X_j$  instead of $X_i$; that is, this approximation replaces the value $(i/n - F(X_i))$ with $(i/n - F(X_j))$ in the maximization. Recall that the guarantee of the quantile data structure is that $j \in [i - \epsilon n, i + \epsilon n]$. Putting these two facts together, we get that 
$$j/n  - F(X_j) - \epsilon \leq  (i/n - F(X_j)) \leq j/n - F(X_j) + \epsilon.$$
If $(i/n - F(X_j))$ is an overestimate for $(i/n - F(X_i))$, then it only overestimates $K^{+}_n$ by at most $\epsilon$ since $K^{+}_n \geq j/n  - F(X_j)$. On the other hand, if $(i/n - F(X_j))$ is an underestimate for $(i/n - F(X_i))$, then it only affects the maximization if $K^{+}_n = (i/n - F(X_i))$, in which case  


% To compute the KS-statistic, we need to be able to find the maximum of $|F(v) - G(v)|$ over all values $v$. Fortunately, rather than having to check all (possibly infinite) such values, we can take advantage of the fact that the empirical distribution is discrete and only check at the values that correspond to $F(v)$ values that correspond with the $i/n$ quantiles for $i \in \{0, \ldots, n\}$. Fix one such $i$ and let $v$ be the smallest value such that $F(v) = i/n$. We would like to compute $|F(v) - G(v)|$ for this value. It is not possible to know this value $F(v)$ exactly (short of storing the entire stream), but fortunately the aforementioned streaming quantile data structures can give us a value $v'$ such that $|F(v) - F(v')| \leq \epsilon n /n = \epsilon$. We show next that this returned value can be used to approximate the KS-statistic (where $G$ is completely known) using the estimator $E_i = |F(v') - G(v)|$. First, we see that $E_i$ is not too high:
%\begin{eqnarray*}
%  |F(v') - G(v)| &\leq& |F(v) - G(v)| + |F(v') - F(v)|\\
%                 &=& |F(v) - G(v)| + \epsilon,
%\end{eqnarray*} 
%where the first line comes from the triangle inequality. Similarly, we can bound $E_i$ from below
%\begin{eqnarray*}
%  |F(v') - G(v)| &\geq& |F(v) - G(v)| - |F(v') - F(v)|\\
%   &=& |F(v) - G(v)| - \epsilon,
%\end{eqnarray*}
%once again using the triangle inequality. Putting this together, we get that $E_i$ is within $\epsilon$ additive error of $|F(v) - G(v)|$, where $v$ corresponds to $i$. Hence, we can check all the quantiles $i/n$ ($0 \leq i \leq n$) and compute the maximum $E = \max_{0 \leq i \leq n} E_i$ as our estimate for the KS-statistic and be certain that the resulting answer is correct to within $\epsilon$.
\end{comment}


\subsection{Computational Analysis}

The streaming part of the algorithm is identical to that of whichever quantile $\epsilon$-sketch is employed by the algorithm. For instance, in the case of the Greenwald-Khanna sketch, summarizing $n$ data points with up to $\epsilon$ error can be done using at most $O(\frac{1}{\epsilon}\log{(\epsilon n)})$ space and time per update. Since the error bound $\epsilon' = 3\epsilon$ guaranteed by our algorithm is only a constant factor larger, the same asymptotic space and update-time guarantees hold (with $\epsilon$ replaced by $\epsilon'$).

The computational complexity of measuring the KS-distance is less important since this can be done offline, well after the stream is processed, but we analyze it here anyway. The running time of Algorithm~\ref{alg:one-sample} is dominated by the time needed to extract the values $X_{i_j}$ from the quantile sketch. As can be seen in Appendix~\ref{app:extract}, this takes $O(n)$ query operations to the sketch. In contrast, the rest of the algorithm is relatively fast since, if there are $s = o(n)$ unique values stored in the sketch (e.g., $s = O(\frac{1}{\epsilon}\log{(\epsilon n)})$ for the Greenwald-Khanna sketch), then the algorithm iterates $s$ times and performs $O(\log{s})$ computations for the binary search (see Appendix~\ref{app:binsearch}) on line 5 of Algorithm~\ref{alg:one-sample}, giving a running time of $O(s\log{s})$, which is much less than the initial query operations. 
