\section{The Diversification Algorithm}
\label{sec:algorithm}

%\debmalya{This section is being modified.}
In this section, we describe the diversification algorithm, and
analyze it to prove
Theorem~\ref{thm:main}. The 
algorithm uses the expected optimal value of the objective function,
denoted by $c_{\opt}$.
If $c_{\opt}$ is not known, we can guess $c_{\opt}$, and update our
guess repeatedly by doubling, as outlined in the previous section.
Since the input stream of items is drawn i.i.d. from some (unknown)
probability distribution, it can be shown that the competitive ratio 
of the algorithm remains a constant even if the expected optimal value 
of the objective function is not known to the algorithm. However, for
the sake of simplicity, we assume throughout that we know $c_{\opt}$. 

As sketched in the previous section, our algorithm uses a reward function 
$\phi$ defined as
\begin{equation*}
%	\phi(k) = \beta n^{-\alpha (k/c_{\opt})},
	\phi(k) = n^{-\alpha (k/c_{\opt})},
\end{equation*}
where $\alpha$ is a constant that we will fix later.
%and
%\begin{equation*}
%	\beta = \frac{\alpha \ln n}{c_{\opt}}.
%\end{equation*} 
We also define 
\begin{equation*}
	\bar{\Phi}(k) = \int_{k}^{\infty} \phi(j)\, dj.
\end{equation*}
If the current collection of selected items has fractional coverage $c_i$ for some 
feature $i\in F_j$ for the current item $j$, then the reward $r_{ij}$ of item $j$ 
due to feature $i$ is defined as $\phi(c_i)$. The overall reward $r_j$ of item $j$ 
is defined as the sum of the rewards of its constituent features, i.e.,
$r_j = \sum_{i\in F_j} r_{ij}$. 
%The input has $2\log n$ epochs, where each 
%epoch constitutes an arrival sequence of $m/2\log n$ sets. At the beginning 
%of every epoch, all features that already have coverage of at least $\lambda/2$
%are discarded. 
At any stage of the algorithm, the remaining reward for feature $i$ is 
%if $c_i$ is the fractional coverage on feature $i$, then
\begin{equation*}
	\bar{\Phi}_i = \bar{\Phi}(c_i),
\end{equation*}
and the overall remaining reward is
\begin{equation*}
	\bar{\Phi} = \sum_{i\in F} \bar{\Phi}_i.
\end{equation*}
The online algorithm selects the current item $j$ {\em if and only if} 
\begin{equation*}
	r_j \geq \frac{\bar{\Phi} \ln n}{\gamma B},
\end{equation*}
where $\gamma$ is a constant we will fix later. The algorithm terminates when either the
input stream has been exhausted or the algorithm has already selected $B$ items.

\medskip
\noindent
{\bf Analysis.}
First, we state a property of the reward function that we will use later
in the analysis of the algorithm.
\begin{fact}\label{fact:geometric}
	For any $k \geq 0$, $\bar{\Phi}(k) = \left(\frac{c_{\opt}}{\alpha \ln n}\right)\phi(k)$.
%	For any $k \geq 0$, $\bar{\Phi}(k) = \phi(k)$. 
%	In particular, $\Phi(0) = 1$.
\end{fact}
%\begin{proof}
%	Let $k = \lambda/4\log n$, $P = \sum_{j = 0}^{k-1} \phi(j)$ and 
%	$Q = \sum_{j = 0}^{\infty} \phi(j)$. Our goal is to show that $P \geq Q/2$;
%	this would immediately prove the lemma. Now,
%	\begin{equation*}
%		Q = \sum_{j = 0}^{\infty} (1 - 1/k)^j 
%		= \sum_{j = 0}^{k-1} (1 - 1/k)^j + \sum_{j = k}^{\infty} (1 - 1/k)^j 
%		= P + (1 - 1/k)^k \sum_{j = 0}^{\infty} (1 - 1/k)^j 
%		\leq P + Q/e.
%	\end{equation*}	
%	Therefore, 
%	\begin{equation*}
%		Q \quad \leq \quad \left(\frac{e}{e-1}\right) P 
%		\quad \leq \quad 2P.
%	\end{equation*}
%\end{proof}
%
\begin{figure}
	\centering
%	Maximize $\eta$ subject to
	\begin{eqnarray*}
		\sum_{S\subseteq F: i\in S} w_S p_S & \geq & \frac{c_{\opt} T_i}{m} \quad \forall~i \in F\\
		\sum_{S\subseteq F} w_S p_S & \leq & \frac{B}{m} \\
		0 \quad \leq & w_S & \leq \quad 1 \quad \forall~S\subseteq F
	\end{eqnarray*}
	\caption{A linear program for the \diverse problem.}
	\label{fig:lp}
\end{figure}
Now, consider the linear program (LP) for the \diverse problem in Fig.~\ref{fig:lp}.
Here, $p_S$ denotes the probability that an item $j$ in the input stream has $F_j = S$,
and $w_S$ is the fraction to which such an item
is chosen in the optimal fractional (offline) solution. Since the expected optimal value
of the objective is $c_{\opt}$, this LP is feasible.

Recall that $\rho_{\opt} = c_{\opt} \min_{i\in F} T_i$. For simplicity of notation,
let us also denote the expected value of $\rho_{\opt}$ by $\rho_{\opt}$ itself in the
rest of this section.
The next lemma bounds the expected decrease in $\bar{\Phi}$ due to the next item, and
lower bounds the probability that this item is chosen by our algorithm
if it has not already exhausted its budget. 
\begin{lemma}
\label{lma:expectedreward}
At any stage of the algorithm, the expected decrease in $\bar{\Phi}$ for the next item
in the input stream is at least $\left(\alpha - \frac{1}{\gamma}\right) \frac{\bar{\Phi}\ln n}{m}$.
Further, the probability that the next item in the
input stream is selected by the algorithm is at least 
$\left(1 - \frac{1}{\alpha \gamma}\right)\frac{\rho_{\opt}}{m}$.
\end{lemma}
\begin{proof}
Consider a hypothetical algorithm that chooses item $j$ having a feature set $F_j = S$ with probability 
$w_S$. The expected decrease of $\bar{\Phi}$ for this algorithm at any stage is
\begin{eqnarray*}
	& &\sum_{S\subseteq F} p_S w_S \sum_{i\in S} \frac{\phi(c_i)}{T_i}\\
	& = & \left(\frac{\alpha\ln n}{c_{\opt}}\right) \sum_{i\in F} \frac{\bar{\Phi}_i}{T_i} \sum_{S\subseteq F: i\in S} w_S p_S \\
	& \geq & \left(\frac{\alpha\ln n}{c_{\opt}}\right) \sum_{i\in F} \frac{\bar{\Phi}_i}{T_i} \left(\frac{c_{\opt} T_i}{m}\right) \\
	& = & \left(\frac{\alpha\ln n}{m}\right) \sum_{i\in F} \bar{\Phi}_i \\
	& = & \left(\frac{\alpha\ln n}{m}\right) \bar{\Phi}. 
\end{eqnarray*}
Let 
\begin{equation*}
	y_S = \sum_{i\in S} \frac{\phi(c_i)}{T_i}
\end{equation*}
and 
\begin{equation*}
	z_S = p_S w_S (m/B).
\end{equation*}
Then, we have 
\begin{eqnarray*}
	\sum_{S\subseteq F} y_S z_S & \geq & \left(\frac{\alpha\ln n}{B}\right) \bar{\Phi} \\
	\sum_{S\subseteq F} z_S & \leq & 1.
\end{eqnarray*}
By standard convexity arguments, we can conclude that
\begin{equation*}
	\sum_{S\subseteq F: y_S \geq \frac{\bar{\Phi}\ln n}{\gamma B}}  y_S z_S 
	\geq \left(\alpha - \frac{1}{\gamma}\right) \frac{\bar{\Phi}\ln n}{B},
\end{equation*}
which implies that the expected decrease in $\bar{\Phi}$ due to the next item in the
input stream is
\begin{equation*}
	\sum_{S\subseteq F: y_S \geq \frac{\bar{\Phi}\ln n}{\gamma B}}  w_S p_S y_S
	\geq \left(\alpha - \frac{1}{\gamma}\right) \frac{\bar{\Phi}\ln n}{m}.
\end{equation*}
Further, the maximum decrease in $\bar{\Phi}$ due to a single item is
\begin{equation*}
	\max_{S\subseteq F} \sum_{i\in S} \frac{\phi(c_i)}{T_i}
	\leq \sum_{i\in F} \frac{\phi(c_i)}{T_i}
	= \left(\frac{\alpha \ln n}{c_{\opt}}\right)\sum_{i\in F} \frac{\bar{\Phi}_i}{T_i}
	\leq  \left(\frac{\alpha \ln n}{\rho_{\opt}}\right) \bar{\Phi}. 
\end{equation*}
Since $w_S \leq 1$ for all $S\subseteq F$,
\begin{equation*}
	\sum_{S\subseteq F: y_S \geq \frac{\bar{\Phi}\ln n}{\gamma B}} p_S 
	\geq \sum_{S\subseteq F: y_S \geq \frac{\bar{\Phi}\ln n}{\gamma B}} p_S w_S 
	\geq \left(1 - \frac{1}{\alpha \gamma}\right)\frac{\rho_{\opt}}{m}. 
\end{equation*}
\end{proof}
\noindent
The above lemma implies that if the algorithm has not selected $B$ items already,
then the next item in the input stream is selected with probability
\begin{equation*} 
	p \geq \left(1-\frac{1}{\alpha \gamma}\right)\frac{\rho_{\opt}}{m},
\end{equation*} 
and if the next item is selected,
then the value of $\bar{\Phi}$ decreases to at most
\begin{equation*}
	\left(1 - \frac{1}{p}\left(\alpha - \frac{1}{\gamma}\right)\frac{\ln n}{m}\right)\bar{\Phi} 
	\leq n^{-\frac{\alpha-\frac{1}{\gamma}}{pm}}\bar{\Phi}.
\end{equation*} 
The next lemma asserts that if $\rho_{\opt} = \Omega(\ln n)$ and the algorithm 
does not select $B$ items, then the value of $\bar{\Phi}$ when the algorithm
terminates is small.
\begin{lemma}
\label{lma:full-input}
Suppose the algorithm does not select $B$ items. Further, let  
%\begin{equation*}
	$\rho_{\opt} \geq \frac{3 \ln n}{\epsilon^2 \left(1 - \frac{1}{\alpha \gamma}\right)}$.
%\end{equation*} 
Then, the value of $\bar{\Phi}$ when the algorithm
terminates is at most $n^{1 - (1 - \epsilon)\left(\alpha - \frac{1}{\gamma}\right)}$
with probability at least $1 - 1/n$. 
\end{lemma}
\begin{proof}
The expected number of items selected by the algorithm is
\begin{equation*}
	pm \geq \left(1 - \frac{1}{\alpha \gamma}\right) \rho_{\opt} 
	\geq \frac{3\ln n}{\epsilon^2}.
\end{equation*}
Therefore, by Chernoff bounds~\cite{MotwaniR97}, 
the value of $\bar{\Phi}$ when the algorithm terminates is at most
\begin{equation*}
	n^{-\left(\frac{\alpha - \frac{1}{\gamma}}{pm}\right)(1-\epsilon)pm} \cdot n 
	= n^{1 - (1 - \epsilon)\left(\alpha - \frac{1}{\gamma}\right)}
\end{equation*}
with probability at least $1-1/n$.
\end{proof}
\noindent
Finally, we consider the case when the algorithm uses up its entire 
budget, i.e. selects $B$ items.
\begin{lemma}
\label{lma:full-budget}
If the algorithm selects $B$ items, then the value of $\bar{\Phi}$
when the algorithm terminates is at most $n^{1 - \frac{1}{\gamma}}$.
\end{lemma}
\begin{proof}
When the algorithm selects an item,
the value of $\bar{\Phi}$ decreases to at most
\begin{equation*}
	\left(1 - \frac{\ln n}{\gamma B}\right)\bar{\Phi} \leq n^{-\frac{1}{\gamma B}}\bar{\Phi}.
\end{equation*}
Therefore, the value of $\bar{\Phi}$
when the algorithm terminates after selecting $B$ items is at most
\begin{equation*}
	n^{-1/\gamma} \cdot n = n^{1 - \frac{1}{\gamma}}.
\end{equation*}
\end{proof}
\noindent
We now set $\gamma = \frac{2-\epsilon}{(1-\epsilon)\alpha}$ which lets us 
summarize the above two lemmas in the following lemma.
\begin{lemma}
\label{lma:combined}
If $\rho_{\opt} \geq \frac{3 (2-\epsilon) \ln n}{\epsilon^2}$,
then the value of $\bar{\Phi}$ when the algorithm
terminates is at most $n^{1 - \alpha\left(\frac{1-\epsilon}{2-\epsilon}\right)}$
with probability at least $1 - 1/n$. 
\end{lemma}
\noindent
The next lemma bounds the competitive ratio of the algorithm.
\begin{lemma}
\label{lma:competitive}
If $\rho_{\opt} \geq \frac{3(2-\epsilon) \ln n}{\epsilon^2}$,
then the competitive ratio of the algorithm is at least 
$\left(\frac{1-\epsilon}{2-\epsilon}\right) - \frac{1}{\alpha}$ 
with probability at least $1 - 1/n$.
\end{lemma}
\begin{proof}
Suppose not, and let $i_{\min}$ be the feature with the minimum fractional 
coverage at the end of the algorithm. Then, 
\begin{equation*}
	\bar{\Phi} 
	\geq \bar{\Phi}_{i_{\min}} 
	> n^{-\alpha \left(\frac{1-\epsilon}{2-\epsilon} - \frac{1}{\alpha}\right)}
	= n^{1 - \alpha\left(\frac{1-\epsilon}{2-\epsilon}\right)},
\end{equation*}
which violates Lemma~\ref{lma:combined}.
\end{proof}
\noindent
Observe that since $\epsilon < 1$,
\begin{equation*}
	\frac{1-\epsilon}{2-\epsilon} > \frac{1}{2} - \epsilon.
\end{equation*}
We now obtain Theorem~\ref{thm:main} by setting $\epsilon = \delta/2$ and 
$\alpha = 2/\delta$.

















\eat{

\begin{lemma}\label{lma:iid-coverage}
	Suppose $\lambda \geq 48\log^2 n$. Then, with high probability, every epoch 
	contains a collection of at most $\frac{B}{2\log n}$ sets that have a coverage 
	of at least $\frac{\lambda}{4\log n}$ on each feature.
\end{lemma}
\begin{proof}
	Let $\eta^*$ be the optimal value of the objective in the linear program (LP) given 
	in Fig.~\ref{fig:lp}, where $p_S$ is the probability of set $S$ in the input distribution. 
	Clearly, $\eta^*\geq \lambda$. Now, consider an algorithm that accepts set $S$ with
	probability $w_S$. For a sequence of $\frac{T}{2\log n}$ arrivals, the expected number of
	items accepted by this algorithm is $\frac{B}{2\log n}$. Further, for any feature $x\in X$,
	the expected coverage of the feature is 
	$\frac{\eta^*}{2\log n}\geq \frac{\lambda}{2\log n}$. In fact, since 
	$\frac{\eta^*}{2\log n} \geq \frac{\lambda}{2\log n} \geq 24 \log n$, by Chernoff bounds,
	the coverage of every feature is at least $\frac{\lambda}{4\log n}$ with probability at 
	least $1 - \frac{1}{n^2}$. The lemma follows by using the probabilistic method.
\end{proof}
\noindent
The next corollary is a direct consequence of the above lemma and 
Fact~\ref{fact:geometric}.
\begin{corollary}\label{cor:iid-reward}
	Suppose $\lambda \geq 48\log^2 n$. Then, with high probability, every epoch 
	contains a collection of at most $\frac{B}{2\log n}$ sets that have a total
	reward of at least $\bar{\Phi}/2$.
\end{corollary}
\begin{proof}
	Recall that each feature that contributes to $\bar{\Phi}$ has a coverage of
	at most $\lambda/2$ at the beginning of the epoch. For each such feature,
	by Fact~\ref{fact:geometric}, an additional coverage of 
	$\frac{\lambda}{2\log n}$ yields at least half of its contribution to 
	$\bar{\Phi}$ as reward. Lemma~\ref{lma:iid-coverage} guarantees such 
	coverage using at most $\frac{B}{2\log n}$ sets.
\end{proof}

Let $\psi$ be any non-increasing reward function, i.e. $\psi(i+1) \leq \psi(i)$ for 
all $i$. We consider the problem of maximizing reward by selecting $B$ sets from a
collection of $T$ subsets of $X$ that arrive online. Suppose $\Psi$ be the 
total reward of an offline optimal solution. (Note that even though the individual 
reward of a set in an accepted collection of sets depends on arrival order, the 
cumulative reward of the entire collection is independent of this order.) Now, 
consider an online algorithm $\cal A$ that accepts a set iff its reward is at least 
$\frac{\Psi}{4B}$ as long as it does not run out of its budget of $B$ sets.
\begin{lemma}\label{lma:competitive}
	Algorithm $\cal A$ collects a total reward of at least ${\Psi}/6$ for any order 
	of arrival of the subsets.
\end{lemma}
\begin{proof}
	If the online algorithm runs out of its budget of $B$ sets, then the lemma follows 
	trivially. Therefore, we assume that the algorithm accepts less than $B$ sets.
	Therefore, if the algorithm rejects a set, it is solely because its reward is
	less than $\frac{\Psi}{4B}$.

	For any feature $x\in X$, let its coverage in the offline optimal solution be 
	$\hat{c}(x)$. We order these $\hat{c}(x)$ accepted sets in arbitrary order; let
	$S(i, x)$ be the $i$th set in this order. Conversely, for any set $S$ in the 
	optimal solution and any feature $x\in S$, let $I(S, x)$ is the index of set
	$S$ in the order for feature $x$. For any set $T$ and feature $x\in T$, we 
	denote the coverage of $x$ in the collection of accepted sets of the online
	algorithm when $T$ arrives by $c(x, T)$. We categorize the pairs $(i, x)$ in
	the optimal offline solution according to the behavior of the online algorithm 
	on receiving set $S(i, x)$ (a	pair $(i, x)$ can be placed in any category it 
	qualifies for; every set qualifies for at least one category):
	\begin{enumerate}
		\item $\sum_{y\in S(i, x)} \psi(I(S(i, x), y)) < \frac{\Psi}{2B}$.
		\item $c(x, S(i, x)) \geq i$ and set $S(i, x)$ is accepted by the online 
			algorithm.
		\item $c(x, S(i, x)) \geq i$ and set $S(i, x)$ is rejected by the online 
			algorithm.
		\item $c(x, S(i, x)) < i$ and set $S(i, x)$ is accepted by the online 
			algorithm.
		\item $\sum_{y\in S(i, x)} \psi(I(S(i, x), y)) \geq \frac{\Psi}{2B}$,
			$c(x, S(i, x)) < i$ and set $S(i, x)$ is rejected by the online 
			algorithm.
	\end{enumerate}
	
	The average reward earned by a set in the offline optimal solution is at least 
	$\frac{\Psi}{B}$. Therefore, the sum of rewards of all sets in the first
	category in the offline optimal solution is less than ${\Psi}/2$. 
	
	For each pair $(i, x)$ in the second or third category, the online algorithm has a 
	coverage of at least $i$ on feature $x$. Therefore, the total reward earned
	by the online algorithm is at least the total reward earned by sets $S(i, x)$
	on feature $x$ in the optimal offline solution, where $(i, x)$ is in the 
	second or third category.
	
	For each pair $(i, x)$ in the fourth category, the online algorithm earns a 
	reward of at least $\psi(i)$ on feature $x$ when it accepts $S(i, x)$. 
	Therefore, the total reward collected by the online algorithm is at least
	the total reward earned by sets $S(i, x)$ on feature $x$ in the optimal
	offline solution, where $(i, x)$ is in the fourth category.
	
	Finally, consider a set $T = S(i, x)$ where $(i, x)$ is in the fifth category.
	Clearly, for each $y\in T$, $(I(T, y), y)$ is either in the third category
	or in the fifth category. Since $T$ was not accepted by the online algorithm, 
	the total reward of 	$T$ in the online algorithm must be less than 
	$\frac{\Psi}{4B}$. However, the total reward of $T$ in the optimal offline 
	solution is at least $\frac{\Psi}{2B}$. This implies that at least half 
	the reward of $T$ in the optimal offline solution came from pairs 
	$(I(T, y), y)$ that are in the third category; consequently, at most half the
	reward of $T$ came from pairs $(I(T, y), y)$ that are in the fifth category.
	
	Combining these observations proves the lemma.
\end{proof}
\noindent
The following lemma is an immediate consequence of Lemma~\ref{lma:competitive} and 
Corollary~\ref{cor:iid-reward}.
\begin{lemma}\label{lma:epoch}
	The total reward collected in an epoch by the online algorithm is at least 
	$\frac{\bar{\Phi}}{12}$, with high probability.
\end{lemma}
\noindent
\begin{theorem}\label{thm:main}
	At the end of the algorithm, the coverage on every feature is at least $\lambda/96$,
	with high probability.
\end{theorem}
\begin{proof}
	Clearly, this property is satisfied by any discarded feature. Suppose a feature
	that was not discarded till the end violates this property. 
	Then, the contribution of that feature to $\bar{\Phi}$ is at least 
	\begin{equation*}
		\left(1 - \frac{4\log n}{\lambda}\right)^{\lambda/96}
		= \left(1 - \frac{4\log n}{\lambda}\right)^{(\frac{\lambda}{4\log n})(\frac{\log n}{24})}
		\geq (11/12)^{\log n/2}
		= 1/\sqrt{n}.
	\end{equation*}
	However, by Lemma~\ref{lma:epoch}, at the end of $2\log n$ epochs, 
	$\bar{\Phi} \leq \frac{\Phi}{n^2} < \frac{1}{\sqrt{n}}$ for large
	enough $n$. 
\end{proof}

}
