\subsection{Reducing the Number of Comparisons} 
\label{sec:setreduction}
 
% \begin{enumerate} 
 %\item we discuss optimization issues, such as reduction on the number of sets during the computation.
%\end{enumerate}

In order to compute a score for every instance in each candidate set $C(s) \in C(S)$, our class-based matching approach requires a maximum of $|C(S)| \times |C|$ comparisons. Since $|C(S)|$ can be large, we propose to reduce the number of comparisons by reducing $C(S)$ to a minimal subset $C(S)^*$ such that the feature distribution of $C(S)^*$ differs only within an error margin $\epsilon$ from the feature distribution of $C(S)$. Then, $C(S)^*$ is used in line~3 of Alg.~\ref{alg:sim} instead of $C(S)$, i.e., $C(S)^-=C(S)^* \setminus C(s)$. We define the feature set and the distribution over elements in that set as follows:

\begin{definition}[Feature Set] The feature set of $C(S)$ is $F(C(S)) = \bigcup_{C(s) \in C(S)} F(C(s))$. 
% where \\$F(C(s)) = \{A(C(s)), D(C(s)), O(C(s)), T(C(s))\}$.
\end{definition} 
 
\begin{definition}[Feature Distribution] A distribution over the feature set $X=F(C(S))$, denoted by $Pr(X)$, assigns a probability $p(x)$ to every feature $x$, i.e., the probability of observing a feature $x$ through the repeated sampling of features from $X$: 
\[
p(x)=Pr\{X=x\}= \frac{\sum_{C(s) \in C(S)} |\{x\} \cap F(C(s))|}{|F(C(S))| \times |C(S)|}
\]

where

\begin{enumerate}
\item $p(x) \geq 0$ for all $x \in X$ and
\item $\sum_{x \in X} p(x) = 1$.
\end{enumerate}  
\end{definition}

In the ideal case, $C(S)^*$ contains far fewer candidate sets than $C(S)$, i.e., $|C(S)^*| \ll |C(S)|$, while carrying the same amount of information, such that the similarity scores computed for $C(S)^*$ and $C(S)$ are the same. In order to capture the differences in the provided information content, we use the \textit{z-test}, which is a standard method for analyzing the similarity/difference between the distribution of a sample and the distribution of the original population: 
\[
z\text{-}test = \frac{\mu(sample) - \mu(population)}{\sigma(population)/\sqrt{size(sample)}}
\]
where $\mu(\cdot)$, $\sigma(\cdot)$ and $size(\cdot)$ denote the mean, the standard deviation and the size, respectively, and $population=Pr(F(C(S)))$ and $sample=Pr(F(C(S)^*))$.


A brute-force algorithm to solve this problem is to enumerate all possible subsets of $C(S)$, i.e., its power set, which contains $2^{|C(S)|}$ subsets. Then, among these subsets, it picks the minimal set $C(S)^*$ that has a distribution equivalent to that of $C(S)$. In the worst case, this algorithm requires $O(2^{|C(S)|})$ verifications to find $C(S)^*$, which is prohibitive even for small $C(S)$. 
%In addition, this problem is NP-hard as formalized in Theorem \ref{theorem:nphard}, which can be proved using a reduction from the
%set cover problem \cite{Hochbaum:97}.
%\begin{theorem} Find a minimal set $C(S)^* \subseteq C(S)$ such that $Pr(F(C(S)^*)) \sim Pr(F(C(S)))$ is NP-hard.
%\label{theorem:nphard}
%\end{theorem}

We note that the attempt to find an optimal solution to this problem may go against our goal. We need to find the set $C(S)^* \subseteq C(S)$ at very low cost, so that the time spent is smaller than the gain that can be achieved by using $C(S)^*$ instead of $C(S)$. We thus use an efficient greedy algorithm that exploits the following intuition: a sample is more similar to its population when it contains more data from the population. Without enumerating and evaluating each subset, it iteratively extracts and adds a subset $C(s) \in C(S)$ to the sample $C(S)^*$ until the \textit{z-test} between $Pr(F(C(S)))$ and $Pr(F(C(S)^*))$ approaches the confidence value commonly used in the literature,\footnote{which, under our assumption of normal distribution, is in $[-1.96,1.96]$} or until all $C(s) \in C(S)$ have been added to $C(S)^*$. For faster convergence, only features that occur more than once in the data are considered in $F(C(S))$. 

The procedure to obtain $C(S)^*$ is summarized in Alg.\ \ref{alg:setreduction}. It takes $O(|C(S)|)$ time in the worst case. In Sec.\ \ref{sec:evaluation}, we compare the time performance and accuracy of Alg.\ 1 with and without this procedure.
%The set of candidate sets $C(S)^*$ obtained in this procedure is used to compute the instances scores as discussed before. On Sec. \ref{sec:evaluation}, we compare the performance of SERIMI with and without the candidate sets reduction algorithm. 

\begin{algorithm}

\caption{CandidateSetsReduction($C(S)$).}
\begin{algorithmic}[1]
%\scriptsize\tt 
\STATE  $C(S)^* \leftarrow \emptyset$ 
\STATE $\mu \leftarrow mean(p(C(S)))$
\STATE $\sigma \leftarrow stdv(p(C(S)))$
\FORALL{$C(s) \in C(S)$} 
\STATE  $C(S)^* \leftarrow C(S)^* \cup \{C(s)\}$ 
\STATE $n \leftarrow |C(S)^*|$
\STATE $M \leftarrow mean(p(C(S)^*))$
\STATE $SE \leftarrow \frac{\sigma}{\sqrt{n}}$
\STATE $z \leftarrow \frac{(M - \mu) }{SE}$
\IF {$z$ is in the confidence interval}
\RETURN $C(S)^*$
\ENDIF
\ENDFOR  
\RETURN $C(S)^*$
\end{algorithmic}
\label{alg:setreduction}
\end{algorithm}

