\chapter{Appendix: Formulas}
\label{chp:formulas}

Below, $x, y$ are the feature vectors with known means $\mu_x, \mu_y$ and known standard deviations $\sigma_x, \sigma_y$. $K$ is the size of both feature vectors. $a, b, c, d$ describe the feature vectors as explained in Section~\ref{sec:pred}.

\section{Quantitative Measures}

\begin{equation}
\label{formula:hist}
\mathit{HistogramIntersection}=\sum\limits_{i} \min(x_i, y_i)
\end{equation}


\begin{equation}
\label{formula:def}
\mathit{ModifiedDotProduct}=\frac{\sum\limits_{i} x_i y_i}{K}
\end{equation}

\begin{equation}
\label{formula:tanimoto}
\mathit{TanimotoIndex}=\frac{\sum\limits_{i} x_i y_i}{\sum\limits_{i} x_i^2 + \sum\limits_{i} y_i^2 - \sum\limits_{i} x_i y_i}
\end{equation}

\begin{equation}
\label{formula:corr}
\mathit{CorrelationCoefficient}=\frac{\sum_i (x_i-\mu_x)(y_i-\mu_y)}{\sqrt{\sum_i (x_i-\mu_x)^2}\ \sqrt{\sum_i (y_i-\mu_y)^2}}
\end{equation}

\begin{equation}
\label{formula:cosine}
\mathit{CosineMeasure}=\frac{\sum\limits_{i} x_i y_i}{\sqrt{\sum\limits_{i} x_i^2} \sqrt{\sum\limits_{i} y_i^2}}
\end{equation}

\begin{equation}
\label{formula:meehl}
\mathit{MeehlIndex}=\sum_{i=0}^{K-2}\left((x_i-x_{i+1})-(y_i-y_{i+1})\right)^2
\end{equation}

We set $a_2=a_1=2$ in Equation~\ref{formula:minkowski}, i.e.\ we used the Euclidean Distance for our experiments.

\begin{equation}
\label{formula:minkowski}
\mathit{MinkowskiDistance}=\sqrt[a_2]{\frac{\sum\limits_{i} |x_i-y_i|^{a_1}}{K}}
\end{equation}

\begin{equation}
\label{formula:exponential}
\mathit{ExponentialDivergence}=\sum_i x_i \left(\log\frac{x_i}{y_i}\right)^2
\end{equation}

\begin{equation}
\label{formula:kagan}
\mathit{KaganDivergence}=\frac{1}{2} \sum_i \frac{(x_i-y_i)^2}{x_i}
\end{equation}

\begin{equation}
\label{formula:jeffrey}
\mathit{JeffreyDivergence}=\sum_i (x_i-y_i) \log\left(\frac{x_i}{y_i}\right)
\end{equation}

\begin{equation}
\label{formula:kullback}
\mathit{KullbackLeibler}=\sum_i x_i \log\left(\frac{x_i}{y_i}\right)
\end{equation}

\begin{equation}
\label{formula:maha}
\mathit{MahalanobisDistance}=\sum_i \sum_j (x_i-y_i)(x_j-y_j)
\end{equation}

Equation~\ref{formula:norm} is the most successful attempt of the author of this work. It is a very simple normalization that has certainly been used to measure similarity before.

\begin{equation}
\label{formula:norm}
\mathit{Normalization}=\left|\frac{\mu_x}{\sigma_x}-\frac{\mu_y}{\sigma_y}\right|
\end{equation}

\section{Predicate-based Measures}

\begin{equation}
\label{formula:hawkinsdotson}
\mathit{HawkinsDotson}=\frac{1}{2}\left(\frac{a}{a+b+c}+\frac{d}{b+c+d}\right)
\end{equation}

\begin{equation}
\label{formula:sorgenfrei}
\mathit{Sorgenfrei}=\frac{a^2}{(a+b)(a+c)}
\end{equation}

\begin{equation}
\label{formula:russelrao}
\mathit{RusselRao}=\frac{a}{a+b+c+d}
\end{equation}

\begin{equation}
\label{formula:baroniurbanibuser}
\mathit{BaroniUrbaniBuser}=\frac{\sqrt{ad}+a-b-c}{\sqrt{ad}+a+b+c}
\end{equation}

\begin{equation}
\label{formula:proportion}
\mathit{ProportionOfOverlap}=\frac{ad-bc}{K\left(1-\frac{a}{(a+b)(a+c)}\right)\left(2a+b+c-\frac{(a+b)(a+c)}{K}\right)}
\end{equation}

\begin{equation}
\label{formula:coeff}
\mathit{CoefficientOfArithmeticMeans}=\frac{2(ad-bc)}{K(2a+b+c)}
\end{equation}

\begin{equation}
\label{formula:variancedis}
\mathit{VarianceDissimilarity}=\frac{b+c}{4K}
\end{equation}

\begin{equation}
\label{formula:batageljbren}
\mathit{BatageljBren}=\frac{bc}{ad}
\end{equation}

\begin{equation}
\label{formula:baulieu2}
\mathit{Baulieu2}=\frac{K(b+c)-(b-c)^2}{K^2}
\end{equation}

\begin{equation}
\label{formula:hamming}
\mathit{HammingDistance}=b+c
\end{equation}

\begin{equation}
\label{formula:complhamming}
\mathit{ComplementOfHammingDistance}=a+d
\end{equation}

\section{Generalization Functions}
\label{sec:general}

We set $\rho=0.6$ in Equation~\ref{formula:dirac} for our experiments.

\begin{equation}
\label{formula:dirac}
\mathit{Boxing}=
\begin{dcases}
    1,& \text{if } -\rho \leq \mathit{distance} \leq \rho\\
    0,              & \text{otherwise}
\end{dcases}
\end{equation}

\begin{equation}
\label{formula:gaussian1}
\mathit{Gaussian}=e^{-\mathit{distance}^2}
\end{equation}

\begin{equation}
\label{formula:shepard1}
\mathit{Shepard}=e^{-|\mathit{distance}|}
\end{equation}

\begin{equation}
\label{formula:none1}
\mathit{None}=-\mathit{distance}
\end{equation}

