%=====================================================================
% 
% approxjoin4/approxjoin.tex
%
% ILP2008 "approximate join" paper (Draft 4 - CRC)
% 
%=====================================================================

\documentclass{llncs}
\usepackage{llncsdoc}

% enable writing of latex source line no. info to pdf
%\usepackage{pdfsync}

\usepackage{amsmath,amssymb} %,amsfonts}

\usepackage{stmaryrd}

\usepackage{latexsym}
\usepackage{times}

\usepackage{lscape}
\usepackage{subfig}

\usepackage{epsfig}

\usepackage{pstricks,pst-node,pst-tree,graphics}

%\usepackage{listings}
%\usepackage{verbatim}



% Theorem-like environment for notation remarks; shares the theorem counter.
\spnewtheorem{notation}[theorem]{Notation}{\itshape}{\rmfamily}

% semantic join symbols
\newcommand*{\lsem}{\llbracket}                 % left semantic bracket
\newcommand*{\rsem}{\rrbracket}                 % right semantic bracket
\newcommand*{\semjoin}{\stackrel{\sim}{\Join}}  % approximate (semantic) join
\newcommand*{\bigsemjoin}{\bigtimes}            % n-ary semantic join
\newcommand*{\semeq}{\sim}                      % semantic equality

% Joins between two named relations (#1/#3 = relation names, #2 = join
% condition subscript).  \textit{} replaces the obsolete \textrm{\em ...}
% idiom (see l2tabu); inside the upright \textrm, \em switched to italic,
% so the typeset output is identical.  All three are starred for
% consistency (short, \par-free bodies).
\newcommand*{\sjoin}[2]{\ensuremath{\textit{#1} \stackrel{\sim}{\Join} \textit{#2}}}
\newcommand*{\sjoinon}[3]{\ensuremath{\textit{#1} \stackrel{\sim}{\Join}_{#2} \textit{#3}}}
\newcommand*{\joinon}[3]{\ensuremath{\textit{#1} \Join_{#2} \textit{#3}}}

% approximate set operations
\newcommand*{\semcup}{\stackrel{\sim}{\cup}}
\newcommand*{\semcap}{\stackrel{\sim}{\cap}}
\newcommand*{\semnot}{\stackrel{\sim}{\neg}}

% pstree - type tree nodes
% #1 = pst-node name (so \loopback can reference the node later),
% #2 = edge label (small, on the left side of the connecting edge),
% #3 = node content, typeset in math mode.
\newcommand{\ltype}[3]{\TR[name=#1]{\small \ensuremath{#3}}\tlput{\tiny \ensuremath{\scriptstyle{#2}}}}
% Same as \ltype, but the edge label is placed on the right side of the edge.
\newcommand{\rtype}[3]{\TR[name=#1]{\small \ensuremath{#3}}\trput{\tiny \ensuremath{\scriptstyle{#2}}}}

% pstree - term tree nodes
% Unnamed variants of \ltype/\rtype for term (rather than type) trees:
% #1 = edge label, #2 = node content.  No node name is assigned, so these
% nodes cannot be targeted by \loopback.
\newcommand{\lterm}[2]{\TR{\small \ensuremath{#2}}\tlput{\tiny \ensuremath{\scriptstyle{#1}}}}
\newcommand{\rterm}[2]{\TR{\small \ensuremath{#2}}\trput{\tiny \ensuremath{\scriptstyle{#1}}}}

% pstree - reflexive loop back to root
% Draws a self-loop arrow on node #1 (a name given via \ltype/\rtype)
% with label #2 above the arrow.  The % after the opening brace prevents
% a spurious space token inside the macro body.
\newcommand{\loopback}[2]{%
\ncloop[angleB=180,loopsize=.4,arm=.5,linearc=.2,armA=0.3cm,armB=.3cm,linestyle=solid,linewidth=0.25pt]{->}{#1}{#1}
\taput{\small \ensuremath{\scriptstyle{#2}}}
}

% Notation shorthands used throughout the paper.
\newcommand*{\domain}{\ensuremath{D}}    % a relational domain
% Basic terms of type \alpha.  The subscript is kept OUTSIDE the \mathfrak
% argument so only B is set in fraktur, matching the body text's
% $\mathfrak{B}_\alpha$ (the original \mathfrak{B_\alpha} also frakturised
% the subscript).
\newcommand*{\domainb}{\ensuremath{\mathfrak{B}_\alpha}}
\newcommand*{\occurrence}{\ensuremath{\mathcal{O}}}   % type tree index set
%\newcommand{\occurrenceb}{\ensuremath{\mathcal{O}_{\mathfrak{B}}}}
\newcommand*{\occurrenceb}{\ensuremath{\mathcal{O}}}  % index set on basic terms


%=====================================================================
\title{Querying and Merging Heterogeneous Data by Approximate Joins on Higher-Order Terms}

\author{Simon Price \and Peter Flach}

\institute{
    Department of Computer Science, University of Bristol, 
    Bristol BS8~1UB, United Kingdom. \\
    \email{\{simon.price,peter.flach\}@bristol.ac.uk}
}

\begin{document}
\maketitle

%=====================================================================
\begin{abstract}
Integrating heterogeneous data from sources as diverse as web pages, digital libraries, knowledge bases, the Semantic Web and databases is an open problem. The ultimate aim of our work is to be able to query such heterogeneous data sources as if their data were conveniently held in a single relational database. Pursuant to this aim, we propose a generalisation of joins from the relational database model to enable joins on arbitrarily complex structured data in a higher-order representation. By incorporating kernels and distances for structured data, we further extend this model to support approximate joins of heterogeneous data. We demonstrate the flexibility of our approach in the publications domain by evaluating example approximate queries on the CORA data sets, joining on types ranging from sets of co-authors through to entire publications.
\end{abstract}


%=====================================================================
\section{Introduction}

An increasingly important problem is the integration of data from sources as diverse as web pages, digital libraries, knowledge bases, the Semantic Web and databases that, collectively, are referred to as heterogeneous data. Integration allows an application to query the data using a single query language, just as if the data were a single homogeneous data source.

In this paper we combine two contrasting knowledge representational approaches into a single coherent formalism that is well suited to the integration of heterogeneous data. The first of these representational approaches, the relational model, is widely used as the basis for relational databases and is accompanied by a well-defined algebra for manipulating relational data. However, relational representations of complex structured data can be difficult to design and even more difficult for a human to read. The second representational approach, terms in a higher-order logic, offers a more human-readable representation of structured data than the relational model but has no well-defined analogue of the relational algebra for the querying of its terms. The formalism we introduce here is a subset of relational algebra upgraded for terms in a higher-order logic, bringing the well known and widely used join operator of relational algebra to the knowledge representational formalism of higher-order terms. This new algebra incorporates a generalisation of the relational model to higher-order terms and we show that a join operator from the relational model may be viewed as a special case of the higher-order join.

Data integration typically transforms heterogeneous data formats into a single homogeneous data format, usually into the format which has the most convenient algebra for data integration rather than the format with the most natural representation of the data. In an ideal data integration scenario, where no uncertainty exists in the correspondences between individuals from different data sources, the homogeneous data format chosen is merely a technical implementation detail and places no restrictions on what may be reliably integrated. Unfortunately, the more diverse the sources of data being integrated, the more likely it will be that the integration involves a degree of uncertainty -- for example, in identifying correspondences between individuals from different data sources. In order to automate such integration tasks, approximate matching techniques from statistics, machine learning and data mining may be employed. However, the transformation of data to the most widely used homogeneous format, relational data, obscures the data's natural type and structure, unnecessarily complicating the application of approximate matching techniques. In this paper, we show that approximate matching can take place without this obfuscating transformation to a relational representation. Our approach to approximate matching for data integration uses kernels and distances applied directly to representations of individuals as (closed) terms in a higher-order logic.

The outline of the paper is as follows: Section 2 reformulates the traditional relational join from the relational model. Section 3 introduces the knowledge representational formalism and describes a family of kernels and distances on that formalism. Section 4 upgrades the relational join to handle structured data. Section 5 investigates the application of joins for structured data. The remaining sections review related work and future directions before concluding.


%=====================================================================
\section{Relational Joins}

We first define the relational join in its exact form and then adapt this to define the approximate relational join. Our definitions differ from those of the traditional relational model in that we have not embedded attribute names, which are in fact database-specific schema metadata, into the data representation. We assume throughout that there is an associated schema for each relation. By keeping the metadata separate from the data we achieve a more elegant upgrade from the relational model to the structured data model.

%---------------------------------------------------------------------
\subsection{Exact Relational Joins} \label{traditionalrelation}

An $n$-tuple $(x_1,\dots,x_n)$ is an element in $\domain_1 \times \dots \times \domain_n$ where $x_1,\dots,x_n$ are values drawn from domains $\domain_1, \dots, \domain_n$ respectively. We refer to 
$n$-tuples as {\em tuples} unless the value of $n$ is significant.
Item $i \in \{ 1,\dots,n \}$ of a tuple $t=(x_1,\dots,x_n)$ is the value $x_i$ and is written $t|_i$. A {\em relation} $R$ of degree $n$ is a finite set of $n$-tuples such that $R \subseteq \domain_1 \times \dots \times \domain_n$ where $\domain_1, \dots, \domain_n$ are domains, which need not necessarily be distinct. The {\em relation index} $I_R$ of a relation $R$ of degree $n$ is the set $\{ 1,\dots,n \}$.

\begin{definition}[$\theta$-Restriction] \label{thetarestriction}
Let $\theta$ be a predicate  $\theta : \domain \times \domain \to \mathbb{B}$ for some domain $\domain$. If $A$ and $B$ are relations with tuple items $a|_i \in \domain$ and $b|_j \in \domain$ respectively for some
$(i,j) \in I_A \times I_B$, then the {\em $\theta$-restriction} $\sigma_{i \theta j}$ is defined on $T \subseteq A \times B$ as 
follows: $
\sigma_{i\, \theta j}(T)
= 
\{ (a,b) \ | \ \ a|_i\, \theta\, b|_j\ \wedge\ (a,b) \in T \}.
$
\end{definition}

\noindent
The infix $\theta$ in the subscript of $\sigma$ follows the historical convention from the relation database literature and so $i\, \theta j$, or equivalently $\theta(i,j)$, does not mean that $\theta$ applies to $i$ and $j$\,;\; instead $i\, \theta j$ is shorthand notation for the membership test \,$a|_i\, \theta\ b|_j$ for all $(a,b) \in T$. 
The operator\;$\theta$ is typically drawn from the set $\{ =,\neq,<,\leq,>,\geq \}$ but does not necessarily have to come from this set.
$\theta$-restriction is often just referred to as {\em restriction} and in such cases $\theta$ is assumed to be the equality operator.
The name {\em selection} is often used instead of {\em restriction}, but the latter avoids confusion with the similarly named and better known {\em\tt select} operator from SQL which has a somewhat different meaning. Restriction is also sometimes defined as {\em generalised restriction}: Let $\varphi$ be a proposition that consists of atoms as allowed in $\theta$-restriction and the logical operators $\wedge$, $\vee$ and $\lnot$, then if $A$ and $B$ are relations, the generalised restriction $\sigma_{\varphi}$ is defined on $T \subseteq A \times B$ as
$
\sigma_{\varphi}(T)
= 
\{\ t \ | \ \varphi(t)\ \wedge\ t \in T \}.
$
Standard results show that generalised restrictions can always be expressed as combinations of $\theta$-restrictions.\, $\theta$-restriction is thus the basis of the following fundamental relational join operator.

\begin{definition}[$\theta$-Join]
Let $\theta$ be a predicate  $\theta : \domain \times \domain \to \mathbb{B}$ for some domain $\domain$. If $A$ and $B$ are relations with tuple items $a|_i \in \domain$ and $b|_j \in \domain$ respectively for some
$(i,j) \in I_A \times I_B$, then the {\em $\theta$-join} $\Join_{i\, \theta j}$ of $A$ and $B$ is 
defined as $
A \Join_{i\, \theta j} B = \sigma_{i\, \theta j}(A \times B).
$
\end{definition}

\noindent
When $\theta$ is equality the $\theta$-join is called the {\em equi-join}.
By replacing the $\theta$-restriction operator in the $\theta$-join by the generalised restriction operator we arrive at the definition of the {\em generalised join}:
Let $\varphi$ be a proposition that consists of atoms as allowed in $\theta$-restriction and the logical operators $\wedge$, $\vee$ and $\lnot$. If $A$ and $B$ are relations then the {\em relational join} $\Join_{\varphi}$ is defined as
$
A \Join_{\varphi} B = \sigma_{\varphi}(A \times B).
$
Other non-fundamental joins include the {\em natural join}, {\em semijoin}, {\em antijoin}, {\em outer joins} and {\em inner joins} \cite{Codd1990,Date1991}. For the purposes of upgrading relational joins to handle structured data, it is sufficient to consider just the $\theta$-join and optionally, as a useful syntactic convenience, the generalised join.

%---------------------------------------------------------------------
\subsection{Approximate Relational Joins}

In order to turn an exact relational join into an approximate one it is necessary to replace the exact $\theta$ operator in\, $\theta$-restriction with a suitable approximate version. For example, substituting exact equality $=$ with an approximate equality $\approx$ enables joining on tuple items that are either the same or in some way sufficiently similar.

One method of implementing approximate equality $\approx$ is to use a  distance metric or pseudo-metric {\em dist}, defined on the domain of a pair of relational tuple items, together with a threshold $\delta$ to define a {\em proximity relation}.

\begin{definition}[Proximity]
If the function\, $\text{dist} : \domain \times \domain \to \mathbb{R}$\, is a distance on pairs of values from some domain $\domain$ and $\delta \in \mathbb{R}$ $(\delta \geq 0)$ is a threshold then {\em proximity} is a predicate $\approx\; : \domain \times \domain \to \mathbb{B}$ defined by
\[
    \forall x,y \in \domain\;\ (x \approx y)
    \iff 
    \text{dist}(x,y) \leq \delta .
\]
\end{definition}

By the definition of distance, the co-domain of {\em dist} is not constrained to have an upper bound. Some normalising function $\varphi$ may be used to apply an upper bound to a distance. The function $\varphi : \mathbb{R} \to \mathbb{R}$ must be a non-decreasing function from the positive reals into some closed interval, typically $[0,1]$, such that $\varphi(0) = 0$, $\varphi(v) > 0 \text{ if } v>0$, and $\varphi(v+u) \leq \varphi(v) + \varphi(u)$, for each $v$ and $u$. Example choices of $\varphi$ from \cite{Lloyd2003bh} are $\varphi(v)=\textit{min}(v,1)$ or $\varphi(v)=\frac{v}{v+1}$. Alternatively, the normalisation may be performed in the feature space of $x,y \in \domain$ so that $\textit{dist}(x,y)$ is inherently normalised. For example, if the distance is derived from a kernel then a normalising kernel may be used \cite{Gaertner2004ax}.

\begin{definition}[Proximity-Join]
Let $\approx\ : \domain \times \domain \to \mathbb{B}$ be a proximity for some domain $\domain$. If $A$ and $B$ are relations with tuple items $a|_i \in \domain$ and $b|_j \in \domain$ respectively for some
$(i,j) \in I_A \times I_B$, the {\em proximity-join} $\semjoin_{i\, \approx j}$ of $A$ and $B$ is 
$
A \semjoin_{i\, \approx j} B = \sigma_{i\, \approx j}(A \times B).
$
\end{definition}

\noindent
The same historical notational convention is followed here for the subscripted $\approx$ as for the subscripted $\theta$ described earlier for the exact $\theta$-join.
The proximity-join as defined here is an approximate analogue of the exact relational equi-join. By choosing other proximity relations that are approximate analogues of exact relations, for example where $\theta \in \{ =,\neq,<,\leq,>,\geq \}$, an approximate version of the relational $\theta$-join might be defined. In this paper we restrict our attention to the proximity-join.


%=====================================================================
\section{Representing Structured Data}

The relational model is now the de facto standard for database-driven applications in data mining and computing in general, but it is not ideally suited for representing semi-structured data such as Web pages and XML, nor structured data such as the Semantic Web and RDF. However, the representation of such structures in relational databases is commonplace using a multitude of (often tortuous) representations and querying patterns. By contrast, the {\em individuals-as-terms} model offers straightforward representations of both structured and semi-structured data while at the same time having the representational capacity to represent relations from the relational model.
The individuals-as-terms representation is a generalisation of the relational model's attribute-value representation and collects all information about an individual in a single term. 

We are not advocating the individuals-as-terms representation as a replacement for the general purpose relational representation. But in the context of querying and merging heterogeneous data, the individuals-as-terms representation more transparently models the structure of the data in a way that is both human-readable and that explicitly exposes that structure to machine learning and data mining algorithms.

The knowledge representational formalism we use as our individuals-as-terms representation is {\em basic terms}, a family of typed terms in higher-order logic, which is based on Church's simple theory of types \cite{church1940} with several extensions \cite{Lloyd2003bh}. This formalism has been chosen over the possible alternative of first-order logic because terms in the higher-order logic natively support a variety of data types that are important for representing individuals, including sets, multisets and graphs. Being a strongly typed logic helps to reduce search spaces, and the types of terms provide useful metadata. The theory behind the logic and the individuals-as-terms formalism is set out in \cite{Lloyd2003bh} and we give only a brief overview here. 

We assume an {\em alphabet} consisting of:
$\mathfrak{T}$ the set of type constructors of various arities,
$\mathfrak{P}$ the set of parameters,
$\mathfrak{C}$ the set of constants, and 
$\mathfrak{V}$ the set of variables.
Included in $\mathfrak{T}$ is the constructor $\Omega$ of arity $0$ with a corresponding domain of $\{ \textit{True}, \textit{False} \}$, the booleans. Types are constructed from type constructors in $\mathfrak{T}$ and type variables in $\mathfrak{P}$ using the symbols $\to$ for function types and $\times$ for product types.
A {\em type} is defined inductively as follows:
(1) Each parameter in $\mathfrak{P}$ is a type.\: 
(2) If $T$ is a type constructor in $\mathfrak{T}$ of arity $k$ and $\alpha_1,\dots,\alpha_k$ are types, then $T\ \alpha_1 \dots \alpha_k$ is a type. (For $k=0$, this reduces to a type constructor of arity 0 being a type).\: 
(3) If $\alpha$ and $\beta$ are types, then $\alpha \to \beta$ is a type.\:
(4) If $\alpha_1,\dots,\alpha_n$ are types, then $\alpha_1 \times \dots \times \alpha_n$ is a type.
(For $n=0$, this reduces to $1$ being a type).
A type is {\em closed} if it contains no parameters. 
$\mathfrak{S}^C$ denotes the set of all closed types obtained from an alphabet.

The set of constants $\mathfrak{C}$ includes $\top$ (true) and $\bot$ (false). A {\em signature} is the declared type for a constant.
A constant $C$ with signature $\alpha$ is often denoted $C\ :\ \alpha$. 
Let $[]$ be the empty list constructor with signature $\text{\em List}\ a$ where $a$ is a parameter and $\text{\em List}$ is a type constructor. Let $\#$ be the list constructor with signature $a \to \text{\em List}\ a \to \text{\em List}\ a$.

The {\em terms} of the logic are the terms of typed $\lambda$-calculus and are formed in the usual way by abstraction, tupling and application from constants in $\mathfrak{C}$ and a set of variables. The set of all terms obtained from a particular alphabet is denoted $\mathfrak{L}$. 
A basic term is the canonical representative of an equivalence class of terms \cite{Gaertner2004ax,Lloyd2003bh}.
The set of {\em basic terms}, $\mathfrak{B}$, is defined inductively as follows:
(1) {\em Basic structures} -- If $C$ is a data constructor having signature $\sigma_1 \to \dots \to \sigma_n \to (T\ a_1 \dots a_k)$, $t_1,\dots,t_n \in \mathfrak{B}\ (n \geq 0)$, and $t$ is $C\ t_1 \dots t_n \in \mathfrak{L}$, then $t \in \mathfrak{B}$.\:
(2) {\em Basic abstractions} -- If $t_1,\dots,t_n \in \mathfrak{B}$, $s_1,\dots,s_n \in \mathfrak{B}\ (n \geq 0)$, $s_0 \in \mathfrak{D}$ and $t$ is 
$
    \lambda x \text{ if } x=t_1 \text{ then } s_1 \text{ else } \dots 
     \text{ if } x=t_n \text{ then } s_n \text{ else } s_0 \in \mathfrak{L},
$
then $t \in \mathfrak{B}$.\:
(3) {\em Basic tuples} -- If $t_1,\dots,t_n \in \mathfrak{B}\ (n \geq 0)$ and $t$ is $(t_1,\dots,t_n) \in \mathfrak{L}$, then $t \in \mathfrak{B}$.
See section \ref{structuredjoins} for examples and diagrams of basic terms.


%---------------------------------------------------------------------
\subsection{Kernels and Distances for Basic Terms}

Kernel functions \cite{Shawe-Taylor2004ve} are an effective way of inducing distances on a wide variety of data structures. One promising recent kernel function for structured data is the default kernel for basic terms introduced in \cite{Gaertner2004ax}.

\begin{definition}[Default Kernel for Basic Terms \cite{Gaertner2004ax}]
The function $k : \mathfrak{B} \times \mathfrak{B} \to \mathbb{R}$ is defined inductively on the structure of terms in $\mathfrak{B}$ as follows.
\begin{enumerate}

\item If $s,t \in \mathfrak{B}_\alpha$, where $\alpha = T \alpha_1 \dots \alpha_k$, for some $T, \alpha_1, \dots, \alpha_k$, then
\[
k(s,t) =
    \begin{cases}
        \kappa_T(C,D) & \text{if} \ \  C \neq D \\
        \kappa_T(C,C) + \sum_{i=1}^{n}{k(s_i,t_i)} & \text{otherwise}
    \end{cases}
\]
where $s$ is $C\ s_1 \dots s_n$ and $t$ is $D\ t_1 \dots t_m$. \\

\item If $s,t \in \mathfrak{B}_\alpha$, where $\alpha = \beta \to \gamma$, for some $\beta$, $\gamma$, then
\[
k(s,t) =
    \sum_{\substack{u \in \text{supp}(s) \\ v \in \text{supp}(t)}}{k(V(s\ u),V(t\ v)) \cdot k(u,v)}.
\]

\item If $s,t \in \mathfrak{B}_\alpha$, where $\alpha = \alpha_1 \times \dots \times \alpha_n$, for some $\alpha_1, \dots, \alpha_n$, then
\[
k(s,t) = \sum_{i=1}^{n}{k(s_i,t_i)} ,
\]
where $s$ is $(s_1,\dots,s_n)$ and $t$ is $(t_1,\dots,t_n)$. \\

\item If there does not exist $\alpha \in \mathfrak{S}^C$ such that $s,t \in \mathfrak{B}_\alpha$, then $k(s,t) = 0$.

\end{enumerate}
\end{definition}

\noindent
The definition assumes that for each type constructor $T \in \mathfrak{T}$, $\kappa_T : \mathcal{X}_T \times \mathcal{X}_T \to \mathbb{R}$ is a kernel on the set of data constructors $\mathcal{X}_T$ associated with $T$.  Below we give an example of the calculation of the default kernel for an example data structure: sets of strings.

\begin{example}[Default Kernel on Sets of Strings]
Let $S$ be a nullary type constructor for strings and $A,B,C,D:S$. 
Choose $\kappa_S$ and $\kappa_\Omega$ to be the matching kernel.
Let $s$ be the set $\{A,B,C\} \in \mathfrak{B}_{S \to \Omega}$,
$t=\{A,D\}$, and $u=\{B,C\}$. Then
\begin{eqnarray*}
k(s,t)&=& k(A,A)k(\top,\top)+k(A,D)k(\top,\top)+k(B,A)k(\top,\top) \\
      & & \quad+k(B,D)k(\top,\top)+k(C,A)k(\top,\top)+k(C,D)k(\top,\top) \\
      &=& \kappa_S(A,A)\kappa_\Omega(\top,\top)+\kappa_S(A,D)\kappa_\Omega(\top,\top)+\kappa_S(B,A)\kappa_\Omega(\top,\top) \\
      & & \quad+\kappa_S(B,D)\kappa_\Omega(\top,\top)+\kappa_S(C,A)\kappa_\Omega(\top,\top)+\kappa_S(C,D)\kappa_\Omega(\top,\top) \\
      &=& \kappa_S(A,A)+\kappa_S(A,D)+\kappa_S(B,A)+\kappa_S(B,D) \\
      & & \quad+\kappa_S(C,A)+\kappa_S(C,D) \\
      &=& 1+0+0+0+0+0 \\
      &=& 1.
\end{eqnarray*}
Similarly, $k(s,u)=2$ and $k(t,u)=0$.
\end{example}

Noting that valid positive semi-definite kernels induce pseudo-metrics \cite{Gaertner2004ax}, this allows the derivation of a distance from any such kernel, including the kernel for basic terms, as follows.
Let $k : \mathcal{X} \times \mathcal{X} \to \mathbb{R}$ be a kernel on $\mathcal{X}$. The distance measure induced by $k$ is defined as 
$
d_k(s,t) = \sqrt{k(s,s) - 2k(s,t) + k(t,t)}.
$ 
If $k$ is a valid kernel then $d_k$ is well behaved in that it satisfies the conditions of a pseudo-metric.
Continuing the earlier sets of strings example, the following example illustrates the calculation of a distance from the default kernel for basic terms.

\begin{example}[Default Distance on Sets of Strings]
Let $s=\{A,B,C\}$, $t=\{A,D\}$, and $u=\{B,C\}$ where $s,t,u \in \mathfrak{B}_{S \to \Omega}$.
We have $k(s,s)=3$, $k(t,t)=2$ and $k(u,u)=2$.
Then, $d_k(s,t)=\sqrt{3-2+2}=1.73$, $d_k(s,u)=\sqrt{3-4+2}=1$, and $d_k(t,u)=\sqrt{2-0+2}=2$.
\end{example}

However, one of the strengths of the default kernel is that it allows any other valid kernel to be associated with a specific type. For example, the following {\em p}-spectrum kernel, defined on strings, is used in our experiments later in the paper.

\begin{definition}[{\em p}-Spectrum Kernel  \cite{Shawe-Taylor2004ve}]
The feature space $F$ associated with the {\em p}-spectrum kernel is indexed by $I = \Sigma^p$, with the explicit embedding from the space of all finite sequences over an alphabet $\Sigma$ to a vector space $F$ given by 
$
\phi^p_u(s) = | \{ (v_1,v_2)  : s = v_1 u v_2 \} |, u \in \Sigma^p .
$
The associated kernel is defined as
$ 
\kappa_p(s,t) = \langle \phi^p(s), \phi^p(t) \rangle = \sum_{u \in \Sigma^p}{\phi^p_u(s) \phi^p_u(t)} .
$
\end{definition}



%=====================================================================
\section{Relational Joins for Structured Data} \label{structuredjoins}

We now upgrade both exact and approximate relational joins for structured data. The way we achieve this is to first upgrade the knowledge representation of the relation to be a set of basic terms rather than the traditional set of tuples. We then upgrade the relation index so that it indexes parts of a basic term rather than the traditional parts of a tuple. Once these two steps are completed, upgrading the exact relational join follows almost automatically with only modest changes to the definitions of the $\theta$-restriction and joins. The final step then brings together the default kernel for basic terms and the approximate join to arrive at the main result of an approximate relational join for structured data. So to begin, we first upgrade the relation from section  \ref{traditionalrelation} to become the {\em basic term relation}, which is a basic term of type $\alpha \to \Omega$.

\begin{definition}[Basic Term Relation] \label{def:basictermrelation}
A {\em basic term relation} $R \subseteq \mathfrak{B}_\alpha$ is a finite set of basic terms of the same type.
\end{definition}

In order to upgrade the relation index from section \ref{traditionalrelation} so that it is applicable to the basic term relation, a suitable method of indexing sub-parts of a basic term is required. Recall that well-formed basic terms can consist of basic structures (e.g. lists, trees), basic abstractions (e.g. sets,  multisets), basic tuples or arbitrary combinations of these three. 


%---------------------------------------------------------------------
\subsection{Indexing Basic Terms}

In the logic, sub-parts of a term are referred to as subterms and so we are concerned with indexing the subterms of a basic term. The standard method for indexing subterms in the logic enumerates a decomposition of a given term such that every subterm is labelled with a unique string \cite{Lloyd2003bh}. 

However, we introduce an alternative approach to indexing that, instead of enumerating all subterms of a term, defines a {\em type tree index set} over all subtypes of the type of a basic term. To do this we first adopt the definition of a type tree from \cite{Elias2005} and then define a different annotation of the tree such that every member of the type tree index set identifies a set of terms rather than a single term. This  ensures any index defined on a type is meaningful across all terms of that type. Furthermore, the set of subterms identified is guaranteed to consist entirely of well-formed basic terms.

To achieve this we follow the same interpretation of subtypes as \cite{Lloyd2003bh} and restrict our attention to basic terms whose basic structures are in canonical form as defined below.

\begin{definition}[Basic Structures in Canonical Form]
A type $\tau = T\ \alpha_1 \dots \alpha_k$ is a basic structure in canonical form when, for all data constructors $C_i : \tau_{i1} \to \dots \to \tau_{in} \to \tau$ that are associated with $T$, all the types of $\tau_{ij}$ are subtypes of $\tau$.
\end{definition}

\noindent
We begin our definition of the type tree index set with some preparatory notation. 
Let $\mathbb{Z}^+$ denote the set of positive integers and $(\mathbb{Z}^+)^*$ the set of all strings over the alphabet of positive integers, with $\varepsilon$ denoting the empty string. $io$ denotes the string concatenation of $i$ with $o$ where $i \in \mathbb{Z}^+$ and $o \in (\mathbb{Z}^+)^*$.

\begin{definition}[Type Tree Index Set] \label{def:typetreeindexset}
The {\em type tree index set} of a canonical type $\tau$, denoted $\occurrenceb(\tau)$, is the set of strings in $(\mathbb{Z}^{+})^{*}$ defined inductively on the structure of $\tau$.
\begin{enumerate}

\item If $\tau$ is an atomic type, 
then $\occurrenceb(\tau) = \{ \varepsilon \}$.

\item If $\tau$ is a basic structure type $\tau = T\ \alpha_1 \dots \alpha_n$ in canonical form, with data constructors $C_i : \tau_{i_1} \to \dots \to \tau_{i_m} \to \tau$\, for all $i \in \{1,\dots,l\}$, 
then 
$\occurrenceb(\tau) = \{ \varepsilon \}\ \cup\ \bigcup_{v=1}^{p}\{\, vo_v\ |\ o_v \in \occurrenceb(\xi_v) \}$, 
where $\xi_1, \dots, \xi_p$ are  the types from $\alpha_k$ 
where $\alpha_k=\tau_{i_j}$ and $\tau_{i_j} \ne \tau$, and 
assuming that for every $\tau_{i_j} \neq \tau$ 
there exists an $\alpha_k$ such that  $\alpha_k = \tau_{i_j}$.

\item If $\tau$ is a basic abstraction type $\beta \to \gamma$, then $\occurrenceb(\tau) = \{ \varepsilon \}\ \cup\ \{\, 1o\ |\ o \in \occurrenceb(\beta) \}\ \cup\ \{\, 2o\ |\ o \in \occurrenceb(\gamma) \}$.

\item If $\tau$ is a basic tuple type $\tau = \tau_1 \times \dots \times \tau_n$, then $\occurrenceb(\tau) = \{ \varepsilon \}\ \cup\ \bigcup_{i=1}^{n}\{\, io_i\ |\ o_i \in \occurrenceb(\tau_i) \}$.

\end{enumerate}
\end{definition}

\noindent
Part 1, the base case, states that types for which all the associated data structures have arity zero, such as $\Omega$ (the type of the booleans), $\textit{Int}$ (the type of the integers), and $\textit{Char}$ (the type of characters), have a singleton index set containing the empty string.  Part 2 states that each subtype that occurs in the signatures of the associated data constructors, and that is not itself of type $\tau$ of the basic structure, is labelled with a unique string. 
Part 3 labels the $\beta$ and $\gamma$ types of basic abstractions with a pair of unique strings. Similarly, part 4 labels each tuple item in a basic tuple with a unique string.

The significance of defining indexing on the type tree of basic terms rather than on the terms themselves is that each member of a type tree index set $o \in \occurrenceb(\tau)$ is not  uniquely tied to any individual term of type $\tau$. This increases the generality of the indexing such that each member of the type tree index set for type $\tau$ identifies, for any basic term $t:\tau$, an equivalence class of subterms rather than a single term. Thus $\occurrenceb(t)$ induces a set of equivalence classes on the subterms of $t$. We refer to the set of subterms identified with a given member (index) of the type tree index set as the {\em basic subterm set} at that index.

\begin{definition}[Basic Subterm Set]
If $t$ is a basic term of type $\tau$ and $o \in \occurrenceb(\tau)$ then the {\em basic subterm set of $t$ at type tree index $o$}, denoted $t|_o$, is defined inductively on the length of $o$ as follows.
\begin{enumerate}

\item If $o = \varepsilon$, then $t|_o = \{ t \}$.

\item If $o = jo'$, for some $o'$, and 
      $t$ has the form $C\ t_1 \dots t_m$, with associated type $T\ \alpha_1 \dots \alpha_n$, 
      then $t|_o = s_j|_{o'}$ 
      where $s_j = t_i:\tau_i$ 
      such that $\tau_i \neq \tau$ and $\tau_i=\alpha_j$.

\item If $o = 1o'$, for some $o'$, and
      $t$ has the form $\text{\em if\_then\_else}(u,v,s)$,
      then $t|_o = u|_{o'} \cup s|_{o}$.

\item If $o = 2o'$, for some $o'$, and 
      $t$ has the form $\text{\em if\_then\_else}(u,v,s)$,
      then $t|_o = v|_{o'} \cup s|_{o}$.

\item If $o = io'$, for some $o'$, and 
      $t$ has the form $(t_1,\dots,t_n)$,
      then $t|_o = t_i|_{o'}$, where $1 \leq i \leq n$.

\end{enumerate}
A {\em basic subterm set} is a set of basic subterms of a basic term at some type tree index. A basic subterm set is {\em proper} if it is not at type tree index $\varepsilon$.
\end{definition}

Basic subterms indexed in part 1, the base case, are singleton sets containing an atomic term. Basic subterms indexed in part 2 are basic structures. Basic subterms indexed in parts 3 and 4 are the support and value of basic abstractions, i.e. respective instances of $\alpha$ and $\beta$, from $\alpha \to \beta$. Basic subterms indexed in part 5 are basic tuples.

Below we give examples of a type tree index set and basic subterm sets for each of basic tuples, basic structures, and basic abstractions.  We start with basic tuples in Example \ref{eg:typetuples}, where it can be seen that type-based indexing identifies all the tuple items, but as singleton sets, and in addition it identifies the reflexive term at $t|_\varepsilon$.

\begin{example} \label{eg:typetuples}
If basic tuple $t \in \mathfrak{B}_{\textit{M} \times \textit{N} \times \textit{O} \times \textit{P}}$ is the term $t=(A,B,C,D)$, where $A:\textit{M}$,\: $B:\textit{N}$,\: $C:\textit{O}$,\: $D:\textit{P}$, then the type tree index set of $t$ is  $\occurrenceb(t)=\{\varepsilon,1,2,3,4\}$, the derivation of which can be seen from \figurename \ref{fig:typetuples}. The basic  subterm sets of $t$ are $t|_\varepsilon=\{(A,B,C,D)\}$, $t|_1=\{A\}$, $t|_2=\{B\}$, $t|_3=\{C\}$ and $t|_4=\{D\}$.
\end{example}

\begin{figure}[h]
  \begin{center}
    \subfloat[]{
\pstree[nodesep=4pt,treesep=0.75em,levelsep=10ex,arrows=->,linestyle=solid,linewidth=0.25pt]{\ltype{root}{ }{\alpha_1 \times \dots \times \alpha_n}}{
\loopback{root}{\varepsilon}
\ltype{tupleitem1}{1}{\alpha_1}
\TR[name=tupleitem2,edge=none]{\small \ensuremath{\dots}}
\rtype{tupleitem3}{n}{\alpha_n}
}
    }
    \qquad
    \qquad
    \subfloat[]{
\pstree[nodesep=4pt,levelsep=10ex,arrows=->,linestyle=solid,linewidth=0.25pt]{\ltype{root}{}{\textit{M $\times$ N $\times$ O $\times$ P}} }{
\loopback{root}{\varepsilon}
\ltype{tupleitem1}{1}{\textit{M}}
\ltype{tupleitem2}{2}{\textit{N}}
\rtype{tupleitem3}{3}{\textit{O}}
\rtype{tupleitem4}{4}{\textit{P}}
}
    }
    \qquad
    \qquad
    \subfloat[]{
\pstree[nodesep=4pt,levelsep=10ex,arrows=->,linestyle=solid,linewidth=0.25pt]{ \lterm{}{(A,B,C,D)} }{
\lterm{1}{A}
\lterm{2}{B}
\rterm{3}{C}
\rterm{4}{D}
}
    }
    \caption{Type-based indexing for basic tuples. (a) Type tree index for $n$-tuples of type $\alpha_1 \times \dots \times \alpha_n$.\: (b) Type tree index for $4$-tuples of type $\textit{M} \times \textit{N} \times \textit{O} \times \textit{P}$.\: (c) Basic subterm tree for term $(A,B,C,D)$ where $A:\textit{M}$,\: $B:\textit{N}$,\: $C:\textit{O}$,\: $D:\textit{P}$.}
    \label{fig:typetuples}
  \end{center}
\end{figure}

\noindent
Representing basic structures, the usual right branching representation of lists is given in Example \ref{eg:typestructures},  where the basic subterm set at $t|_1$ captures one meaning of a list as a set of values and $t|_\varepsilon$ captures the meaning of a list as a set of sequences.

\begin{example} \label{eg:typestructures}
If $\tau$ is a type of lists such that $\tau = \textit{List M}$, where $\textit{M} \subseteq \mathfrak{B}$ is a nullary type constructor, with associated data constructors $\#$ and $[]$, having signatures $[] : \textit{List M}$, and $\# : \textit{M} \to \textit{List M} \to \textit{List M}$,
then the type tree index set of $\tau$ is $\occurrenceb(\tau)=\{ \varepsilon,1 \}$.
If basic terms $s,t \in \mathfrak{B}_{\textit{List M}}$ are the lists $s=[A,B,C]$ and $t=[A,D]$, then as can be seen from \figurename \ref{fig:typestructures}, the basic subterm sets of $s$ and $t$ are 
$s|_\varepsilon = \{ [A,B,C], [B,C], [C], [] \}$,
$s|_1 = \{ A,B,C \}$, and 
$t|_\varepsilon = \{ [A,D], [D], [] \}$, 
$t|_{1} = \{A,D\}$.
\end{example}

\begin{figure}[h]
  \begin{center}
    \subfloat[]{
\pstree[nodesep=4pt,levelsep=8ex,arrows=->,linestyle=solid,linewidth=0.5pt]{\ltype{root}{}{\textit{List $\alpha$}} }{
\loopback{root}{\varepsilon}
\TR[name=tupleitem201,edge=none]{}
\ltype{parameter351}{1}{\alpha}
\TR[name=tupleitem202,edge=none]{}
}
    }
    \qquad
    \qquad
    \subfloat[]{
\pstree[nodesep=4pt,levelsep=8ex,arrows=->,linestyle=solid,linewidth=0.25pt]{ \lterm{}{\#} }{
\lterm{1}{A}
\pstree{ \rterm{}{\#} }{
\lterm{1}{B}
\pstree{ \rterm{}{\#} }{
\lterm{1}{C}
\rterm{}{[]}
}
}
}
    }
    \qquad
    \qquad
    \subfloat[]{
\pstree[nodesep=4pt,levelsep=8ex,arrows=->,linestyle=solid,linewidth=0.25pt]{ \lterm{}{\#} }{
\lterm{1}{A}
\pstree{ \rterm{}{\#} }{
\lterm{1}{D}
\rterm{}{[]}
}
}
    }
    \caption{Type-based indexing for basic structures. (a) Type tree index for $\textit{List }\alpha$. (b) and (c) Basic subterm trees for terms $[A,B,C]$ and $[A,D]$ of type $\textit{List M}$ where $A,B,C,D : \textit{M}$.}
    \label{fig:typestructures}
  \end{center}
\end{figure}

\noindent
For basic abstractions, a set is given in Example \ref{eg:typeabstractionsets} and a multiset in Example \ref{eg:typeabstractionmultisets}. For both sets and multisets, $t|_1$ captures the meaning as a set of values whereas $t|_2$ will always be $\{\top\}$ for sets and a set of multiplicities for multisets. 
A corollary of Definition \ref{def:typetreeindexset} is that the type tree index set of a basic abstraction type is always $\{ \varepsilon,1,2 \}$.

\begin{example} \label{eg:typeabstractionsets}
If $\tau$ is a basic abstraction type representing sets such that $\tau = \textit{M} \to \Omega$, where $\textit{M} \subseteq \mathfrak{B}$ is a nullary type constructor, then the type tree index set of $\tau$ is $\occurrenceb(\tau) = \{ \varepsilon,1,2 \}$.
If basic term $t=\{A,B,C\}$, where $A,B,C:\textit{M}$, then the basic subterm sets are
$t|_\varepsilon = \{ \{A,B,C\} \}$,
$t|_1 = \{ A,B,C \}$ and 
$t|_2 = \{ \top \}$.
\end{example}

\begin{example} \label{eg:typeabstractionmultisets}
If $\tau$ is a basic abstraction type representing multisets such that $\tau = \textit{M} \to \textit{Nat}$, where $\textit{M} \subseteq \mathfrak{B}$ is a nullary type constructor and $\textit{Nat}$ is the type of the natural numbers, then the type tree index set of $\tau$ is $\occurrenceb(\tau) = \{ \varepsilon,1,2 \}$.
If basic term $t=\{A,A,A,B,C,C\}$, where $A,B,C:\textit{M}$, then the basic subterm sets are
$t|_\varepsilon = \{ \{A,A,A,B,C,C\} \}$,
$t|_1 = \{ A,B,C \}$ and 
$t|_2 = \{ 1,2,3 \}$.
\end{example}


\begin{figure}[h]
  \begin{center}
    \quad
    \subfloat[]{
       \label{set_type}
\pstree[nodesep=4pt,levelsep=10ex,treesep=2em,arrows=->,linestyle=dotted,linewidth=0.75pt]{\ltype{root}{}{\alpha \to \beta} }{
\loopback{root}{\varepsilon}
\ltype{alpha2}{1}{\alpha}
\rtype{beta2}{2}{\beta}
}
    }
    \qquad
    \subfloat[]{
       \label{set_term}
\pstree[nodesep=4pt,levelsep=10ex,treesep=2em,arrows=->,linestyle=solid,linewidth=0.25pt]{ \lterm{}{\{ A,B,C \}} }{
\lterm{1}{A}
\lterm{2}{\top}
\lterm{1}{B}
\rterm{2}{\top}
\rterm{1}{C}
\rterm{2}{\top}
}
    }
    \quad\:\:
    \subfloat[]{
       \label{multiset_term}
\pstree[nodesep=4pt,levelsep=10ex,treesep=2em,arrows=->,linestyle=solid,linewidth=0.25pt]{ \lterm{}{\langle A,A,A,B,C,C \rangle} }{
\lterm{1}{A}
\lterm{2}{3}
\lterm{1}{B}
\rterm{2}{1}
\rterm{1}{C}
\rterm{2}{2}
}
    }
    \setlength{\belowcaptionskip}{-20pt}
    \caption{Type-based indexing for basic abstractions. (a) Type tree index for type $\alpha \to \beta$. (b) Basic subterm tree for set $\{ A,B,C \}$, type $\textit{M} \to \Omega$, where  $A,B,C : \textit{M}$ and $\top : \Omega$. (c) Basic subterm tree for multiset $\langle A,A,A,B,C,C \rangle$, type $\textit{M} \to \text{Nat}$, where  $A,B,C : \textit{M}$ and $1,2,3 : \text{Nat}$.}
    \label{fig:typeabstractions}
  \end{center}
\end{figure}

A useful and straightforward reformulation of type-based indexing is {\em type name-based indexing} that, instead of enumerating the edges of the type tree, directly labels the vertices of the type tree. The simplest approach is to assign a unique type name to every vertex in the type tree. If the names assigned have no understandable meaning to humans then this method offers no advantages over type-based indexing. However, if the knowledge representational formalism used to define types and data instances uses human-understandable names then type name-based indexing provides a useful notation for referring to basic subterm sets, as illustrated in Example \ref{eg:authortype}.

\begin{example} \label{eg:authortype}
Let $\textit{Author}$ be the type of authors from the publications domain, which we define declaratively in the Haskell-style syntax from \cite{Gaertner2004ax} as follows.
{\small
\begin{verbatim}
    type Author = (Name,Publications);
    type Name = String;
    type Publications = List Publication;
    type Publication = (Mode,Coauthors,Title,Venue,Year);
    data Mode = Journal | Proceedings | ... | Book;
    type Coauthors = Coauthor -> Bool;
    type Coauthor = String;
    type Title = String;
    type Venue = String;
    type Year = Int;
\end{verbatim}
}
\noindent
This states that $\textit{Author}$ is a pair of $\textit{Name}$ and $\textit{Publications}$, 
where $\textit{Name}$ is an alias for $\textit{String}$ the type of strings, and $\textit{Publications}$ is a list of publications, 
which in turn is a $5$-tuple of $\textit{Mode, Coauthors, \dots, Year}$, 
where $\textit{Mode}$ has the nullary data constructors $\textit{Journal}$, $\textit{Proceedings}$, $\dots$, $\textit{Book}$, and so on through to $\textit{Year}$ which is an alias for the type $\textit{Int}$, the type of the integers. 
$\textit{Coauthors}$ is a basic abstraction from $\textit{Coauthor}$  to $\textit{Bool}$, where $\textit{Bool}$ is the type $\Omega$, i.e. $\textit{Coauthors}$ is a set of $\textit{coauthors}$. 
To ensure the required uniqueness of type names $\textit{Coauthor}$, $\textit{Title}$, $\textit{Venue}$ and $\textit{Name}$ are aliases for the type $\textit{String}$.
The type tree index set is thus 
$\{${\em Author, Author.Name, Author.Publications.Publication.Mode, $\dots$, Author.Publications.Publication.Year}$\}$.
\end{example}


A type tree index set generated using this method is isomorphic with that produced by Definition \ref{def:typetreeindexset}, as illustrated informally in \figurename \ref{fig:typeauthor}. The constraint that all basic subtypes must be uniquely named permits the following simpler definition of a basic subterm set.


\begin{definition}[Basic Subterm Set (with named types)]
If $t$ is a closed basic term of type $\tau$ and $\alpha \subseteq \tau$ then the {\em basic subterm set of $t$ at type $\alpha$}, denoted $t|_\alpha$, is 
$
t|_\alpha = \{ s\ |\ s \text{ occurs in } t \text{ with type } \alpha \}.
$ 
A {\em basic subterm set} is a set of basic subterms of a basic term at some type $\alpha \subseteq \mathfrak{B}$. A basic subterm set is {\em proper} if $\alpha \neq \tau$.
\end{definition}

\begin{figure}[h]
  \begin{center}

\pstree[nodesep=4pt,levelsep=8ex,arrows=->,linestyle=solid,linewidth=0.5pt]{\ltype{root}{}{\textit{Author} = \textit{Name $\times$ Publications}} }{
    \loopback{root}{\varepsilon}
    \ltype{tupleitem27}{1}{\textit{Name}=\textit{String}}
    \pstree[nodesep=4pt,levelsep=8ex,arrows=->,linestyle=solid,linewidth=0.25pt]{\rtype{tupleitem30}{2}{\textit{Publications} = \textit{List Publication}} }{   
       \pstree[nodesep=4pt,levelsep=8ex,treesep=1.5em,arrows=->,linestyle=solid,linewidth=0.25pt]{\ltype{alpha4}{1}{\textit{Publication = Mode $\times$ Coauthors $\times$ Title $\times$ Venue $\times$ Year}} }{
            \ltype{tupleitem26}{1}{\textit{Mode}}
            \pstree[nodesep=4pt,levelsep=8ex,arrows=->,linestyle=dotted,linewidth=0.25pt]{\ltype{tupleitem30}{2}{\textit{Coauthors $=$ Coauthor $\to \Omega$}} }{            
                \ltype{tupleitem27}{1}{\textit{Coauthor}=\textit{String}}
                \rtype{tupleitem27}{2}{\Omega}
            }
            \rtype{tupleitem27}{3}{\textit{Venue}=\textit{String}}
            \rtype{tupleitem27}{4}{\textit{Year}=\textit{Int}}
        }
    }
}

    \caption{Type name-based and type-based indexing for type $\textit{Author}$.}
    \label{fig:typeauthor}
  \end{center}
\end{figure}



%---------------------------------
\subsection{Indexing Basic Term Relations}

Having upgraded our representation of a relation $R : \tau$ to handle structured data represented as basic terms, and having chosen a suitable indexing method for the basic subterm set $\occurrenceb(\tau)$, we are now able to conveniently define the {\em basic term relation index} as the structured data counterpart of the relation index.

\begin{definition}[Basic Term Relation Index]
The {\em basic term relation index} $I_R$ of a basic term relation $R$ of type $\tau$  is $I_R = \occurrenceb(\tau)$.
\end{definition}


%---------------------------------------------------------------------
\subsection{Exact Relational Join for Structured Data}

The upgraded definitions of an exact relation join for structured data closely follow the earlier relational definitions but now using basic term relations and indexing.

\begin{definition}[Basic Term Projection]
If $t \in \tau$, where $\tau \subseteq \mathfrak{B}$, then the {\em basic term projection} $\pi$ of $t$ on $i \in I_t$ is  $\pi_i(t) = \{ s\ |\ s \text{ is a basic subterm of } t \text{ at type tree index } i \}$.
\end{definition}

\noindent
A basic term projection $\pi_i(t)$ may also be written as $t|_i$.

\begin{definition}[Basic Term Generalised Projection]
If $t \in \mathfrak{B}$ then the {\em basic term generalised projection} $\pi$ of $t$ on $\rho \subseteq I_t$ is the set $\pi_\rho(t) = \{\ t|_i\ \ |\ \ i \in \rho\ \}$.
\end{definition}

\begin{definition}[Basic Term $\theta$-Restriction]
Let $\theta$ be a predicate  $\theta : (\domainb \to \Omega) \to (\domainb \to \Omega) \to \Omega$ for some $\alpha \in \domainb$. If $A$ and $B$ are basic term relations with basic terms $a|_i \subseteq \domainb$ and $b|_j \subseteq \domainb$ respectively for some $(i,j) \in I_A \times I_B$, then {\em basic term $\theta$-restriction} $\sigma_{i \theta j}$ is defined on $T \subseteq A \times B$ as 
$
\sigma_{i \theta j}(T)
= 
\{ (a,b) \ | \ \ a|_i\, \theta\, b|_j\ \wedge\ (a,b) \in T \}.
$
\end{definition}

\noindent
The predicate $\theta : (\domainb \to \Omega) \to (\domainb \to \Omega) \to \Omega$ is defined on sets of basic terms. In other words, $\theta$ is a predicate on basic term relations.

\begin{definition}[Basic Term Generalised Restriction]
Let $\varphi$ be a proposition that consists of atoms as allowed in basic term $\theta$-restriction and the logical operators $\wedge$, $\vee$ and $\lnot$. If $A$ and $B$ are basic term relations then the {\em basic term generalised restriction} $\sigma_{\varphi}$ is defined on $T \subseteq A \times B$ as 
$
\sigma_{\varphi}(T)
= 
\{\ t \ | \ \varphi(t)\ \wedge\ t \in T \}.
$
\end{definition}

\begin{definition}[Basic Term $\theta$-Join]
Let $\theta : (\domainb \to \Omega) \to (\domainb \to \Omega) \to \Omega$ be a predicate for some type $\alpha \in \mathfrak{B}$. If $A$ and $B$ are basic term relations with basic terms $a|_i \subseteq \domainb$ and   $b|_j \subseteq \domainb$ respectively for some $(i,j) \in I_A \times I_B$ then the {\em basic term $\theta$-join} $\Join_{i\, \theta j}$ of $A$ and $B$ is 
defined as $
A \Join_{i\, \theta j} B = \sigma_{i\, \theta j}(A \times B).
$
\end{definition}

\begin{definition}[Basic Term Generalised Join]
Let $\varphi$ be a proposition that consists of atoms as allowed in basic term $\theta$-restriction and the logical operators $\wedge$, $\vee$ and $\lnot$. If $A$ and $B$ are basic term relations then the {\em basic term join}  
% $\Join_{\varphi}$ is defined as
$
A \Join_{\varphi} B = \sigma_{\varphi}(A \times B).
$
\end{definition}


%---------------------------------------------------------------------
\subsection{Approximate Relational Join for Structured Data}

We assume some distance for basic terms and note that positive semi-definite kernels induce pseudo-metric distances. One suitable kernel is the kernel for basic terms from \cite{Gaertner2004ax} and described earlier, but other kernels and distances may also be suitable.

\begin{definition}[Basic Term Proximity-Join]
\label{def:btproximityjoin}
Let $\approx\ : \domainb \times \domainb \to \Omega$ be a proximity for some $\domainb$ of type $\alpha$. If $A$ and $B$ are basic term relations with subterms $a|_i \in \domainb$ and $b|_j \in \domainb$ respectively for some
$(i,j) \in I_A \times I_B$, then the {\em proximity-join} $\semjoin_{i\, \approx j}$ of $A$ and $B$ is 
defined as $
A \semjoin_{i\, \approx j} B = \sigma_{i\, \approx j}(A \times B).
$
\end{definition}

\noindent
This definition closely parallels that of the approximate relational join on account of the following: the basic term relation is a set which allows the same set theoretic operators from the relational case to apply; the basic term relation index fulfills the same role as the relation index from the relational case; and, finally, the kernel for basic terms' own inductive definition implicitly handles the often  recursive nature of structured data. The closeness in form of the definition of the basic term join to that of the relational join facilitates the following result.

\begin{proposition}
Relational joins are a special case of basic term relational joins.
\end{proposition}

\begin{proof}
Assume relation $R \subseteq \domain_1 \times \dots \times \domain_n$ for some domains $\domain_1,\dots,\domain_n$.
Assume appropriate type constructors and data constructors such that $\domain_1,\dots,\domain_n \subseteq \mathfrak{B}$.
Let basic term relation $S \subseteq \domain_1 \times \dots \times \domain_n$. 
Let $I_R$ be the relation index of $R$ and $I_S$ be the basic term relation index of $S$. Clearly there is a surjection from $I_R$ into $I_S$ and thus from the set of tuple items in each tuple in $R$ to the set of subterms in each corresponding basic term tuple in $S$. Assume the $\theta$ operators are available for basic terms and the result follows.
\qed
\end{proof}

%=====================================================================
\section{Applications}

We have implemented the higher-order relational projection, restriction and join operators and a range of supporting kernels, including the kernel for basic terms, in Prolog. Although Prolog does not natively support the higher-order logic necessary to represent data as basic terms, emulation of typed data, basic tuples, basic structures and basic abstractions (including sets and multisets) has proven to be unproblematic in practice. We are currently working to characterise and evaluate this framework using the application domain of bibliographic publications. Heterogeneous data sets within this domain include CORA, DBLP, Citeseer and Google Scholar. Interesting higher-order approximate joins between pairs $(A,B)$ of these datasets might, for instance, include the following.

\begin{itemize}
\item $A \semjoin_{\textit{Author.name}} B$, authors in $A$ and $B$ that have similar names
\item $A \semjoin_{\textit{Author.affiliation}} B$, authors in $A$ and $B$ affiliated to the same institution
\item $A \semjoin_{\textit{Author}} B$, authors in $A$ and $B$ similar across all their properties
\item $A \semjoin_{\textit{Publication.venue}} B$, publications in $A$ and $B$ from the same venue
\item $A \semjoin_{\textit{Publication.coauthors}} B$, publications in $A$ and $B$ with similar coauthors
\end{itemize}

For the sake of evaluation we require the ground truth $V$ for each join to be evaluated, where $V \subseteq A \semjoin B$ and, for the case where the individuals as terms represent publications, $V = \{ (a,b)\ |\ a \in A\ \wedge\ b \in B\ \wedge\ a \textit{ and } b \textit{ are variants of the same publication} \}$. The goal is to reconstruct $V$ as $V' = A \semjoin_{s} B$\, by choosing an appropriate $s$ from the intersection of the basic subterm sets of $A$ and $B$. 
In reality, $V$ is not usually available for pairs of different data sets. For this reason we narrow down our initial evaluation to consider self-joins, $A \semjoin_{s} A$, on a single data set $A=\textit{CORA}$, for which ground truths are available \cite{culotta2005}.\, $\textit{CORA}$ consists of bibliographic citations, hand-labelled with unique identifiers so that variant citations of the same paper share the same identifier\footnote{The specific CORA data set used is an aggregation of all three CORA-REFS citation matching data sets ({\em fahl-labeled}, {\em kibl-labeled}, and {\em utgo-labeled}). The raw CORA-REF files have numerous XML mark-up errors which we have manually corrected to enable parsing.}.

Given this supervised learning setting, a number of distance-based methods could be used to implement the approximate join, including $k$-means, $k$-NN, and agglomerative hierarchical clustering. We chose the latter for this initial investigation on the basis that it produces a dendrogram that is useful in visualising and characterising the join. Although this is a clustering method more normally associated with unsupervised learning, here we are able to make use of the ground truth labelling to achieve a supervised setting. A dendrogram represents a progressive series of joins (or clusterings), with instances in the same cluster being leaves of the same sub-tree. The distance value at each non-terminal node represents a potential threshold $\delta$ at which to `cut' the tree and arrive at a set of clusters.\, $\delta$ is the threshold from the proximity predicate from Definition \ref{def:btproximityjoin}. To evaluate the quality of the clustering at a given $\delta$ we consider whether each pair of instance data is correctly classified as being in the same cluster or in different clusters; in other words we evaluate a binary classification on all pairs of instances to determine if the two instances in a pair refer to the same publication or to different publications. A confusion matrix is then calculated in order to determine precision and recall for this specific value of $\delta$. To characterise a proximity-join across a range of thresholds we vary $\delta$ across the length of the tree and plot a precision-recall chart.

To represent the publications we choose the following type structure\footnote{$\textit{Year}$ is a string rather than a numeric type due to non-numeric characters in the data. Also, $\textit{Venue}$ is constructed as a concatenation of venue-related fields; CORA has no venue field.}.

{\small
\begin{verbatim}
    type Publications = Publication -> Bool;
    type Publication = (Coauthors,Title,Venue,Year);
    type Coauthors = Coauthor -> Bool;
    type Coauthor = String;
    type Title = String;
    type Venue = String;
    type Year = String;
\end{verbatim}
}

\noindent
Hence by representing CORA as a basic term relation of type $\textit{Publications}$, where $\mathfrak{B}_\textit{Publications} \subseteq \mathfrak{B}$, we are able to execute the following basic term proximity-joins:
\begin{itemize}
\item $\textit{CORA} \semjoin_{\textit{Publication.Title}} \textit{CORA}$, a self join on only the publication's $\textit{Title}$ subterm;
\item $\textit{CORA} \semjoin_{\textit{Publication.Venue}} \textit{CORA}$, a self join on only the publication's $\textit{Venue}$ subterm;
\item $\textit{CORA} \semjoin_{\textit{Publication.Coauthors}} \textit{CORA}$, a self join on only the publication's $\textit{Coauthors}$ subterm, which is in turn a set of $\textit{Coauthor}$ subterms;
\item $\textit{CORA} \semjoin_{\textit{Publication}} \textit{CORA}$, a self join on the entire $\textit{Publication}$ term.
\end{itemize}

\noindent
For each join, to keep results comparable, we choose the $p$-spectrum(2) kernel for strings and accept the default kernels for all other types. We do not optimise the default kernel for basic terms by choosing weighting modifiers that, for example, might be used to encode the intuition that a year of publication is less discriminating than the title of a publication when aggregated into an overall kernel on publications.

\begin{figure}[h]
  \begin{center}
    \epsfig{file=ilp2008_cora-refs_dendro5.eps,width=100mm,height=30mm}
    \epsfig{file=ilp2008_cora-refs_dendro2.eps,width=100mm,height=70mm}
    \setlength{\abovecaptionskip}{0pt}
    \setlength{\belowcaptionskip}{-20pt}
    \caption{Dendrogram (above), showing clusterings at successive thresholds for a proximity-join on the CORA publication type, and (below) a close-up showing labelled ground truths.}
    \label{fig:dendro}
  \end{center}
\end{figure}

For each of these four joins we constructed a dendrogram such as \figurename \ref{fig:dendro} and calculated the corresponding precision-recall chart in \figurename \ref{fig:cora1-4}. Note that the trivial reflexive pairs, i.e. cluster sizes of 1, are ignored in the plots as they convey no useful information here and so lines are not interpolated to the top left of the chart (recall=0, precision=1). As would intuitively be expected, a join on $\textit{Publication.Title}$ is generally a better discriminator of publications than one on $\textit{Publication.Coauthors}$ or $\textit{Publication.Venue}$. However, the default kernel for basic terms clearly effectively aggregates the information contained in the subterms of $\textit{Publication}$ to outperform any single one of the three subterms taken in isolation. The only exception is $\textit{Publication.Title}$, which sometimes outperforms its parent $\textit{Publication}$ at recall values greater than $0.9$.

\begin{figure}[h]
  \begin{center}
    \epsfig{file=ilp2008_cora-refs_1-4_c.eps,width=110mm}
    \caption{Precision and recall for various decompositions of CORA publication type.}
    \label{fig:cora1-4}
  \end{center}
\end{figure}


%=====================================================================
\section{Related Work}

Our work, to the best of our knowledge, is a unique combination of the relational model with a higher-order representation and distance-based methods. Thus we now describe our work with respect to related work in  three related fields: relational learning, knowledge representation, and distances for structured data.

In database literature and more recently, particularly since the advent of the Web, within the relational learning literature, there has been considerable interest in the data integration aspects of database deduplication and record linkage \cite{lawrence:agents99,newman2001}. However, in addition to dealing with heterogeneous data structures, our work adopts an {\em individuals as terms} representation so that both the type and structure of data are not obfuscated by traditional relational representations. Therefore our approach has the advantage of simplifying data modelling and the application of approximate matching techniques. Despite this, it should be noted that we propose the higher-order relational representation solely as an approach for data integration tasks, not as a replacement for general purpose relational databases. Our present implementation certainly has none of the optimisations of a modern relational database. Ultimately though, a higher-order view could be layered on top of a traditional relational database system, efficiently combining the two approaches, so that higher-order queries are automatically translated into and executed as equivalent relational queries.

Our goal of integrating and querying heterogeneous data is also a goal shared by the Semantic Web community \cite{Berners-Lee2001mb}. The fundamental data model of the Semantic Web is the directed labelled graph, represented as RDF triples, which may be queried using the SPARQL query language \cite{Prudhommeaux2005vn}. Data structures such as lists, sets, multisets, trees and graphs are readily supported through RDF Schema and the OWL  ontology language \cite{McGuinness2004jy} and as such have similar representational advantages to basic terms as compared to the relational model. SPARQL queries can be used to retrieve a subgraph describing an individual that is analogous to a representation of that individual as a basic term. Conversely, it is straightforward to transform the same subgraph into a basic term in order to apply our own approach to RDF data. For RDF data integration, or {\em smushing} as it is informally known, the emphasis in the Semantic Web languages to-date has been on exact matching, using inverse-functional properties such as email addresses, homepage URLs or entity URIs. This is an obvious shortcoming in the presence of noisy data or representational variations between data from different sources. To address the consequent data integration problem, work has been done in the area of ontology matching, including work on measuring proximity between ontologies \cite{Maedche2002vn}. Our approximate matching work differs from this explicit semantic integration approach in that we rely primarily on the implicit semantics of the type structure and data instances themselves. This is an advantage in cases where detailed ontological information is not available but potentially a disadvantage in other cases because background knowledge encoded in an ontology is not exploited in our approximate joins. The incorporation of background knowledge into our approximate joins is an area for future work.

Turning now to related work on distances, we first note that kernels and distances used in this paper are not of themselves a contribution of our work. Also, the choice of the default kernel for basic terms is not a specific requirement for the approximate relational join; any distance for basic terms would be suitable. Prior work on distances for logical terms includes distances between Herbrand interpretations \cite{DBLP:conf/ilp/Nienhuys-Cheng97} and between first-order terms (including structures and lists) \cite{DBLP:conf/ilp/Sebag97,DBLP:conf/ilp/BohnebeckHW98,DBLP:conf/ilp/KirstenW00}. None of these directly apply to basic terms and while it may be possible to apply distances on first-order terms to our first-order representation of basic terms, the semantics of basic abstractions would be lost as a result. Most closely related to our work, are various similarity-based methods that have been upgraded to handle structured data \cite{Gaertner2004ax,Bhattacharya2005dq,DBLP:conf/pakdd/WoznicaKH05}. Contrasting approaches apply probabilistic models to take account of dependencies between resolution decisions \cite{parag:mrdm04,bhattacharya:sdm06}. Most recently, a family of pseudo-distances over the set of objects in a knowledge base has been introduced although not specifically for basic terms \cite{DBLP:conf/ilp/dAmatoFE07}.


%=====================================================================
\section{Conclusion}

In this paper we have combined two contrasting knowledge representational approaches, the relational model and basic terms, into a single coherent formalism that is well suited to the integration of heterogeneous data. This, in conjunction with the default kernel for basic terms, has been shown to have potential for data integration and to be worthy of further investigation.


%=====================================================================
% Bibliography

\bibliographystyle{splncs}
\bibliography{approxjoin}

%=====================================================================
%=====================================================================
%\newpage
%
%%%%%%%%%%%%%%%%%%%%%%%% SCRATCH PAD %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\section{SCRATCH PAD}
%
%\begin{definition}[Basic terms \cite{Lloyd2003bh}]
%The set of {\em basic terms}, $\mathfrak{B}$, is defined inductively as follows.
%\begin{enumerate}
%
%\item If $C$ is a data constructor of arity $n$ and $t_1, \dots, t_n \in \mathfrak{B} \ (n \in \mathbb{N})$ such that
%$C\ t_1 \dots t_n \in \mathfrak{L}$, then $C\ t_1 \dots t_n \in \mathfrak{B}$.
%
%\item If $t_1,\dots,t_n \in \mathfrak{B}$, $s_1,\dots,s_n \in \mathfrak{B}$, 
%$t_1 < \dots < t_n$, $\ s_i \notin \mathfrak{D}$, for $1 \leq i \leq n \ (n \in \mathbb{N})$, $s_0 \in \mathfrak{D}$ and
%\[
%    \lambda x \text{ if } x=t_1 \text{ then } s_1 \text{ else } \dots 
%     \text{ if } x=t_n \text{ then } s_n \text{ else } s_0 \in \mathfrak{L},
%\]
%then
%\[
%    \lambda x \text{ if } x=t_1 \text{ then } s_1 \text{ else } \dots 
%     \text{ if } x=t_n \text{ then } s_n \text{ else } s_0 \in \mathfrak{B}.
%\]
%
%\item If $t_1,\dots,t_n \in \mathfrak{B}\ (n \in \mathbb{N})$ and $(t_1,\dots,t_n) \in \mathfrak{L}$, then $(t_1,\dots,t_n) \in \mathfrak{B}$.
%
%\end{enumerate}
%\end{definition}
%
%The basic terms from Part 1 of the definition are called {\em basic structures}, those from Part 2 are called {\em basic abstractions}, and those from Part 3 are called {\em basic tuples}.


\end{document}
