\documentclass[twoside,11pt]{article}

% Any additional packages needed should be included after jmlr2e.
% Note that jmlr2e.sty includes epsfig, amssymb, natbib and graphicx,
% and defines many common macros, such as 'proof' and 'example'.
%
% It also sets the bibliographystyle to plainnat; for more information on
% natbib citation styles, see the natbib documentation, a copy of which
% is archived at http://www.jmlr.org/format/natbib.pdf.



\usepackage{afterpage}
\usepackage{fixltx2e}
\usepackage{float}
\usepackage{jmlr2e}
\usepackage{listings}
\usepackage{changepage}
\usepackage{epsfig}
\usepackage{dsfont}
\usepackage{array}
\usepackage{amsmath}
\usepackage{fancyvrb}
\usepackage{booktabs}
\usepackage{mathptmx}
\usepackage{sidecap}
\usepackage{verbatim}



% El segundo paquete permite agregar imagenes eps y de otros formatos como jpg y png en el mismo documento
\usepackage{graphicx}
\usepackage{epstopdf}
\usepackage{natbib}

\usepackage{algpseudocode}
\usepackage{algorithm}
\usepackage{xparse}
\let\oldState\State% Store \State in \oldState
\RenewDocumentCommand{\State}{o}{% \State[<num>]
  \IfValueTF{#1}{\makeatletter\setcounter{ALG@line}{#1}\addtocounter{ALG@line}{-1}\makeatother}{}%
  \oldState\ignorespaces%
}

% Provides \labelformat, which changes how \ref references look
\usepackage{varioref}

% \setlength{\textwidth}{\dimexpr\pdfpagewidth-.2.5in}% Equal left/right margins
% ESTO PERMITE USAR LOS SIMBOLOS DE VALOR ABSOLUTO
% ESTO PERMITE USAR LOS SIMBOLOS DE VALOR ABSOLUTO
\newcommand{\abs}[1]{\left\vert#1\right\vert}

\RequirePackage{fix-cm}

\usepackage{color}
\definecolor{lightgray}{rgb}{.94,.94,.94}
\definecolor{darkgray}{rgb}{.3,.3,.3}
\definecolor{purple}{rgb}{0.65, 0.12, 0.82}
\definecolor{blue}{rgb}{0.0, 0.0, 0.82}
\definecolor{red}{rgb}{0.83, 0.0, 0.0}
\definecolor{green}{rgb}{0.1, 0.55, 0.1}
% colores para los listados del código fuente
\definecolor{codebackground}{rgb}{0.9,0.9,0.9}
\definecolor{gray97}{gray}{.97}
\definecolor{gray75}{gray}{.75}
\definecolor{gray45}{gray}{.45}
\definecolor{gray90}{gray}{.87}
\definecolor{UrlColor}{rgb}{0,0.08,0.45}

\lstdefinelanguage{Prolog}{
 frame=Ltb,
 framerule=0pt,
 aboveskip=0.5cm,
 framextopmargin=3pt,
 framexbottommargin=3pt,
 framexleftmargin=0.4cm,
 framesep=0pt,
 rulesep=.4pt, backgroundcolor=\color{gray90},
 basicstyle=\small,
% frame=trBL,
% linewidth=8cm,
% backgroundcolor=\color{lightgray},
keywords={modeh,modeb,determination,set,
discretization,grouping,
lazy_evaluate,to_be_discretized,
discretized,rmode,threshold,posonly},keywordstyle=\bfseries,
commentstyle=\small\color{darkgray}\ttfamily, tabsize=2,
% En este caso l indica que es solo una letra el identificador para los comentarios % .......
% En este caso s indica que es una cadena encerrada entre dos delimitadores para los comentarios /* ..... */
morecomment=[l]{\%},
morecomment=[s][\color{red}]{/*}{*/},
emph={adult,gteq,lteq,number,age,person,A,C,B,D,E,F,G,H,I,J,K,L,Age,Person,class,
Animal,reptile,mammal,fish,bird,Cover,member,feathers,hair,scales,none,
true,false,link,active,atm,no,attribute,logp,not_pay,
absence_from_school,enrolled,male,
S,Int,School,unemployed,ucb,ucla,uci,smc,
bottom,clause,Extended,Aleph,Pos,Rule,Neg,
Theory,theory,not_pay,cover,PosCover,NegCover,
VarType,Arity,Goal_Pred,
filed_for_bankruptcy,S,
discr_split,evalfn,
grp_subset,
grp_member,
discr_lteq,
discr_gteq,
discr_between,
goal,pos,neg,Positive,Negative,example,format,Aleph,TILDE,
lit,T1,T2,t1,t2,t3}, emphstyle=\ttfamily,
%moredelim=[l][\ttfamily]{,},
%moredelim=[l][\ttfamily]{\{},
%moredelim=[l][\ttfamily]{\}},
%moredelim=[l][\ttfamily]{(},
%moredelim=[l][\ttfamily]{)},
%moredelim=[l][\ttfamily]{[},
%moredelim=[l][\ttfamily]{]},
literate={\#}{\bfseries{\#}}{1}
         {*}{\bfseries{*}}{1}
         {+}{\bfseries{+}}{1}
         {-}{\bfseries{-}}{1}
         {=<}{\bfseries{=<}}{1}
         {=}{\bfseries{=}}{1}
         {>=}{\bfseries{>=}}{1}
				 {/}{\bfseries{/}}{1}
				 {:-}{\bfseries{:-}}{1}
				 {:}{\bfseries{:}}{1}
				 {[}{\rmfamily{[}}{1}
				 {]}{\rmfamily{]}}{1}
				 {(}{\rmfamily{(}}{1}
				 {)}{\rmfamily{)}}{1}
				 {\{}{\rmfamily{\{}}{1}
				 {\}}{\rmfamily{\}}}{1}
				 {,}{\bfseries{,}}{1}
}

\newtheorem{defi}{\textsc{Definition}}[section]




% Definitions of handy macros can go here

\newcommand{\dataset}{\mathcal{D}}
\newcommand{\fracpartial}[2]{\frac{\partial #1}{\partial  #2}}

% Heading arguments are {volume}{year}{pages}{submitted}{published}{author-full-names}

\jmlrheading{1}{2000}{1-48}{4/00}{10/00}{Orlando Mu\~noz-Texzocotetla and Ren\'e MacKinney-Romero}

% Short headings should be running head and authors last names

\ShortHeadings{An evolving approach}{Mu\~noz-Texzocotetla and MacKinney-Romero}
\firstpageno{1}



\begin{document}

\title{An evolving approach to handle numerical and categorical attributes in inductive logic programming}
 

\author{\name Orlando Mu\~noz-Texzocotetla \email magicorlan@gmail.com \\
        \name Ren\'e MacKinney-Romero \email rene@xanum.uam.mx \\
        \addr Departamento de Ingenier\'ia El\'ectrica\\
        Universidad Aut\'onoma Metropolitana\\
        M\'exico, D.F. 09340, M\'exico}


\editor{Leslie Pack Kaelbling}

\maketitle

\begin{abstract}%   <- trailing '%' for backward compatibility of .sty file
Inductive logic programming (ILP) induces concepts from a set of positive examples,
a set of negative examples, and a background knowledge.  ILP has been applied
on tasks such as natural language processing,
finite element mesh design, 
network mining, 
robotics or drug discovery which use relational datasets. 
These datasets usually contain numerical and categorical (with several
values) attributes; however, few relational learning systems are capable of handling
them in an efficient way. 
In this paper we present a novel approach to improve learning where numerical and 
categorical attributes are involved by enriching the language which is used to
construct relational rules. Although this was implemented in
an inductive logic programming system, called Aleph, it can be applied
to any relational learning system.
Our proposal is based on inducing numerical subintervals and 
categorical subsets by means of genetic algorithms. These subintervals and subsets are added to the background
knowledge to construct the candidate rules.
We compared this approach with discretization and lazy evaluation in Aleph and
discretization in the TILDE system. The results obtained
showed that our method improves the accuracy
and reduces the number of rules in most cases. Finally, we discuss these results and possible
lines for future work.
\end{abstract}

\begin{keywords}
  ILP, Numerical Attributes, Categorical Attributes, Discretization, Grouping
\end{keywords}

\section{Introduction}
\label{intro}


Inductive Logic Programming (ILP) is 
a subfield of relational learning which overcomes two drawbacks of propositional learning:
a limited data representation (propositional
logic language), and the inability to use background knowledge
during the learning process. ILP has been applied to 
complex domains such as: natural language processing,
finite element mesh design,
modeling of dynamic systems, 
network mining, 
robotics, drug discovery, etc. We refer to
\citet{lavrac1,Muggleton99inductivelogic,Muggleton_2012_IT_2123932_2123939,BergadanoGu95}
for consulting these and other applications.
Formally, ILP is defined as the intersection between machine learning and logic 
programming \citep{muggleton1}.
Therefore, theories are induced from a set of examples.
Although unlike other learning systems,
ILP algorithms do not use exclusively examples, but they
use also information about the problem domain: {\em background knowledge.}


The language used in ILP is based on logic programming.
Thus, a logic program can
represent a set of positive examples $E^+$, a set of negative examples
$E^-$, a background knowledge $B$, and/or a theory $T$.
The main goal is to find a theory $T$ from a set of positive $E^+$ and negative $E^-$
examples, and from a background knowledge $B$. This task, also called Normal Setting ILP,
is described as follows:

{\flushleft {\bf Normal Setting ILP}}\\

{\bf Given:} 
    \begin{itemize}
		     \item a finite set of clauses $B$,
				 \item a finite set of clauses $E^+$, and 
				 \item a finite set of clauses $E^-$
     \end{itemize}

\medskip		

{\bf Find a theory  $\mathbf{T}$ (set of hypotheses or rules), such that} 
     \begin{itemize}
             \item $B \wedge T \models E^+$  \emph{ (completeness)},  and
						 \item $B \wedge T \not\models E^-$ \emph{ (consistency)}
			\end{itemize}

If $T$ is complete and consistent then $T$ is \emph{ correct.}
It must be noted that $T$ may not be accurate for unseen examples.

The ILP task can be viewed as a search problem.
This search process is performed by a learner which can be described by means of
three elements: a search space, a search strategy and a search heuristic. 

Regarding the search space, we can remark the following issue:
A very restricted language bias\footnote{In general, a bias is a mechanism to
restrict the search space \citep{UtgoffM82}.  The language bias is the set
of all hypotheses that a learner may construct.} generates a very small search space,
in which case the learner is very efficient, but it is likely that the theory
found does not represent a suitable solution for the target concept.
For instance, if a language bias does not allow numerical constants,
then the final theory will not be able to express relations
that require numerical results.

ILP systems usually define a language bias which tests a single constant 
value per argument while constructing hypotheses, and thus may induce inaccurate 
theories with many rules. 
Suppose that an ILP system induced the following rule, which indicates whether a 
person $A$ is an adult or not, without adding new predicates in the background knowledge and
without processing numerical arguments. In addition, the goal relation is $adult/1$\footnote{{\em predicate/arity}}
and the relation in the background knowledge is $age/2$.


\begin{equation*}
      adult(A) \stackrel{}{\leftarrow} age(A,19)
			\label{clauseage1}
\end{equation*}			

This clause explains that a person $A$ is an adult if he/she is $19$ years old,
this can be seen as a theory that explains whether a person is an adult or not, 
but to be accurate it has to contain many hypotheses
like the preceding clause, i.e.\ a clause for each age. Therefore, if all values are not represented
in the examples and the background knowledge, then the final theory will be inaccurate. In our example,
not all adults will be explained.

The easiest way to overcome that drawback is to manually weaken the language bias. 
For this, we can add the predicate $> / 2$ to obtain a theory like this:

\begin{equation*}
         adult(A) \stackrel{}{\leftarrow} age(A,B) \wedge B > 17
         \label{clauseage2}
\end{equation*}

This clause is a good representation of the accurate age of adults (depending on the data given), 
and it is also more expressive than the previous clause. 


This problem can also appear with categorical arguments with several possible values. Let us
see a toy example: suppose the goal relation {\em quadrilateral}$/2$ which explains whether a
geometric figure is a quadrilateral or not. The two theories shown in Table~\ref{categorical_theories}
classify the same set of geometric figures; however, the relation {\em member}$/2$ reduces the number of rules 
of theory $2$.


\begin{table}[h]
% For LaTeX tables use
\centering
	\scalebox{0.9}[0.9]{
\begin{tabular}{|p{6cm}|p{10cm}|}
\hline
					{\bf Theory 1}  & {\bf Theory 2} \\
\hline
         $quadrilateral(A,yes) \stackrel{}{\leftarrow} sides(A,4)$
				
         $quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,3)$
				
         $quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,5)$
				
         $quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,6)$
				
         $quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,7)$
				
         $quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,8) $ &
				  
				$quadrilateral(A,yes) \stackrel{}{\leftarrow} sides(A,B), member(B,\left[4\right])$
         
				$quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,B), member(B,\left[3,5,6,7,8\right])$ \\				
\hline
\end{tabular}}
\caption{These theories explain whether a geometric figure is a quadrilateral or not. The predicate
{\em member}$/2$  in theory $2$ reduces the number of rules.}
\label{categorical_theories}       % label must come after \caption so \ref points to the table
\end{table}

The above examples are very simple, but they illustrate the main focus
in this paper: the handling of numerical and categorical attributes. 
From these examples, and from a learning point of view, it is useful 
to ask the following questions

%For this, it may be useful  to ask the following questions:

 \begin{itemize}			
			 \item How do we know that \mbox{\em 17} is the best split?
			 
			 \item How to determine the subsets $\left[4\right]$ and $\left[3,5,6,7,8\right]$?
			
			 \item How many splits (or subsets) should be created?
			 
			 \item How to determine that the predicates $>/2$ and {\em member}$/2$ are the right ones for the above examples?
       
       \item In general, how to find the best splits/subsets and predicates?
			
			 \item Can the best splits, subsets and predicates always improve the accuracy and/or reduce the theory size?
 \end{itemize}


To answer these questions, we present an approach called Grouping and Discretization to Background Knowledge 
Enrichment - GD-BKE which can deal
with both numerical and categorical attributes (in this paper, the 
terms ``attribute'' and ``argument'' are used interchangeably).


When an attribute is numerical, several splits are tested with a genetic algorithm
and the best splits (chromosomes) are added to the background knowledge, {\em Discretization.}
If the attribute is categorical then several subsets of values are tested, and
the best subsets (chromosomes) are added to the background knowledge, this is called {\em Grouping. }

Both numerical and categorical attributes are discretized/grouped taking into account two 
or more classes.
Then this information (subsets and splits) together with new predicates are added to the
language space. This approach was implemented in the Aleph system and compared
with two strategies: lazy discretization in Aleph
and discretization in the TILDE system.

The main contributions of this paper include:

\begin{itemize}
	\item Two methods to deal with both numerical and categorical attributes.
	
	\item They include multi-class functions  (not just positive and negative).
	
	\item An easy way to declare the attributes to be handled.
	
	\item The proposed methods are independent from the search strategy and refinement operators. Therefore,
	      this can be tested in any relational learning system.
\end{itemize}

The remainder of this paper is structured as follows. 
Section \ref{previous_work} provides a review of some strategies to handle
numerical and categorical attributes in ILP. Our approach is presented in Section \ref{proposed_method}.
Our experiments and results are discussed  in Section \ref{experimental_results}.
Finally, Section \ref{conclusions} presents our conclusions and future work.





\section{Previous work}
\label{previous_work}

This section presents some strategies to deal with numerical and categorical
attri\-butes in ILP. Since we compared our method with lazy discretization in the Aleph
system and discretization in the TILDE system, we take a closer look at these two methods. 



\subsection{Lazy Discretization in Aleph}

{\em A Learning Engine for Proposing Hypotheses (Aleph)} is an ILP system which
was implemented in Prolog \citep{aleph1}. This system is able to emulate some functionalities
of other ILP systems like Progol \citep{muggleton1}, FOIL \citep{quinlan1}, WARMR \citep{Dehaspe99discoveryof} and 
FORS \citep{Karalic:1997:FOR:251646.251651}. 
The basic Aleph algorithm consists of the four major steps shown in Algorithm~\ref{basic_aleph};
for more details see \citet{aleph1}.

\begin{algorithm}
\caption{Basic Aleph algorithm}\label{basic_aleph}
\begin{algorithmic}[1]
\State[1] Select an example $e$.

\State  Saturate $e$ to construct its bottom clause (saturation stage).

\State Search for a clause more general than the bottom clause.

\State Add the best clause to the theory, remove redundant examples. Go to step 1.
\end{algorithmic}
\end{algorithm}

The basic Aleph algorithm cannot process numerical and categorical (with 
many values) attributes in an efficient way because it tests a single value per
literal while constructing bottom clauses.
\citet{Srinivasan99numericalreasoning} propose a lazy evaluation
for numerical predicates in order to improve the numerical capabilities of the ILP systems.
Lazy evaluation in Aleph comprises two important elements:

\begin{itemize}
	\item {\em Including numerical predicates within the background knowledge.}
	      The predicates capable of performing numerical calculations and their declarations
	      are added into the background knowledge. Declarations tell Aleph how the rules
				are built. Declarations include three {\em modes} which define the
				type of argument:
				$+$ is an input attribute which is expected to be instantiated,
				$-$ is an output attribute which is not expected to be instantiated,
				$\#$ is a constant value. The meta-predicate {\em lazy\_evaluation}
				is a declaration that indicates the predicates to be evaluated.
				For instance, if we want to discretize an argument $A$ of a literal, then
				we must add the predicates less or equal to, and greater or 
				equal to (\lstinline[language=Prolog]!lteq(+A,#d), gteq(+A,#d)!),
				the meta-predicates \lstinline[language=Prolog]!lazy_evaluation(lteq/2), lazy_evaluation(gteq/2)!,
				and the code to discretize the argument $A$ and find
				the split point $d$.
				
	\item {\em Lazy evaluation of numerical predicates.}
	      The basic Aleph algorithm can not evaluate numerical predicates that
	      require a set of values related to more than one example, 
				since the search is guided by a single example. 
				The lazy evaluation approach presented by \citet{Srinivasan99numericalreasoning}
				consists of evaluating the numerical predicates until their 
				results are required, i.e. before starting the search process.
				The user may define numerical predicates such as discretization functions,
				arithmetic and trigonometric functions, regression functions, equalities
				and inequalities.
\end{itemize}

The efficiency of the lazy evaluation in Aleph depends largely on the 
numerical predicates defined by the user. Therefore, it is clear
that the user needs to know how to program in a logic language
which is not convenient, especially if the user (a researcher in other topics) 
has no idea how to do that. Furthermore, as noted above some categorical arguments may
have a lot of values, however the authors of the lazy evaluation in ILP presented 
in \citep{Srinivasan99numericalreasoning} do not mention anything about it.




 

\subsection{Discretization in TILDE}

One of the advantages offered by the propositional learning
over ILP is the handling of numerical attributes.
This advantage is adapted to the ILP context in the
\emph{Top-Down Induction of Logical DEcision Trees - TILDE} 
system \citep{Blockeel97lookaheadand}.
In this system the theories are represented as logic decision trees,
and the learning is based on interpretations.
Within learning from interpretations each example
is represented as a set of tuples in a relational database.
\emph{Learning from interpretations}
is described in \citet{Blockeel97lookaheadand} as follows:


{\flushleft {\bf Learning from interpretations}}\\

{\bf Given:}

\begin{itemize}
	\item a set of classes $C$,
	
	\item a set of classified examples $E$, and
	
	\item a background knowledge $B$.
	
\end{itemize}

\medskip

{\bf Find a theory $T$, such that $\forall e \in E:$}

	\begin{itemize}
	    \item  $T \wedge e \wedge B \models c$ \emph{(completeness)}, and 
			
			\item  $T \wedge e \wedge B \not\models c'$ \emph{(consistency)}
  \end{itemize}

Where $c$ is the class of the example $e$ and $c' \in C -  \left\{c\right\}$.\\


TILDE deals with numerical attributes by means of the discretization
based on the Minimum Description Length principle  - MDL \citep{FayyadI93}.
This discretization approach has been adapted in the following manner.
First, the user must declare the attributes to be discretized.
For this, the meta predicate \lstinline[language=Prolog,basicstyle=\ttfamily]!to_be_discretized! indicates 
both the literal as well as its numeric arguments (attributes).
Here is an example illustrating a declaration that indicates the argument to
be discretized:


\begin{lstlisting}[language=Prolog, frame=shadowbox]
to_be_discretized(age(Person, Age), [Age]).
\end{lstlisting}


Then, each argument declared is discretized once before the learning process
(global discretization). 
Thus, for the above declaration the following result might be obtained:

\begin{lstlisting}[language=Prolog]
discretized(age(Person, Age),[Age], [18]).
\end{lstlisting}

To add the numerical splits in the candidate hypotheses the user can define equalities or
inequalities with modes declarations (as in the Aleph system).
For the result shown in the preceding example an inequality
is used to indicate all numerical values less than the split $C$.

\begin{lstlisting}[language=Prolog, frame=shadowbox]
rmode(#(C:threshold(employee(_, _, _, Age),[Age], C), +Age < C)).
\end{lstlisting}

The predicate {\em rmode} determines the refinement operator and
the language bias. In this case {\em rmode} indicates
that the literal {\em employee} must be added to
refine a clause. The variable {\em Age} will be compared with
the threshold $C$. 
Details of the predicate {\em rmode} are presented in \citet{ace1}.

Unlike the discretization that is performed with lazy evaluation in the Aleph system,
TILDE discretizes each numerical attribute once, before the learning process;
this is called global discretization. This may be inefficient since
the global discretization uses the entire dataset. However,
the set of examples changes during the learning process.
Other ILP systems that use global discretization based on 
MDLP are: ICL \citep{Laer96multi-classproblems,Raedt95inductiveconstraint},
and ECL-GSD \citep{DivinaM05}.



\subsection{Constraint Logic Programming in NUM algorithm}

{\it Constraint Logic Programming (CLP)} is a programming paradigm in
which a {\it Constraint Satisfaction Problem (CSP)} is proposed in a
logical language. CLP joins two declarative programming paradigms:
logic programming and constraint programming \citep{JaffarL87}. 
Unlike logic programming, CLP can process
numerical values efficiently over different
domains: $\mathds{N}, \mathds{Z}, \mathds{R}$. 

The algorithm NUM \citep{Anthony97generatingnumerical} handles numerical
attributes by means of its refinement operator which can add numerical
literals in the following way:

\begin{enumerate}
   \item Users must define {\em a priori} the form of each numerical literal (linear, quadratic, etc).
         For instance, a linear literal would be like this:  $Y=C_1X+C_2$, where $C_1,C_2$ are
				 constants to be calculated
         by a $CLP$ system, and $X,Y$ are either new variables or variables already present
				 in the clause.
				
   \item If the refinement operator must add a numerical literal (to specialize any clause):
   
         \begin{enumerate}
               \item The NUM algorithm creates systems of equations
                 which depend on the form stated by the user, and the
                 number of declared constants. For instance, if the
                 numerical literal is linear, $Y=C_1X+C_2$, then the system
                 introduces a system of two equations and two
                 unknowns, where $C_1$ and $C_2$ become variables; and
                 $X,Y$ are replaced by the values of the background
                 knowledge and the examples:

								 \smallskip
               
                 Eq. 1) $5 = C_1 6 + C_2$
								
								 \smallskip
								 
								 Eq. 2) $-7 = C_1 3 + C_2$
                     
								 \smallskip
										
               \item The $CLP$ system solves each system of
                 equations. Each solution represents a numerical
                 literal. With the example given earlier, we could obtain
                 the next numerical literal: 
								
								 $Y = 4X - 19$.
                          
               \item The refinement operator uses the new literals to
                 specialize the current clause.
         \end{enumerate}

    \item The learning process goes on.
\end{enumerate}


The NUM algorithm uses efficiently a CLP system to find numerical literals.
This algorithm is embedded in a top-down refinement operator, although
it is not clear if NUM can be used in a bottom-up strategy. 
On the one hand, it would seem that NUM is not portable because it depends on the
implementation of the refinement operator. 
On the other hand, NUM finds different numerical literals such as
equations, inequations, linear and quadratic functions etc., but these
must be proposed by a user. This limits the use of NUM to experts on
CLP, something similar to lazy evaluation in Aleph.


\subsection{SMART+}

The \emph{SMART+} system, described in \citet{BottaG93}, is an extension
of the ML-SMART system which follows a top-down learning strategy.

To optimize globally the numerical arguments that occur in a clause 
$\varphi$, SMART+
includes a genetic algorithm which was adapted from the approach
proposed by \citet{Goldberg:1989:GAS:534133}.
This GA is performed after the specialization process as follows.
In this case, the GA is a second specialization over the clause $\varphi$.

Chromosomes are represented by binary strings.
All arguments in $\varphi$ define a string of real numbers:
$rs = k_1, \ldots, k_n$. Then this string is converted into
a binary string. To avoid generating
values outside the range for some $k_i$, the following
algorithm is proposed to transform $rs$ into a binary string $bs$.\\


\emph{a. }Given a parameter $k_i$ and the range $\left[min_i,max_i\right]$
          assigned by the user, then $k_i$ is represented as follows:
					
    \begin{equation}
		       k_i = min_i + \delta_i / \Delta
		\label{smart1}
		\end{equation}

    Where $0 \leq \delta_i \leq 2^{N_i}$,
		$N_i$ is the number of bits chosen by the user
		for representing the increment $\delta_i$.
		In addition, $\Delta$ is 
		
		\begin{equation}
		   \Delta_i = \frac{2^{N_i}}{max_i-min_i}		
		\label{smart2}
		\end{equation}

\emph{b. } Given a string $rs = k_1, \ldots, k_n$, for each argument $k_i$
    in $rs$ a set of $N_i$ bits will be defined in $bs$, where the
		value of the corresponding $\delta_i$ is represented.


The GA generates an initial population $A(\varphi)$ and calculates
the information gain (fitness) for each chromosome.
After this, a standard genetic algorithm is executed.

Two crossover operators are defined. The first one is the
standard single point crossover \citep{Goldberg:1989:GAS:534133}, and the second one
is defined below.

Given two chromosomes (parents) $bs_1, bs_2$, and a parameter
$k_c$ randomly selected, two offspring ${bs_1}', {bs_2}'$ are generated
in three steps.

\begin{enumerate}
   \item[i] The parameters to the left of $k_c$ in
          ${bs_1}'$ and ${bs_2}'$ are obtained
					from $bs_1$ and $bs_2$ respectively.

   \item[ii] The parameter $k_c$ in both offspring is obtained
          by averaging the two corresponding values in the parents.
					
   \item[iii] The parameters to the right of $k_c$ are obtained
          in ${bs_1}'$ by copying the corresponding ones in
					$bs_2$ and vice versa in ${bs_2}'$.
\end{enumerate}



To select between these crossover operators $c_1$ and $c_2$, two probabilities $p_1$ and $p_2$
are defined respectively, with the constraint: $p_1 + p_2 \leq 1$.

The mutation operator is standard, namely
a subset of genes is chosen at random and
its values are changed. This operator is applied
with a probability $p_{mut}=0.001$.

SMART+ uses a GA to test different combinations of numerical arguments in a rule after
being refined. Somehow, the GA guides the  search in a top-down strategy, however
an adaptation to the bottom-up strategy is not mentioned. In addition to this,
SMART+ has a weak declarative bias, which represents a disadvantage
when compared to Aleph or TILDE.


\subsection{Multivalue learning in ILP {\it (MVL-ILP)}}\label{mvl_ilp}

The approach proposed in \citet{omt1} deals both with
numerical and categorical attributes.  This method 
creates a split $d$ for each numerical attribute, and
two subsets of values for each categorical attribute.
This information and additional predicates are used
to create new (multivalue) clauses and to enrich
the background knowledge. This approach is described
below.


\begin{enumerate}
\item  {\em Discretization/Grouping.}
In this step, each attribute $A=\left\{x_1,  \ldots, x_n\right\}$ is discretized or grouped in a binary way.

When $A$ is numerical, a split point $d$ is created (discretization). This divides the numerical
interval of $A$ in two subintervals $I_1 = \left[l_1,r_1\right]$ and $I_2 = \left[l_2,r_2\right]$
such as $l_1, l_2, r_1, r_2 \in A$, and $r_1 \leq d \leq l_2$.

When $A$ is categorical, two  subsets of categorical values $S_1$ and $S_2$ are created (grouping).
These subsets fulfill the following conditions: $S_1 \cup  S_2 = A$ and $S_1 \cap S_2 = \emptyset$.
   

This step is carried out by the split point selection algorithm found in
two decision tree inducers: 
Quick Unbiased Efficient Statistical Tree - QUEST \citep{quest1},
and Classification Rule With Unbiased Interaction Selection
and Estimation - CRUISE \citep{cruise1} which will be explained later.
In addition, the final user selects {\em a priori} one of these
algorithms as well as each attribute to be processed.

\item {\em Constructing multivalue clauses.}
After the discretization/grouping process both the splits and the subsets of
categorical values are used to build multivalue clauses.
If a literal within the body of a clause has at least one argument with
more than one value, then that clause is called \emph{multivalue}.
For each numerical attribute a pair of multivalue clauses are created. 
For instance:
    

\begin{lstlisting}[language=Prolog]
adult(P):- 
     age(P,Age), Age =< 21.
		
adult(P):- 
     age(P,Age), Age > 21.
\end{lstlisting}


If $A$ is categorical, then a pair of multivalue clauses like the following are built.


\begin{lstlisting}[language=Prolog]
class(Animal,reptile):- 
     cover(Animal,Cover), member(Cover,[none,scales]).
		
class(Animal,reptile):- 
     cover(Animal,Cover), member(Cover,[feathers,hair]).
\end{lstlisting}


\item {\em Modification of background knowledge.}
In this step all multivalue clauses are added to the background
knowledge. This addition enriches the language used in hypothesis constructing.

\item {\em Usage.} Finally, the new information is used by the ILP algorithm to 
induce a theory.
\end{enumerate}


As described below, the QUEST and CRUISE's split point selection algorithm carries out the discretization/grouping process
in the following two steps.


\emph{1. CRIMCOORD transformation. }Before splitting, both QUEST and CRUISE transform each categorical attribute into a numerical one.
If $A=\left\{x_1,  \ldots, x_n\right\}$ is a categorical attribute, then:

         \begin{itemize}
	             \item A \emph{dummy binary vector} is assigned to each $x_i \in A$.
	                   For example, the attribute $A = \left\{red, green, blue\right\}$ has
	                   the dummy vectors: $red = 100$, $green = 010$, $blue = 001$.
	                   
	             \item Each dummy vector is then projected onto the largest discriminant
	                   coordinate (CRIMCOORD). This technique allows transforming
	                   a multidimensional (dummy) vector into a numerical value \citep{gnanadesikan1997methods}.
         \end{itemize}


\emph{2. Selection of split point. }

          \begin{itemize}
					       \item QUEST.	                     
											 The $k$-means algorithm \citep{Macqueen67somemethods}
	                     with $k=2$ is applied to all values in $A$ to form two subsets of
	                     numerical values: $A_1$ and $A_2$. This process ensures a binary split.
	      
                       A quadratic discriminant analysis - QDA \citep{friedman1} is then applied to
	                     calculate the split point $d$ between $A_1$ and $A_2$.


                  \item CRUISE.									
									      A Linear Discriminant Analysis - LDA calculates the best split $d$. 
                        Since LDA is more efficient with normal distributions, before
                        applying LDA, a BOX-COX transformation is used to improve the normality of $A$.									
					\end{itemize}					


MVL-ILP is capable of dealing both with numerical and categorical arguments.
This approach only creates two subintervals/subsets per each discretization/grouping process.
However, sometimes it is necessary to split a numerical argument in more than two subintervals
(or group a categorical argument in more than two subsets)
to reach a high accuracy. This approach does not implement a mechanism to test more than
two subintervals/subsets within the same clause.



\subsection{SIA01}\label{sia01}

The SIA01 system \citep{Augier95learningfirst} is an ILP system that represents each chromosome with 
a logic format. A clause is a chromosome, and its predicates and arguments are its genes. For instance, the clause $p\left(X,Y\right)\stackrel{}{\leftarrow}r\left(X,12,a\right)$
is a chromosome comprised of seven genes: 


\begin{figure}[h!]
    \centering
		\includegraphics[scale=0.1]{cromosoma1.eps}
	%\label{fig:cromosoma1}
\end{figure}

The fitness function is based on the consistency and the completeness of the current clause. This function penalizes those clauses
that cover more negative examples, and rates positively those clauses that cover more positive examples. Hence
this function only evaluates two classes.

{\bf Mutation. }If the gene is a numerical value, e.g. $12{.}3$, then the mutation operator will create an interval
which will contain that value. If the gene is a numerical interval, then the operator will enlarge it. This is shown as follows:

\begin{figure}[h!]
    \centering
		\includegraphics[scale=0.08]{cromosoma2.eps}
	\label{fig:cromosoma2}
\end{figure}


\bigskip

If the gene is a categorical value, then a set of categorical values is created by adding another value. If the gene is a
set of categorical values, then one value is added, for example:

\begin{figure}[h!]
    \centering
		\includegraphics[scale=0.1]{cromosoma3.eps}
	\label{fig:cromosoma3}
\end{figure}

{\bf Crossover. }This operator only exchanges genes with the same data type.

\begin{figure}[h!]
    \centering
		\includegraphics[scale=0.07]{cromosoma4.eps}
	\label{fig:cromosoma4}
\end{figure}


Any choice to enlarge some range is random. Hence this is an
unsupervised process. 

The SIA01 system has an operator that generalizes a clause
by means of genetic algorithms. This operator can deal
both with numerical and categorical attributes, but in a
bottom-up strategy. This may limit the efficiency of
genetic algorithms, since the operator cannot generate
and test more specific clauses.



\subsection{ECL}\label{ecl}

\emph{Evolutionary Concept Learning - ECL system} \citep{DivinaM05} is a relational
learner based on genetic algorithms. Each chromosome is represented by a clause 
of the form $L_0 \stackrel{}{\leftarrow} L_1, L_2, \ldots L_n$. Thus,
the preceding clause is a chromosome with $n+1$ genes.

The first version of ECL uses four mutation operators to specialize
or generalize a chromosome; these operators are:
add a literal, delete a literal, change a constant into a variable,
and change a variable into a constant.
Three ECL extensions perform the following procedure to deal with numerical attributes.

If mutation operator adds a literal $C$ to a chromosome body,
it also adds an inequality $a \leq X \leq b$ for each numerical attribute $X$ within $C$. 
Depending on the initial values for each inequality, ECL has three extensions:

\begin{enumerate}
       \item {\em ECL-Local Unsupervised Discretization (ECL-LUD).}
										The inequality is initialized as follows: $a \leq X \leq b$, where $a,b$ are constants of the
										background knowledge.

      \item {\em ECL with Local Supervised Discretization with Fine grain initialization (ECL-LSDf)}.
			             The initial values $a$ and $b$ are called  {\it boundary points}  and are defined as follows.
					
																		Let $A$ be a numerical attribute, the values of $A$ occurring in the examples are sorted
																		in increasing order. A boundary point is the midpoint of two successive values of $A$
																		occurring in examples of different classes.


      \item   {\em ECL with Local Discretization with Coarse grain initialization (ECL-LSDc)}. 
                   The values $a$ and $b$ are initialized with {\em discretization points} which are obtained by discretizing
                  the numerical attribute $X$ with the Fayyad and Irani's discretization algorithm \citep{FayyadI93}.

\end{enumerate}

Five mutation operators modify the values $a$ and $b$. For
this, the system uses information on the distribution of the values of
each attribute. This information is obtained, {\em a priori,} with the
{\it Expectation-Maximization (EM) algorithm
  \citep{Dempster77maximumlikelihood}}. This algorithm returns $n$
clusters described by means $\mu_i$ and standard deviations
$\sigma_i$, where $1 \leq i \leq n$.

Let us consider the inequality $I = a \leq X \leq b$, and let $b_{cl}$ and 
$e_{cl}$ be the beginning and the end of the cluster $cl$ which contains $I$,
then the following operators are used to modify $I$ and specialize/generalize
a chromosome during the mutation process.

\begin{itemize}
 \item \emph{ Enlarge. }This operator applied to $I$ returns the interval $I'=a' \leq X \leq b'$,
               where $a' \leq a$ and $b \leq b'$.
               
 \item \emph{ Shrink. }This operator applied to $I$ returns the interval $I' = a' \leq X \leq b'$,
               where $a' \geq a$ and $b' \leq b$.
               
 \item \emph{ Shift. } This operator applied to $I$ returns the interval $I' = a' \leq X \leq b'$,
               where $a'$ and $b'$ are points in the cluster containing $a,b$
               such that $P(a' \leq X \leq b') = P(a \leq X \leq b)$.
               
 \item \emph{ Change cluster. }This operator applied to $I$ returns the interval $I' = a' \leq X \leq b'$,
               where $a',b'$ belong to a different cluster.
               
 \item \emph{ Ground. }This operator applied to $I$ returns the interval $I' = a' \leq X \leq b'$,
               where $a'$ is in the cluster which contains $a,b$.
\end{itemize}


Unlike the SIA01 system, the ECL based methods use operators that generalize and
specialize a clause. However, the handling of numerical data depends 
on the implementation of genetic operators. Hence, to use this approach
in other ILP systems, it is necessary to modify their operators.
On the other hand, ECL does not implement operators to test subsets
of categorical values. 




\subsection{Propositionalization with LINUS and its extensions}\label{propositionalization}

{\it Propositionalisation} is the transformation of relational learning problems
to a propositional format. Its main objective is to solve a
problem that was originally a relational problem, with more
efficient propositional algorithms than the relational
systems \citep{knobbe1}.

There are basically two propositionalization types:

\begin{itemize}
	\item  {\it Database-oriented.} These methods define propositional features with
	          aggregate functions.  An {\it aggregate function} is a function that takes 
						a set of records in a database and returns a single value \citep{knobbe1},
            for example, the function $count\left(\right)$. 
						
 \item  {\it Logic-oriented.} This kind of propositionalisation deals with highly complex
            elements in background knowledge: molecular biology or language processing.
\end{itemize}

In both cases, the handling of numerical/categorical attributes is carried
out by a propositional learner. Hence, the efficiency to calculate
each threshold depends on that learning system.

LINUS \citep{Lavrac:1991:LND:645322.649501}, uses a restricted logic
language: function-free non-recursive Horn clauses with negation. The
propositionalization is carried out in three steps.


\begin{itemize}
				\item Transformation from relational to propositional form. 
In this step the target relation is equivalent to the class attribute.
Relations in the background knowledge are equivalent to new attributes
whose Boolean values depend on the target relation arguments.


				\item A propositional learner is used to find an attribute-value hypothesis $H$.
LINUS can use the propositional learners: CN2, NEWGEM or ASSISTANT.

				\item The propositional rules induced are transformed into a relational form.
\end{itemize}

{\it Determinate LINUS - DINUS \citep{Lavrac:1991:LND:645322.649501}}, is an 
extension of LINUS. The main difference lies in the weakening of the language bias
which includes determinate non-constrained clauses. Another extension of
LINUS is the SINUS system \citep{KrogelEtAl:2003}, which includes
flattened Prolog clauses in the language. 
Propositionalization allows us to use (in ILP) methods which are efficient
in the propositional learning.  However, 
there may be loss of information during propositionalization. 


\subsection*{Our method}
As we can see, almost all ILP systems can deal with numerical attributes,
but not all of them implement strategies to test more than one 
categorical value at the same time.
Some strategies depend on the implementation 
of the refinement/specialization operators and the
search strategy (bottom-up and top-down). Hence,
there is no guarantee that these strategies can be
implemented in other ILP systems.
Other approaches to handle numerical information improve
significantly the power of numerical reasoning, but
their use is difficult for researchers from other areas.
In the next section, we present the proposed method which 
can process both numerical and categorical attributes.
This method was implemented in the Aleph system,
but it can be added in any ILP system, since it does not
depend on the search strategy and operators.
In addition to this, our proposal uses the declaration types
of the Aleph system which allow users to define the
arguments to be processed.



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Grouping and Discretization for Background Knowledge Enrichment}\label{proposed_method}

As we can see in the previous section, the current ILP systems use their refinement operators
to deal mainly with numerical attributes (excepting MVL-ILP and TILDE, which process the information
before the learning process, namely in a global way); therefore these methods depend on the implementation of the ILP algorithms.
In this section we present a twofold method called Grouping and Discretization for Background Knowledge
Enrichment  (GD-BKE) to deal both with categorical and numerical attributes in relational learning. 
This method does not depend on the search/refinement algorithms and is capable of
processing each attribute in a global or local way\footnote{A method that uses the whole training data to
discretize/group attributes is global. On the other hand, local methods use a subset of the training data.}.
To achieve this, GD-BKE enriches the hypothesis language with
numerical splits, subsets of categorical values and new relations.


To enrich the hypothesis language our method tests different numerical subintervals (each subinterval
is delimited by two split points) or subsets
of categorical values according to an evaluation function, and the best qualified are added into the background knowledge.
Due to the great amount of subintervals/subsets
that can be created, we decided to use an evolutive approach to generate and evaluate them.
We decided to embed our method in the Aleph algorithm, however 
it can  be used in any relational learning system. The modified Aleph algorithm is shown in the Algorithm \ref{gd_bke}.


\begin{algorithm}
\caption{Aleph algorithm with GD-BKE}\label{gd_bke}
\begin{algorithmic}[1]
\Require State parameters: modes, type,  determinations and new parameters (\S\ref{state_parameters}.)

\State[1] Select example $e$

\State  \hspace{0.75 cm} Create {\em FixedSubintervals and FixedSubsets}   (\S\ref{create_fixed})

\State \hspace{0.75 cm} Discretization/Grouping  (\S\ref{discretization_grouping})

\State \hspace{0.75 cm} Enrichment (\S\ref{discretization_grouping})

\State Build most specific clause (\S\ref{build_bottom_clause})

\State Search (\S\ref{aleph_search})

\State Remove redundant. Go to step 1.
\end{algorithmic}
\end{algorithm}

The main items of Algorithm \ref{gd_bke} are presented below.


\subsection{State parameters} \label{state_parameters}

In addition to the modes, types and determinations,
users can also declare the attributes to be discretized/grouped in any of the two
following ways:

\begin{itemize}
	\item $\mbox{\em Approach}(Goal(\ldots, \mbox{\em link, class, \ldots})$, $\mbox{\em Lit}
	      (\ldots, \mbox{\em link, attribute},  \ldots)).$
	      
				Where {\em Approach} can be either {\em discretization} or {\em grouping}.
				The argument {\em link} allows to relate the literal {\em Lit} that contains the 
				attributes to be processed with the target relation {\em Goal}. Each argument {\em attribute} 
				will be discretized/grouped respect to {\em class} which has two or more values. 
				The user can declare more than one attribute in {\em Lit}.

													
  \item $\mbox{\em Approach}(Goal(\ldots, \mbox{\em link, \ldots})$, $\mbox{\em Lit}
	      (\ldots, \mbox{\em link, attribute},  \ldots)).$
	
	      In this case the corresponding classes are not explicitly defined;
	      therefore, they are {\em positive} and {\em negative}. 
\end{itemize}

Consider the Student Loan dataset \citep{uci} and its target relation \lstinline[language=Prolog]{not_pay/1} which is true for those
students who do not need to repay their loan (positive examples) and false
in other case. The following relations compose the
background knowledge: 

\begin{lstlisting}[language=Prolog]
male(S).
absence_from_school(S, Integer).
enrolled(S, School, Integer).       % School={ucsd,ucb,ucla,uci,occ,smc}
filed_for_bankruptcy(S).
unemployed(S).
\end{lstlisting}

As we can see, there are two numerical attributes (denoted as {\em Integer}) and one categorical attribute ({\em School}).
For these attributes the user must declare the following lines:

\begin{lstlisting}[language=Prolog]
:-discretization(not_pay(link), absence_from_school(link, attribute)).
:-discretization(not_pay(link), enrolled(link, no, attribute)).								
:-grouping(not_pay(link), enrolled(link, attribute, no)).
\end{lstlisting}

The above lines indicate that the second argument in \lstinline[language=Prolog]{absence_from_school} 
and the third argument in  \lstinline[language=Prolog]{enrolled}
will be discretized. The second argument
in  \lstinline[language=Prolog]{enrolled} will be grouped. The arguments  \lstinline[language=Prolog]{no}
will not be processed. As mentioned above, the argument  \lstinline[language=Prolog]{link} 
relates each literal with the target predicate.\\


\subsection{Create FixedSubintervals and FixedSubsets}\label{create_fixed}

In this step a {\em FixedSubinterval} or a {\em FixedSubset}
is created for each attribute declared in the preceding step. These are defined as follows:

\begin{itemize}
							\item Let {\em attribute} be a numerical argument to be discretized and
							$\left\{v_1, v_2, \ldots, v_n\right\}$ be the set of values in increasing order
							such that each $v_i \in$ {\em attribute} and $v_i$ is related to the example $e$.
							We call {\em FixedSubinterval} to the range $\left[v_1 .. v_n\right]$.
							Note that if the corresponding example is related to one single value, then the {\em FixedSubinterval}
							is a single point.

							\item  Let {\em attribute} be a categorical argument to be grouped and $\left\{c_1 , \ldots, c_n\right\}$
							be the set of categories such that each $c_i \in $ {\em attribute} and $c_i$ is related to example $e$. We
							call {\em FixedSubset} to the set of categorical values $\left\{c_1 , \ldots, c_n\right\} $.
\end{itemize}

These are {\it fixed} because they will invariably be in all subintervals/subsets that will be tested.
In the problem student loan, the example \lstinline[language=Prolog]{student9}  is related to one single
value per each attribute declared, therefore a {\em FixedSubinterval} is a split point.
So Aleph creates two {\em FixedSubintervals} and one {\em FixedSubset} like these:


\begin{lstlisting}[language=Prolog]
Facts related to example student9

absence_from_school(student9,4).
enrolled(student9,smc,6).

FixedSubintervals and FixedSubset related to student9

[4]   % From second argument in absence_from_school
[6]   % From third argument in enrolled
{smc} % From second argument in enrolled
\end{lstlisting}

\bigskip

\subsection{Discretization and Grouping}\label{discretization_grouping}

Before the saturation stage, each attribute declared by the user is processed.
Depending on the attribute type there are two possible cases.

% \begin{adjustwidth*}{0.5cm}{0.5cm}

\begin{itemize}
\item Let {\em attribute} be a numerical argument and
$ \left[\mbox{\em min..max}\right]$ be its {\em FixedSubinterval}. 
Then an evolutive algorithm returns a set  of intervals (individuals) $I_n$,
where each individual contains the FixedSubinterval,
 this set is defined as follows:

\[
  I_n= \left\{x  = \left[\mbox{\em min'..max'}\right] \mid   \mbox{\em min'} \leq \mbox{\em min} \wedge \mbox{\em max} \leq \mbox{\em max'}\right\}
\]
	
We used populations with $15$ individuals because the execution time is 
significantly affected with larger populations. However, the user can
define populations with more individuals.


To evolve the individuals six mutation operators are defined. These
can enlarge or shrink an interval $x$, either on the left side, on the right side, or on both sides.
These operators are based on the work presented by \citet{DivinaM05}, but unlike
them, our operators are independent of the refinement operators.
This allows us to implement our method in any relational learning system,
regardless how the refinement operators are implemented. Each mutation operator
is applied with a probability $p=\frac{1}{6}$.
Once all individuals are mutated, we have a population with $30$ individuals (parents and children),
then the best $15$ are selected to be in the next generation.
The fitness function for an individual $x$ is based on information gain, see Eq. (\ref{fitness_information_gain}).


\begin{equation}
\mbox{\em IG}(x) = \mbox{\em ent}(\mbox{\em Int}) - \frac{\mbox{\em count}(x)}{\mbox{\em count}(\mbox{\em Int})} \mbox{\em ent}(x)
\label{fitness_information_gain}
\end{equation}

The corresponding entropy for some interval {\em Int} is presented in Eq. (\ref{fitness_information_gain2}):

\begin{equation}
   \mbox{\em ent}(\mbox{\em Int}) = - \sum^{\abs{S}}_{j=1} p(\mbox{\em Int},j) \log_2 (p(\mbox{\em Int},j))
\label{fitness_information_gain2}
\end{equation}
      where	$\abs{S} = $ number of classes,
      {\em Int}  is the numerical interval of {\em attribute},
			$\mbox{count}(\mbox{\em Int}) =$ the number of numeric values within the interval {\em Int},
			$p(\mbox{\em Int},j)$ is the proportion of values in {\em Int} that belong to the {\em jth} class.

In most of relational learning systems the value $\abs{S}=2$, but our method can evaluate
two or more classes.

\item Let {\em attribute} be a categorical argument and $\left\{c_1, \ldots, c_n\right\}$ be its   {\em FixedSubset}.
 In this case an evolutive algorithm searches for the best sets (individuals) of categorical values, each
subset contains the FixedSubset, defined as follows:

\[
C = \left\{y  \mid FixedSubset \subseteq y\right\}
\]

Like the system SIA01, previously seen in Section \ref{sia01}, the proposed method can
add new values to each individual to mutate it. However, our
method can also delete values. This allows the two mutation operators to generalize
and specialize a clause. Each mutation operator is applied with a probability $p=\frac{1}{2}$.
As in the previous case each population has fifteen individuals.

The fitness function is based on the DIstance Learning in Categorical
Attributes - DILCA \citep{IencoPM09} which is a distance measure between pairs of
categorical values. The fitness for an individual $y$ is the total sum
of the distances for each pair of categorical values in $y$. In addition to this,
if the sum of distances for two or more individuals is the same, then the value
$\frac{1}{\abs{y}}$ is added, because we want to minimize the distance and to favor the subsets
with more values.  Unlike the SIA01 system, this function can be evaluated with two or more classes.
The fitness of an individual $y$ is shown in Eq. (\ref{categoricalFitness}).

\begin{equation}
Fitness(y) = \sum_{c_i, c_j \in y} \sqrt{  \sum_{s_t \in S} (  P ( c_i \mid s_t ) - P( c_j \mid s_t )   )^2 } +\frac{1}{\abs{y}}
\label{categoricalFitness}
\end{equation}

where $c_i, c_j \in y$ such that $i \neq j$.  $S$ is the set of classes defined,
						$P ( c_i \mid s_t )$ is the conditional probability for $c_i$ given $s_t$, and $P ( c_j \mid s_t )$ is the
						conditional probability for $c_j$ given $s_t$.

\end{itemize}


The intervals and subsets found by the algorithm are added to the background knowledge
(enrichment). For this, two types of predicates are defined: \lstinline[language=Prolog]{discr_split/1} and  
\lstinline[language=Prolog]{grp_subset/1}.
This information is used by the predicates: 
\lstinline[language=Prolog]{grp_member/2,  >=/2,  =</2}.
 A set of declaration modes is added for the preceding predicates.

Aleph enriches the background knowledge with numerical and categorical individuals like these:

\begin{lstlisting}[language=Prolog]
Numerical individual           Categorical individual

split([4,6]).									  group([smc,ucla]).
split([3.7,7]).								  group([smc,ucla,uci]).
split([5.4,15.6]).							group([smc,ucla,ucb]).
split([3.4,6.3]).						    group([smc,ucla,uci,ucb]).
\end{lstlisting}

The above numerical intervals refer to the third argument in literal {\em enrolled}; note that 
the {\em FixedSubinterval = [6]} is within each individual. Furthermore, the sets of categorical
values related to the second argument in the literal {\em enrolled} contain the {\em FixedSubset = $\left\{smc\right\}$}.\\

 \subsection{Build a bottom clause that entails the example $e$}\label{build_bottom_clause}

Aleph uses the enriched background knowledge to build the 
bottom clause. The following clause represents a typical bottom clause.

\begin{lstlisting}[language=Prolog]
not_pay(A) :-
   unemployed(A),absence_from_school(A,B),enrolled(A,C,D),
   filed_for_bankruptcy(A),gteq(B,4),gteq(B,3.7),
   gteq(B,5.4),gteq(B,3.4),lteq(B,6),
	 lteq(B,7),lteq(B,15.6), lteq(B,6.3),
   member(C,[smc,ucla]), member(C,[smc,ucla,uci]),
   member(C,[smc,ucla,ucb]), member(C,[smc,ucla,uci,ucb]).
\end{lstlisting}

As we can see, our approach embedded in the basic Aleph algorithm is guided by all 
values (numerical and categorical) related to $e$, however this method does not
depend on the bottom-up ILP algorithms because the enrichment process is
performed before constructing candidate hypotheses. Furthermore, 
neither the refinement operators nor the search algorithms are modified.

\subsection{Search}\label{aleph_search}

The proposed approach does not affect the algorithm selected by the user
(Aleph has several search strategies, refinement operators and evaluation functions).
Thus, with the enriched bottom clause, Aleph induces a rule.

Thus, if the discretized/grouped attributes are relevant to the ILP problem analyzed then 
the final theory will contain clauses with information obtained during the preprocessing data.
The following clauses show this difference. On the left side we can see the rules obtained by
grouping and discretizing the second and third argument, respectively, of the literal
\lstinline[language=Prolog]{enrolled/3}. On the right side are the rules built by the basic Aleph algorithm.
As we can see, the rules obtained with our method can cover more examples (positive
and negative). However the fitness functions allow Aleph to find the values that cover
as less negative examples as possible.


\begin{lstlisting}[language=Prolog]
[GD-BKE]                             [Aleph]
[Theory]                             [Theory]

[Rule 1]                             [Rule 1]
[PosCover=98 NegCover=0]             [PosCover=18 NegCover=0]
not_pay(A) :-                        not_pay(A) :-
   enrolled(A,B,C),                     enrolled(A,ucsd,5).
   discr_gteq(C,11.56),                                       
   discr_lteq(C,14.5).     
   
[Rule 7]                             [Rule 7]   
[PosCover=48 Neg cover=3]            [PosCover=12 NegCover=2]
not_pay(A) :-                        not_pay(A) :-
   enrolled(A,B,6),                     enrolled(A,uci,8).
   grp_member(B,[ucb,ucla,uci]).
	           
\end{lstlisting}

In the next section we present the experiments performed to compare the accuracy of our method.



\section{Experimental results}
\label{experimental_results}

In this section, we present the results obtained by comparing our method with two strategies
to deal with numerical attributes: lazy evaluation in Aleph and discretization in 
the TILDE algorithm.  The main goal of these experiments is to answer the questions raised in the Section \ref{intro} with regard to
the accuracy and how to create and use the new information (numerical splits and subsets of categorical
values).  For this, we use the following methodology, datasets and software.



\subsection{Materials}

The following datasets were used to evaluate the performance
of our approach:

\begin{itemize}
     \item From the UCI machine learning repository \citep{uci}:
		       Australian Credit Approval, Pittsburgh Bridges,
					 Japanese Credit Screening, German Credit,
					 Iris Dataset, Moral Reasoner, Ecoli and
					 Student Loan.
		\item  We also performed tests over the well-known ILP
		       datasets: Carcinogenesis \citep{Srinivasan97carcinogenesispredictions},
					 KRK illegal chess position-KRKi \citep{Muggleton89anexperimental}, 
					 and mutagenesis  data \citep{Muggleton94mutagenesis:ilp} 
\end{itemize}

The datasets listed above were analyzed with discretization and lazy evaluation in Aleph and GD-BKE
which is embedded in the Aleph system. Aleph is implemented in
YAP Prolog version 6.2.2. \citep{journals/tplp/CostaRD12}.
These datasets were also analyzed with the TILDE learner which is included in 
the ACE data mining system \citep{ace1}.
All experiments were performed on a modern multicore PC machine.


\subsection{Method}

Since a problem in TILDE can be multi-class and
Aleph deals essentially with binary class problems, we have
divided the datasets used in two types:
binary and multi-class datasets.

\subsubsection*{Binary datasets}

In this case we analyzed only datasets with negative and positive
examples: Australian Credit Approval, Carcinogenesis, Japanese
Credit Screening, German Credit, Moral Reasoner, Student Loan,
illegal Chess Position and Mutagenesis.

To compare each analysis made with TILDE and Aleph
we defined the following equivalences in the corresponding mode and type declarations.


\begin{lstlisting}[language=Prolog]
Aleph declarations           TILDE declarations

goal(...).                   goal(...,pos). %positive example
:
goal(...).                   goal(...,neg). %negative example
:

modeb(1,lit(+t1,-t2,#t3)).   rmode(1,lit(+T1,-T2,#)).

% types
t1(value1). t1(value2)...    type(lit(t1,t2,t2)).
\end{lstlisting}

It should be noted that unlike Aleph in TILDE
each example must have a class argument. In this case
there are only two classes: positive and negative.




\subsubsection*{Multi-class datasets}

In this type the following datasets have more than two
classes: Pittsburgh bridges (6 classes), Iris
dataset (3 classes) and Ecoli (8 classes). 
The mode and type declarations for these datasets are as follows:


\begin{lstlisting}[language=Prolog]

Aleph declarations              TILDE declarations

% only positive examples
set(evalfn,posonly).

% positive examples
goal(...,class1).               goal(...,class1).
goal(...,class2).               goal(...,class2).
:                               :
goal(...,classN).               goal(...,classN).
\end{lstlisting}

We must note that to declare each class in Aleph, the target predicate must have an extra argument: the 
corresponding class. In addition to this, 
for each positive example we must create one or more negative examples: 
closed-world assumption. This can cause the creation of many
negative examples. To avoid this problem we use the Aleph option {\em posonly} to
induce theories from positive examples. 


Finally, we set the following parameters both for binary and multi-class datasets:

\begin{lstlisting}[language=Prolog]
Aleph format              TILDE format

% Minimum accuracy of a rule (Aleph) or a node (TILDE)
set(minacc,0.80).         accuracy(0.80).

% Lower bound on the number of examples to be covered
set(minpos,2).            minimal_cases(2).
\end{lstlisting}

For our experiments, GD-BKE processed each 
numerical/categorical attribute in a local way; namely,
each attribute was discretized or grouped before each saturation step.

A 10-fold validation and a t-test (with a value $\alpha=0.05$) were
executed for each dataset with the following results.





\subsection{Results}

Here are the results obtained by comparing GD-BKE with Discretization and Lazy Evaluation
(hereafter called Lazy Aleph), and discretization in TILDE.


\paragraph{GD-BKE vs Discretization and Lazy Aleph}

Table \ref{tab:bkevslazy} shows that GD-BKE improved the accuracy significantly in 
four datasets (marked with the symbol $\dagger$). However, in these cases the number
of rules increased with GD-BKE.  Moreover, although GD-BKE and Lazy Aleph discretized the
same numerical attributes, our proposal improved the accuracy in almost all cases, except 
for Moral Reasoning and Pittsburgh Bridges. We believe that these results derive mainly from 
the following two reasons:

\begin{itemize}
    \item Unlike Discretization and Lazy Aleph our proposal uses a grouping method to deal with categorical attributes.
		      In some cases the grouping of categorical attributes can improve the
					accuracy. 

    \item GD-BKE discretizes/groups with respect to two or more classes,
          whereas the code used for Discretization and Lazy Evaluation is restricted to positive and negative
          examples.  In fact, this code was taken from the examples given in \citet{aleph1}.
          Moreover, as stated earlier, when a dataset has more than two classes we must
          use the {\em posonly} mode in Aleph; therefore the code for Lazy Aleph cannot properly discretize
          each numerical attribute since there are no negative examples.
          Note that the code for Lazy Aleph can be modified to discretize with respect to more than two classes; however,
          the final user should have basic knowledge of Prolog and
          Inductive Logic Programming.  Clearly, this is not practical. Our
          method just requests mode declarations, which are easier to add.
\end{itemize}

\begin{table}[h]
% For LaTeX tables use
\centering
\scalebox{1}[1]{
\begin{tabular}{lllllll}
\hline\noalign{\smallskip}
Dataset & \multicolumn{2}{|c|}{GD-BKE} & \multicolumn{2}{|c|}{Lazy Aleph} \\
\noalign{\smallskip}\hline\noalign{\smallskip}
       & $A(\%)$ &  {\em Rules} & $A(\%)$ &  {\em Rules}\\
\cmidrule(l){1-3} \cmidrule{4-5} 
%\noalign{\smallskip}\hline\noalign{\smallskip}
Australian$\dagger$& \bf 73.6 & 13.7 & 60 & \bf 4.8   \\

Japanese Credit$\dagger$ & \bf 74.89 & 7.5 & 61 & \bf 6.7 \\

German Credit &  \bf 72.2 & \bf 53.6 & 71 & 57.1  \\

Moral Reasoning & \bf 96 & \bf 1 & \bf 96 & \bf  1\\

Carcinogenesis & \bf 56.6 & \bf 13.5 & 51.9 & 19.2  \\

KRKi  & \bf 49.7 & \bf 72.6 & 49 & 89.8   \\

Student Loan$\dagger$  & \bf 99.9 & 8  & 62 & \bf 3.9    \\

Mutagenesis  & \bf 82.9 &  \bf 7.1 & 77.6 & 11.1   \\

Pittsburgh Bridges & \bf 97.2 & \bf 8 & \bf 97.2 & 8.2   \\

Iris & \bf 99.3 & 5.4 & 97.3 & \bf 3   \\

Ecoli$\dagger$ & \bf 98.8 & 9.5 & 97 & \bf 7.6   \\
\noalign{\smallskip}\hline
\end{tabular}}
\caption{This table shows both the accuracy and the number of rules
for each dataset. These results were obtained with GD-BKE and Lazy Aleph. 
The best values are shown in bold. The symbol ``$\dagger$'' indicates that the accuracy of GD-BKE is significantly 
better than that of Lazy Aleph.}
\label{tab:bkevslazy}       % Give a unique label
\end{table}






\paragraph{GD-BKE vs TILDE}

GD-BKE improves the accuracy significantly in six datasets (see Table~\ref{tab:bkevstilde}, 
where these datasets are marked with the symbol $\dagger$), but
unlike the previous case, GD-BKE decreases the number of rules in four of them.
In some datasets TILDE has a higher accuracy than GD-BKE even
with a smaller number of rules.
For instance, for Carcinogenesis, KRKi and Mutagenesis datasets, TILDE constructs theories with better 
accuracy, however GD-BKE decreases the number of rules in two of them
with a competitive accuracy. 
On one hand, we believe that these results
may vary because the learning setting in Aleph and TILDE are not strictly analogous.
On the other hand, TILDE can discretize multi class problems, but this process is only
global, namely numerical attributes are discretized one time before the learning process.
GD-BKE can also discretize (and group) in a local way: 
before each saturation step. 
The enriched language not only decreases the number of rules, but also creates
rules that are easier to understand (expressiveness). The following rules show this
contrast.

\begin{lstlisting}[language=Prolog]
Rule created with GD-BKE            Rules created with TILDE

illegal(A) :-         					    illegal(A,[pos]) :-
   wk(A,2,B), wr(A,2,C),	              wr(A,B,4),bk(A,C,2), !.
	 aleph_grp_member(C,[4,6,7]).     illegal(A,[pos]) :-
                                        wr(A,B,6),wr(A,0,C), !.
                                    illegal(A,[neg]) :- 
																				wr(A,B,7),bk(A,C,3), !.
\end{lstlisting}



\begin{table}[h]
% For LaTeX tables use
\centering
\scalebox{1}[1]{
\begin{tabular}{lllllll}
\hline\noalign{\smallskip}
Dataset & \multicolumn{2}{|c|}{GD-BKE} & \multicolumn{2}{|c|}{TILDE} \\
\noalign{\smallskip}\hline\noalign{\smallskip}
       & $A(\%)$ &  {\em Rules} & $A(\%)$ &  {\em Rules}\\
\cmidrule(l){1-3} \cmidrule{4-5} 
%\noalign{\smallskip}\hline\noalign{\smallskip}
Australian & \bf 73.6 & \bf 13.7 & 72 &  30   \\

Japanese Credit$\dagger$ & \bf 74.89 & \bf 7.5 & 67.7 & 10.9 \\

German Credit &  \bf 72.2 & \bf 53.6 & 71.4 & 69.8  \\

Moral Reasoning$\dagger$ & \bf 96 & \bf 1 & 88 & 3 \\

Carcinogenesis & 56.6 & \bf 13.5 & 63 & 34.3  \\

KRKi  & 49.7 & \bf 72.6 & \bf 50 & 134.9   \\

Student Loan$\dagger$  & \bf 99.9 & 8  & 82.1 & \bf 5    \\

Mutagenesis  &  82.9 &  7.1 & \bf 84.2 & \bf 3.7   \\

Pittsburgh Bridges$\dagger$ & \bf 97.2 & \bf 8 & 61.9 & 12.5   \\

Iris$\dagger$ & \bf 99.3 & 5.4 & 93.1 & \bf 3   \\

Ecoli$\dagger$ & \bf 98.8 & \bf  9.5 & 78 & 11.8   \\
\noalign{\smallskip}\hline
\end{tabular}}
\caption{This table shows both the accuracy and the number of rules
for each dataset. These results were obtained with GD-BKE and TILDE. 
The best values are shown in bold. The symbol ``$\dagger$'' indicates that the accuracy of 
GD-BKE is significantly better than that of TILDE.}
\label{tab:bkevstilde}       % Give a unique label
\end{table}


We can see that, for the accuracy, our method is better than or equal to Aleph and TILDE 
in most of the datasets; Figure~\ref{fig:grafica_precision} graphically illustrates this trend.


\begin{figure}[h]
	\centering
	\fbox{
		\includegraphics[scale=0.28]{grafica_precision2.png}
		}
	\caption{This graphic shows the accuracy for each dataset. GD-BKE is better than
Lazy Aleph and TILDE in most of the datasets.}
	\label{fig:grafica_precision}
\end{figure}


 
\section{Conclusions and Future Work}\label{conclusions}

In this paper, we have presented a method, called GD-BKE,  to deal both with numerical 
and categorical attributes.  This method uses a genetic algorithm to find the best numerical splits and
categorical subsets before each saturation step (in the basic Aleph algorithm), therefore this
is independent of the refinement operators and the search process, and can be implemented
in any relational learning system.

The numerical splits, the subsets of categorical values, together with new predicates are
added to the background knowledge. Then this new information
is used to build the bottom clauses and the rules of the final theory.

The final user does not need to add all code to process attributes, like Lazy Evaluation in
Aleph, but the user just adds a few lines to indicate the attributes
which will be processed. This feature of our method allows users to test the
discretization and grouping in an easy way.

GD-BKE was compared with Lazy Evaluation in Aleph and discretization in
the TILDE system. With the obtained results,  we can draw some conclusions: 

\begin{itemize}
  \item  Unlike other genetic approaches such as SMART+ \citep{BottaG93}, our method 
        can deal with both categorical and numerical attributes. Moreover, just as 
        systems based on learning from interpretations (like the TILDE system), our approach can discretize/group 
        with more than two classes, not only positive and negative examples.

  \item The quality of the final theories depends on the method used as well as the relevance 
	            of the processed attributes.

  \item ILP problems whose final theories do not need constants to be induced on 
        them cannot benefit from this method. In this case, variables are useful 
        to better explain the concept.
        
  \item The size of the search space can be affected by the proposed method. 
        On the one hand, a discretized/grouped attribute which is not relevant can generate 
        a large amount of unnecessary candidate rules. 
        On the other hand, a discretized/grouped attribute which is relevant can 
        significantly decrease the number of candidate rules.
				
  \item Finally, users do not know in advance which attributes will be more useful, therefore 
        intuition is an important element to select each attribute.
        Our method allows users to experiment easily with different attributes.

\end{itemize}
					
					

Some considerations for further work are as follows.
Since not all cases were successful, in terms of accuracy and simplicity, it is necessary to implement and test other fitness functions and more
datasets. These tests will help us to identify the factors which affect both accuracy and simplicity of the final theories, as well as the
search space size. 
Another path of research is to look into
multivariable predicates, in case there are correlations between two or more attributes.
Thus, we want to investigate whether these correlations can be used to improve the performance in ILP (accuracy, simplicity and time of execution)
and what kind of problems can be treated with these predicates.
Finally, to identify whether an attribute is relevant or not, it could be useful to include a feature selection process to remove irrelevant literals.
We want to implement in Aleph  these ideas and compare performance with other ILP systems that handle data types.



\section*{Acknowledgments}

We would like to thank Eduardo Morales for his valuable input writing this paper.



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



















































































































% \bibliographystyle{spmpsci}
\bibliography{biblio}



\end{document}






















