%%%%%%%%%%%%%%%%%%%%%%% file template.tex %%%%%%%%%%%%%%%%%%%%%%%%%
%
% This is a general template file for the LaTeX package SVJour3
% for Springer journals.          Springer Heidelberg 2010/09/16
%
% Copy it to a new file with a new name and use it as the basis
% for your article. Delete % signs as needed.
%
% This template includes a few options for different layouts and
% content for various journals. Please consult a previous issue of
% your journal as needed.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% \documentclass{svjour3}                     % onecolumn (standard format)
\documentclass[smallcondensed]{svjour3}       % onecolumn (ditto)
% \documentclass[smallextended]{svjour3}       % onecolumn (second format)
%\documentclass[twocolumn]{svjour3}          % twocolumn
%
% \smartqed  % flush right qed marks, e.g. at end of proof
%
\usepackage{listings}
\usepackage{graphicx}
\usepackage{changepage}
\usepackage{epsfig}
\usepackage{dsfont}
\usepackage{array}
\usepackage{amsmath}
\usepackage{fancyvrb}
\usepackage{booktabs}
\usepackage{mathptmx}
\usepackage{sidecap}

% Provides \labelformat, which changes how \ref references look
\usepackage{varioref}

% \setlength{\textwidth}{\dimexpr\pdfpagewidth-.2.5in}% Equal left/right margins
% Defines \abs{x} for typesetting absolute value bars: |x|
\newcommand{\abs}[1]{\left\vert#1\right\vert}

\RequirePackage{fix-cm}

\usepackage{color}
\definecolor{lightgray}{rgb}{.94,.94,.94}
\definecolor{darkgray}{rgb}{.3,.3,.3}
\definecolor{purple}{rgb}{0.65, 0.12, 0.82}
\definecolor{blue}{rgb}{0.0, 0.0, 0.82}
\definecolor{red}{rgb}{0.83, 0.0, 0.0}
\definecolor{green}{rgb}{0.1, 0.55, 0.1}

\lstdefinelanguage{Prolog}{
% basicstyle=\small,
% frame=trBL,
% linewidth=8cm,
% backgroundcolor=\color{lightgray},
keywords={modeh,modeb,determination,set,
discretization,grouping,
lazy_evaluate,to_be_discretized,
discretized,rmode,threshold,posonly},keywordstyle=\bfseries,
commentstyle=\small\color{darkgray}\ttfamily, tabsize=2,
% Here 'l' indicates a single-character line-comment delimiter: % .......
% Here 's' indicates a comment enclosed between two delimiters: /* ..... */
morecomment=[l]{\%},
morecomment=[s][\color{red}]{/*}{*/},
emph={adult,gteq,lteq,number,age,person,A,C,B,D,E,F,G,H,I,J,K,L,Age,Person,class,
Animal,reptile,mammal,fish,bird,Cover,member,feathers,hair,scales,none,
true,false,link,active,atm,no,attribute,logp,no_payment,
absence_from_school,enrolled,male,
S,Int,School,unemployed,ucb,ucla,uci,smc,
bottom,clause,Extended,Aleph,Pos,Rule,Neg,
Theory,theory,no_payment_due,cover,PosCover,NegCover,
VarType,Arity,Goal_Pred,
filed_for_bankruptcy,S,
discr_split,evalfn,
grp_subset,
grp_member,
discr_lteq,
discr_gteq,
discr_between,
goal,pos,neg,Positive,Negative,example,format,Aleph,TILDE,
lit,T1,T2,t1,t2,t3}, emphstyle=\ttfamily,
%moredelim=[l][\ttfamily]{,},
%moredelim=[l][\ttfamily]{\{},
%moredelim=[l][\ttfamily]{\}},
%moredelim=[l][\ttfamily]{(},
%moredelim=[l][\ttfamily]{)},
%moredelim=[l][\ttfamily]{[},
%moredelim=[l][\ttfamily]{]},
literate={\#}{\bfseries{\#}}{1}
         {*}{\bfseries{*}}{1}
         {+}{\bfseries{+}}{1}
         {-}{\bfseries{-}}{1}
         {=<}{\bfseries{=<}}{1}
         {=}{\bfseries{=}}{1}
         {>=}{\bfseries{>=}}{1}
				 {/}{\bfseries{/}}{1}
				 {:-}{\bfseries{:-}}{1}
				 {:}{\bfseries{:}}{1}
				 {[}{\rmfamily{[}}{1}
				 {]}{\rmfamily{]}}{1}
				 {(}{\rmfamily{(}}{1}
				 {)}{\rmfamily{)}}{1}
				 {\{}{\rmfamily{\{}}{1}
				 {\}}{\rmfamily{\}}}{1}
				 {,}{\bfseries{,}}{1}
}



\journalname{Journal of Machine Learning}


\newtheorem{defi}{\textsc{Definition}}[section]

\begin{document}

\title{Background knowledge enrichment for improving bottom clauses%\thanks{Grants or other notes
%about the article that should go on the front page should be
%placed here. General acknowledgments should be placed at the end of the article.}
}
% \subtitle{Do you have a subtitle?\\ If so, write it here}

%\titlerunning{Short form of title}        % if too long for running head

\author{Orlando Mu\~noz-Texzocotetla \and\\ 
        Ren\'e MacKinney-Romero}

%\authorrunning{Short form of author list} % if too long for running head

\institute{O. Mu\~noz \at
              Departamento de Ingenier\'ia El\'ectrica\\
              Universidad Aut\'onoma Metropolitana\\
              M\'exico D.F.  09340, M\'exico\\
							Tel.: 5804 4600 ext 1326\\
              \email{magicorlan@gmail.com}           %  \\
           \and
           R. MacKinney Author \at
              Tel.: 5804 4600  Ext. 1145\\
              \email{rene@xanum.uam.mx}
}

\date{Received: date / Accepted: date}
% The correct dates will be entered by the editor


\maketitle

\begin{abstract}
In this paper we present a method to deal with numerical and categorical
attributes by enriching the language which is used to
construct the rules in final theories. Although this was implemented in
the Aleph system, which induces each rule from the bottom clause
construction, it can be applied by any ILP system.
The proposed method is based on fixed numerical subintervals and 
categorical subsets. Each subinterval/subset contains
values, for each predefined attribute, that are related to the
example to saturate. The enriched language makes it possible to reduce the number
of rules of the final theories and, in most cases, to improve the accuracy.
When our proposal does not increase the accuracy, it is as good
as other systems.
We compared this approach with other strategies to deal with numerical
attributes: lazy evaluation in Aleph and discretization
in the TILDE system.
Finally, we discuss the results obtained and future work.
\keywords{Machine Learning \and Inductive Logic Programming \and 
Numerical Attributes \and Categorical Attributes \and Discretization 
\and Grouping}
% \PACS{PACS code1 \and PACS code2 \and more}
% \subclass{MSC code1 \and MSC code2 \and more}
\end{abstract}

\section{Introduction}
\label{intro}


Inductive Logic Programming (ILP) is 
a subfield of machine learning which overcomes two drawbacks of propositional learning:
a limited data representation (propositional
logic language), and the inability to use background knowledge
during the learning process. This makes it possible to analyze problems
with complex domains such as: natural language processing,
finite element mesh design,
modeling of dynamic systems, 
network mining, music analysis, web mining, 
robotics, drug discovery, software engineering, etc. We refer to
\cite{lavrac1,Muggleton99inductivelogic,Muggleton_2012_IT_2123932_2123939,BergadanoGu95}
for consulting these and other applications.


Formally, ILP is defined as the intersection between machine learning and logic 
programming \cite{muggleton1}.

Therefore, theories are induced from a set of examples.
However, unlike other learning systems,
ILP algorithms do not use examples exclusively, but
also use information about the problem domain: background knowledge.


The language used in ILP is based on logic programs.
Thus, a logic program can
represent a set of positive examples $E^+$, a set of negative examples
$E^-$, a background knowledge $B$, or a theory $T$.
The main goal is to find a theory $T$ from a set of positive $E^+$ and negative $E^-$
examples, and from a background knowledge $B$. This task (Normal Setting ILP)
is described as follows:\\

{\flushleft {\bf Normal Setting ILP}}\\

{\bf Given:} 
    \begin{itemize}
		     \item a finite set of clauses $B$,
				 \item a finite set of clauses $E^+$, and 
				 \item a finite set of clauses $E^-$
     \end{itemize}

\medskip		

{\bf Find a theory  $\mathbf{T}$ (set of hypotheses or rules), such that} 
     \begin{itemize}
             \item $B \wedge T \models E^+$  \emph{ (completeness)},  and
						 \item $B \wedge T \not\models E^-$ \emph{ (consistency)}
			\end{itemize}

If $T$ is complete and consistent then $T$ is \emph{ correct.}\\


The ILP task can be viewed as a search problem.
In fact, learning tasks can be viewed as search problems \cite{Mitchell82}.
This search process is performed by a learner which can be described by means of
three elements: a search space, a search strategy and a search heuristic. 
Regarding the search space, we can remark the following issue:

When the language bias is stronger\footnote{In general, a bias is a mechanism to
restrict the search space \cite{UtgoffM82}}, the search space becomes smaller and
thus the learner is more efficient, but it is likely that the hypothesis
found can not represent a suitable solution that explains the target concept.

For instance, ILP systems usually define a language bias which allows testing a single constant 
value (numerical or categorical) in constructing hypotheses, thus inducing inaccurate 
theories with many rules. The following hypothesis is induced with a usual language bias,
namely without handling of attribute types.

\begin{equation}
      adult(A) \stackrel{}{\leftarrow} age(A,19)
			\label{clauseage1}
\end{equation}			

This clause explains that a person $A$ is an adult if he/she is $19$ years old. We must note
that a theory that explains whether a person is an adult or not should contain many hypotheses
like the preceding clause, Eq.~(\ref{clauseage1}): a clause for each age. Furthermore, if not all values are represented
in the examples and the background knowledge, then the final theory will be inaccurate. In our example,
not all adults will be explained with that hypothesis.

The easiest way of overcoming these drawbacks is to weaken manually the language bias. 
For this, we can add the predicate $> / 2$ to obtain a theory like this, Eq. (\ref{clauseage2}):

\begin{equation}
         adult(A) \stackrel{}{\leftarrow} age(A,B) \wedge B > 17
         \label{clauseage2}
\end{equation}

This clause is a good representation of the accurate age of adults (in most countries); it is also 
more expressive because it has only one hypothesis. 


This problem can also appear with categorical arguments with a lot of possible values. Let us
see a small example, suppose the goal relation {\em quadrilateral}$/2$ which explains whether a
geometric figure is a quadrilateral or not. The two following theories classify the same
set of geometric figures, however in the second the relation {\em member}$/2$ reduces the number
of rules, Eq. (\ref{clausequadrilateral2}).

\begin{equation}
   Theory \hspace{0.25cm} 1 \hspace{0.25cm}
   \begin{cases}
         quadrilateral(A,yes) \stackrel{}{\leftarrow} sides(A,4)\\
         quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,3)\\
         quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,5)\\
         quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,6)\\
         quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,7)\\
         quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,8)
   \end{cases}
         \label{clausequadrilateral1}
\end{equation}


\begin{equation}
   Theory \hspace{0.25cm} 2 \hspace{0.25cm}
   \begin{cases}
         quadrilateral(A,yes) \stackrel{}{\leftarrow} sides(A,B), member(B,\left[4\right])\\
         quadrilateral(A,no) \stackrel{}{\leftarrow} sides(A,B), member(B,\left[3,5,6,7,8\right])
   \end{cases}
         \label{clausequadrilateral2}
\end{equation}

However, we must take into account the following questions:


 \begin{itemize}			
			 \item How can we know that \mbox{\em 17}  is the best split?
			 
			 \item How can we determine the subsets $\left[4\right]$ and $\left[3,5,6,7,8\right]$?
			 
			 \item Are the predicates $>/2$ and {\em member}$/2$ suitable for the above examples?
       
       \item In general, how can we find the best numerical/categorical values and suitable predicates?
 \end{itemize}


To answer these questions, we present an approach called Extended Aleph which can deal
with both numerical and categorical attributes (in this paper, the 
terms ``attribute'' and ``argument'' are interchangeable).

This approach processes the selected attributes before the bottom
clause construction which is carried out by the Aleph system \cite{aleph1}.
When an attribute is numerical, several splits are tested with a genetic algorithm
and the best chromosomes are added to the background knowledge. On the other
hand, for each categorical attribute several subsets of values are tested, and
the best are added to the background knowledge. This is done
considering two or more classes (not only positive and negative) depending
on the problem type.
To use the information calculated some predicates are added to the language
space. 
This approach was implemented in the Aleph system \cite{aleph1} and compared
with other strategies: lazy evaluation in Aleph
and discretization in the TILDE system.

The remainder of this paper is structured as follows. 
Section \ref{previous_work} provides a review of some strategies to handle
attribute types in ILP. Our approach is presented in section \ref{proposed_method}.
Next, our experiments and results are discussed  in section \ref{experimental_results}.
Finally, section \ref{conclusions} presents our conclusions and future work.


\section{Previous work}
\label{previous_work}

This section presents some strategies to deal with numerical and categorical
attri\-butes in ILP. Since we compared our method with lazy evaluation in the Aleph
system and discretization in the TILDE system, we place more emphasis on these
methods.


\subsection{Lazy evaluation in Aleph}

A Learning Engine for Proposing Hypotheses (Aleph) is an ILP system which
was implemented in Prolog. This system can emulate some of the functionality
of other ILP systems, like FOIL \cite{quinlan1}, WARMR \cite{Dehaspe99discoveryof} and 
FORS \cite{Karalic:1997:FOR:251646.251651}, and its basic algorithm
consists only of the following four steps \cite{aleph1}.



    \begin{enumerate}
		     \item Select an example $e$ to be saturated. If there are no
		           more examples then stop.
				 \item Construct the most specific clause (called bottom clause). For this,
				       Aleph follows the language restrictions based on the following
				       declarations:
				       
				       \begin{itemize}
				              \item \emph{Mode declarations} determine the goal relation
				              \lstinline[language=Prolog]!(modeh)! and literals in the body of each hypothesis
											\lstinline[language=Prolog]!(modeb)!.
				              For instance, the modeh declaration below indicates that
				              the relation \lstinline[language=Prolog]!adult/1! will be the
				              head of the bottom clause and each modeb declaration determines a
				              literal in the body of the bottom clause (or final hypotheses).
				              

\begin{lstlisting}[language=Prolog]
:-modeh(1,adult(+person)).

:-modeb(*,age(+person,-number)).
:-modeb(*,age(+person,#number)).
\end{lstlisting}
				              
				              Where \lstinline[language=Prolog]!+! indicates an input variable, 
											\lstinline[language=Prolog]!-!
				              is an output variable, and \lstinline[language=Prolog]!#! is a constant.
				              
				              \item \emph{Determinations} state the literals that can be
				              used to construct a bottom clause or a hypothesis. The next
				              declaration determines that the relation \lstinline[language=Prolog]!age/2!
				              will appear in the body of the rules.
				              
\begin{lstlisting}[language=Prolog]
:-determination(adult/1, age/2).
\end{lstlisting}
				              
A typical bottom clause is as follows:
											
\begin{lstlisting}[language=Prolog]           
adult(A):- age(A,B), age(A,C), age(A,13), age(A,11),
        age(A,12), age(A,15), age(A,60), age(A,40).
\end{lstlisting}				              
				       \end{itemize}
     
         \item Search for a clause more general than the bottom clause. This step
         is called \emph{reduction.}
         
         \item Remove redundant examples and return to step $1$.
     \end{enumerate}


To deal with numerical attributes, the user can define relations by means
of mode and determination declarations. Thus, for the above example the user
should add the following relations to improve the bottom clause and final
theory.

\begin{lstlisting}[language=Prolog]
% These define the relation '>=/2'
:-modeb(*,gteq(+number,#number)).
:-determination(adult/1, gteq/2).

gteq(A,B):- 
    number(A),number(B), 
		A >= B.
\end{lstlisting}

The above declarations allow inducing hypotheses like the following:

\begin{lstlisting}[language=Prolog]
adult(A):- 
    age(A,B), B >= 25.
\end{lstlisting}

However, each bottom clause is constructed from a single example, therefore
the Aleph basic algorithm can not evaluate predicates where the numerical
result must be calculated by using a set of examples (trigonometric
functions, linear regression models, inequalities, etc.). To overcome
this drawback a \emph{lazy evaluation} approach is proposed in \cite{Srinivasan99numericalreasoning}.

Lazy evaluation allows evaluating predicates with numerical results when required.
In the case of the relations \lstinline[language=Prolog]!gteq/2, lteq/2!, the user must define an additional declaration
for each predicate to evaluate lazily during the learning process. Such predicates receive 
as parameters a set of negative and positive examples and return a numerical result.

\begin{lstlisting}[language=Prolog]
% Declarations for lazy evaluation
:-lazy_evaluate(gteq/2).    
:-lazy_evaluate(lteq/2).   

% Additional predicates for relations defined by the user
gteq([PosEx,NegEx],Value):-   lteq([PosEx,NegEx],Value):-
               :                         :
               :                         :
\end{lstlisting}

\smallskip

Other lazy evaluation techniques have been proposed and tested in
\cite{DBLP:journals/spe/FonsecaCRCS09,ruicamacholazyevaluation}.

 

\subsection{Discretization in TILDE}

One of the advantages offered by propositional learning
(over ILP) is the handling of numerical attributes.
This advantage is adapted to the ILP context in the
\emph{ Top-Down Induction of Logical DEcision Trees (TILDE)} 
system \cite{Blockeel97lookaheadand}.


In this system theories are represented as logic decision trees,
and learning is based on interpretations, namely each example
can have several tuples. 
This paradigm for ILP is called \emph{learning from interpretations}
and described in \cite{Blockeel97lookaheadand} as follows:


{\flushleft {\bf Learning from interpretations}}\\

{\bf Given:}

\begin{itemize}
	\item a set of classes $C$,
	
	\item a set of classified examples $E$, and
	
	\item a background knowledge $B$.
	
\end{itemize}

\medskip

{\bf Find a theory $T$, such that $\forall e \in E:$}

	\begin{itemize}
	    \item  $T \wedge e \wedge B \models c$ \emph{(completeness)}, and 
			
			\item  $T \wedge e \wedge B \not\models c'$ \emph{(consistency)}
  \end{itemize}

Where: $c$ is the class of the example $e$ and $c' \in C -  \left\{c\right\}$.\\


TILDE deals with numerical attributes by means of discretization
which is carried out with the Minimum Description
Length Principle discretization method (MDLP) which is based on 
information gain minimization \cite{FayyadI93}.
This discretization approach has been adapted in the following manner:

First, the final user must declare the attributes which he wants to be discretized.
For this, the meta predicate \lstinline[language=Prolog,basicstyle=\ttfamily]!to_be_discretized! indicates 
both the literal and its numerical arguments (attributes).
Here is an example illustrating a declaration that indicates the argument to
be discretized:


\begin{lstlisting}[language=Prolog]
to_be_discretized(age(Person, Age), [Age]).
\end{lstlisting}

Then, each declared argument is discretized once before the learning process
(global discretization). 
Thus, for the declaration given above, the following result might be obtained:

\begin{lstlisting}[language=Prolog]
discretized(age(Person, Age),[Age], [18]).
\end{lstlisting}

To use the numerical splits the user can define equalities or
inequalities by using modes declarations (as in the Aleph system).
For the result shown in the preceding example, the corresponding
mode is as follows:

\begin{lstlisting}[language=Prolog]
rmode(#(C:threshold(employee(_, _, _, Age),[Age], C),
        +Age < C)).
\end{lstlisting}

The predicate \texttt{rmode} determines the refinement operator and
the language bias. In this case \texttt{rmode} indicates
that the literal \texttt{employee} must be added to
refine a clause. The variable \texttt{Age} will be compared with
the threshold \texttt{C}.
Details of the predicate \texttt{rmode} are presented in \cite{ace1}.
Other ILP systems that use discretization based on information gain
minimization are ICL \cite{Laer96multi-classproblems,Raedt95inductiveconstraint},
INDUBI/CSL \cite{MalerbaESC97}, and ECL-GSD \cite{DivinaM05}.



\subsection{Constraint Logic Programming in NUM algorithm}

{\it Constraint Logic Programming (CLP)} is a programming paradigm in
which a {\it Constraint Satisfaction Problem (CSP)} is proposed in a
logical language. CLP joins two declarative programming paradigms:
logic programming and constraint programming \cite{JaffarL87}.  CLP,
unlike logic programming, has the advantage of processing numerical
values more efficiently. There are CLP systems over different
domains: $\mathds{N}, \mathds{Z}, \mathds{R}$. The following ILP system benefits
from this advantage to deal with numerical values.


{\bf NUM algorithm \cite{Anthony97generatingnumerical}} handles numerical
attributes by means of its refinement operator which can add two literal types. 
The first one is built with the predicate symbols into the background
knowledge. The second one is called {\it numerical} literal. 
This last one can be created in this way:

\begin{enumerate}

   \item Users must define {\it a priori} the form of each numerical literal (linear, quadratic, etc).
         For instance, a linear literal would be  $Y=C_1X+C_2$, where $C_1,C_2$ are constants to be calculated
         by a $CLP$ system \cite{JaffarMSY92}, and $X,Y$ are either new variables or variables already present in the clause.
   \item If the refinement operator must add a numerical literal (to specialize any clause):
   
         \begin{enumerate}
               \item The NUM algorithm creates systems of equations
                 which depend on the form stated by the user, and the
                 number of declared constants. For example, if the
                 numerical literal is linear, $Y=C_1X+C_2$, the system
                 introduces a system of two equations and two
                 unknowns, where $C_1$ and $C_2$ become variables; and
                 $X,Y$ are replaced by the values of the background
                 knowledge and the examples:\\
               
                     Eq. 1) $5 = C_1 6 + C_2$ \hspace{1cm} Eq. 2) $-7
                     = C_1 3 + C_2$\\
                     
               \item The $CLP$ system solves each system of
                 equations. Each solution represents a numerical
                 literal. With the example given earlier, we obtain
                 the next numerical literal: $Y = 4X - 19$.
                          
               \item The refinement operator uses the new literals to
                 specialize the current clause.
                     
         \end{enumerate}

\item The learning process goes on.
\end{enumerate}


\subsection{SMART+}

The \emph{SMART+} system described in \cite{BottaG93} is an extension
of the SMART system which follows a top-down learning strategy.
To optimize globally the numerical arguments that occur in a clause 
$\varphi$, SMART+
includes a genetic algorithm which was adapted from the approach
proposed by Goldberg \cite{Goldberg:1989:GAS:534133}.
This GA is performed before the specialization process as follows.

Chromosomes are represented by binary strings. For this,
all arguments in $\varphi$ define a string of reals:
$rs = k_1, \ldots, k_n$. Then this string is converted into
a binary string. To prevent any genetic operator from generating
values outside the range for some $k_i$, the following
algorithm is proposed to transform $rs$ into a binary string $bs$.\\


\parbox{11cm}{

\emph{a. }Given a parameter $k_i$ and the range $\left[min_i,max_i\right]$
          assigned by the user, then $k_i$ is represented as follows:
					
    \begin{equation}
		       k_i = min_i + \delta_i / \Delta_i
		\label{smart1}
		\end{equation}

    Where $0 \leq \delta_i \leq 2^{N_i}$,
		$N_i$ is the number of bits chosen by the user
		for representing the increment $\delta_i$.
		In addition, $\Delta_i$ is 
		
		\begin{equation}
		   \Delta_i = \frac{2^{N_i}}{max_i-min_i}		
		\label{smart2}
		\end{equation}

\emph{b. } Given a string $rs = k_1, \ldots, k_n$, for each argument $k_i$
    in $rs$ a set of $N_i$ bits will be defined in $bs$, where the
		value of the corresponding $\delta_i$ is represented.\\

}


The GA generates an initial population $A(\varphi)$ and the
information gain (fitness) of each chromosome is calculated.
After this, the usual genetic algorithm is executed.

Two crossover operators are defined. The first one is the
standard single point crossover, and the second one
is defined below.\\

\parbox{11cm}{


Given two chromosomes (parents) $bs_1, bs_2$, and a parameter
$k_c$ randomly selected, offspring ${bs_1}', {bs_2}'$ are generated
in three steps.

\begin{enumerate}
   \item[i] The parameters to the left of $k_c$ in
          ${bs_1}'$ and ${bs_2}'$ are obtained
					from $bs_1$ and $bs_2$ respectively.

   \item[ii] The parameter $k_c$ in both offspring is obtained
          by averaging the two corresponding values in the parents.
					
   \item[iii] The parameters to the right of $k_c$ are obtained
          in ${bs_1}'$ by copying the corresponding ones in
					$bs_2$ and vice versa in ${bs_2}'$.\\
\end{enumerate}

}

To select between these crossover operators $c_1$ and $c_2$, two probabilities $p_1$ and $p_2$
are defined respectively, with the constraint: $p_1 + p_2 \leq 1$.


The mutation operator is standard, namely
a subset of genes is chosen at random and
its values are changed. This operator is applied
with a probability $p_{mut}=0.001$.




\subsection{Multivalue learning in ILP {\it (MVL-ILP)}}\label{mvl_ilp}

The approach proposed in \cite{omt1} deals both with
numerical and categorical attributes.  This method 
creates a split $d$ for each numerical attribute, and
two subsets of values for each categorical attribute.
This information and additional predicates are used
to create new (multivalue) clauses and to enrich
the background knowledge. This approach is described
below.

\begin{enumerate}
\item  Discretization/Grouping.
In this step, each attribute $A=\left\{x_1,  \ldots, x_n\right\}$ is discretized or grouped in a binary way.

When $A$ is numerical, a split point $d$ is created (discretization). This divides the numerical
interval of $A$ in two subintervals $I_1 = \left[l_1,r_1\right]$ and $I_2 = \left[l_2,r_2\right]$
such that $l_1, l_2, r_1, r_2 \in A$, and

  \begin{center}
    $r_1 \leq d \leq l_2$
  \end{center}

When $A$ is categorical, two  subsets of categorical values $S_1$ and $S_2$ are created (grouping).
These subsets fulfill the following conditions:
   
   \begin{center} 
      $S_1 \cup  S_2 = A$
      
      \medskip
    
      $S_1 \cap S_2 = \phi$
   \end{center}

This step is carried out by the split point selection algorithm of 
two decision tree inducers: 
Quick Unbiased Efficient Statistical Tree - QUEST \cite{quest1},
and Classification Rule With Unbiased Interaction Selection
and Estimation - CRUISE \cite{cruise1} which are explained later.
In addition, the final user selects {\it a priori} one of these
algorithms as well as each attribute that will be processed.\\



\item Constructing multivalue clauses.
After discretization/grouping process both the splits and the subsets of
categorical values are used to express multivalue clauses.
If a literal within the body of a clause has at least one argument with
more than one value, then that clause is called \emph{multivalue}.
For each numerical attribute a pair of multivalue clauses like these are created. 
    

\begin{lstlisting}[language=Prolog]
adult(P):- 
     age(P,Age), Age =< 21.
		
adult(P):- 
     age(P,Age), Age > 21.
\end{lstlisting}


If $A$ is categorical, then a pair of multivalue clauses like the following are built.


\begin{lstlisting}[language=Prolog]
class(Animal,reptile):- 
     cover(Animal,Cover), member(Cover,[none,scales]).
		
class(Animal,reptile):- 
     cover(Animal,Cover), member(Cover,[feathers,hair]).
\end{lstlisting}



\item Modification of background knowledge.
In this step all multivalue clauses are aggregated to the background
knowledge. This addition enriches the language used in hypothesis construction.\\

\item Usage. Finally, the new information is used by the ILP algorithm to 
induce a theory.\\
\end{enumerate}


As described below, the split point selection algorithms of QUEST and CRUISE carry out the discretization/grouping process
in the following two steps.\\


\emph{1. CRIMCOORD transformation. }Before splitting, both QUEST and CRUISE transform each categorical attribute into a numerical one.
If $A=\left\{x_1,  \ldots, x_n\right\}$, is a categorical attribute, then:

         \begin{itemize}
	             \item A \emph{dummy binary vector} is assigned to each $x_i$ with $i \in \left\{1, \ldots, n\right\}$.
	                   For example, the attribute $A = \left\{red, green, blue\right\}$ has
	                   the dummy vectors: $red = 100$, $green = 010$, $blue = 001$.
	                   
	             \item Afterwards, each dummy vector is projected onto the largest discriminant
	                   coordinate (CRIMCOORD). This technique makes it possible to transform
	                   a multidimensional vector (dummy) to a numerical value \cite{gnanadesikan1997methods}.
         \end{itemize}


\emph{2. Selection of split point. }

          \begin{itemize}
					       \item QUEST.	                     
											 The $k-means$ algorithm \cite{Macqueen67somemethods}
	                     with $k=2$ is performed to the values of $A$ to form two subsets of
	                     numerical values: $A_1$ and $A_2$. This process ensures a binary split.
	      
                       Afterwards, a quadratic discriminant analysis (QDA) \cite{friedman1} is applied to
	                     calculate the split point $d$ between $A_1$ and $A_2$.


                  \item CRUISE.									
									      A Linear Discrimination Analysis (LDA) calculates the best split $d$. 
                        Since LDA is most efficient with normal distributions, before
                        applying LDA, a BOX-COX transformation is used to improve the normality of $A$.									
					\end{itemize}					


\subsection{SIA01}\label{sia01}

The SIA01 system \cite{Augier95learningfirst} is an ILP system that represents each chromosome with 
a logic format. A clause is a chromosome, and its predicates and arguments are its genes. For instance, the clause $p\left(X,Y\right)\stackrel{}{\leftarrow}r\left(X,12,a\right)$
is a chromosome comprised of seven genes: 


\begin{figure}[h]
    \centering
		\includegraphics[scale=0.15]{cromosoma1.eps}
	\label{fig:cromosoma1}
\end{figure}


The fitness function is based on the consistency and the completeness of the current clause. This function penalizes those clauses
that cover more negative examples, and rates positively those clauses that cover more positive examples.

{\bf Mutation. }If the gene is a numerical value, e.g. $12{.}3$, then the mutation operator will create an interval
which will contain that value. If the gene is a numerical interval, then the operator will enlarge it. This is shown as follows:\\

\begin{figure}[h]
    \centering
		\includegraphics[scale=0.12]{cromosoma2.eps}
	\label{fig:cromosoma2}
\end{figure}




If the gene is a categorical value, then a set of categorical values is created by adding another value. If the gene is a
set of categorical values, then one value is added, for example:

\begin{figure}[h]
    \centering
		\includegraphics[scale=0.12]{cromosoma3.eps}
	\label{fig:cromosoma3}
\end{figure}

{\bf Crossover. }This operator only exchanges genes with the same data type.

\begin{figure}[h]
    \centering
		\includegraphics[scale=0.1]{cromosoma4.eps}
	\label{fig:cromosoma4}
\end{figure}


Any choice to enlarge some range is random; hence this is an
unsupervised process. That can be a disadvantage, since if the range
is huge, it may not be possible to test enough values.

As we have seen, unlike TILDE this approach is capable of dealing with both numerical and categorical 
attributes and does not rely on an entropy-based function to discretize.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Extended Aleph}\label{proposed_method}

To enrich the hypothesis language, our method tests different numerical subintervals (or categorical
subsets) according to an evaluation function; the best qualified ones are then added to the background knowledge.
Due to the great number of subintervals/subsets
that can be created, we decided to use an evolutionary approach to generate and evaluate them.
This method was embedded in the basic algorithm of Aleph as follows:


\begin{enumerate}
\item[0.] State parameters. In addition to the mode declarations, types and determinations,
users can also declare the attributes to be discretized/grouped in any of the two
following ways:

\begin{enumerate}
	\item $\mbox{\em Approach}(Goal(\ldots, \mbox{\em link, class, \ldots})$, $\mbox{\em Lit}
	      (\ldots, \mbox{\em link, attribute},  \ldots)).$
	       	      
	      \smallskip	      
	      
				Where {\em Approach} can be either {\em discretization} or {\em grouping}.
				The argument {\em link} makes it possible to relate the literal {\em Lit} that contains the 
				attributes to be processed with the goal relation {\em Goal}. In addition, {\em attribute} 
				will be discretized/grouped with respect to {\em class}, which has two or more values. 
				It should be clarified that it is possible to declare more than one attribute in {\em Lit}.		
  \smallskip
													
  \item $\mbox{\em Approach}(Goal(\ldots, \mbox{\em link, \ldots})$, $\mbox{\em Lit}
	      (\ldots, \mbox{\em link, attribute},  \ldots)).$
	
	      In this case the corresponding classes are not explicitly defined; therefore	      
	      they are assumed to be {\em positive} and {\em negative}. 
\end{enumerate}


For example, in the database to predict mutagenicity \cite{DBLP:journals/ai/SrinivasanMSK96}
the goal relation is \texttt{active/1}, if the user wants to discretize the final
argument and to group the third one in the relation \texttt{atm/5} then the following lines must be added:

\begin{lstlisting}[language=Prolog]
:-set(discretization,true).    % Activate discretization
:-set(grouping,true).          % Activate grouping

% Arguments with ``no'' will not be processed 
:-discretization(active(link),atm(link,no,no,no,attribute)).
:-grouping(active(link),atm(link,no,attribute,no,no)).
\end{lstlisting}


\item Select an example $e$. In this step a {\em FixedSubinterval} or a {\em FixedSubset}
is defined for each attribute declared in the preceding step.

\smallskip

Let {\em attribute} be a numerical argument, then all values $\left[\mbox{\em min} \leq \dots
\leq \mbox{\em max}\right]$ related to the example $e$ compose a {\em FixedSubinterval}.

\smallskip

Let {\em attribute} be a categorical argument, then all values $\left\{v_1 , \ldots, v_n\right\}$
related to the example $e$ compose a {\em FixedSubset}.

\smallskip

These are {\it fixed} because they will invariably be in all subintervals/subsets that will be tested.

If the example selected, in the mutagenicity problem, is {\em active(d1)}, which is related to the facts

\begin{lstlisting}[language=Prolog]
atm(d1,d1_1,c,22,-0.117).     atm(d1,d1_4,c,195,-0.087).
atm(d1,d1_8,h,3,0.143).       atm(d1,d1_9,h,3,0.142).
atm(d1,d1_11,c,27,-0.087).    atm(d1,d1_22,h,3,0.143).
atm(d1,d1_23,h,3,0.142).      atm(d1,d1_24,n,38,0.812).
\end{lstlisting}

then \lstinline[language=Prolog]!FixedSubinterval=[-0.117..0.812]!
and \lstinline[language=Prolog]!FixedSubset=[c,h]!.\\


\item[1.5.] Discretization/Grouping.
Before saturation each attribute declared by the user is processed.
Depending on the attribute type there are two possible cases.\\

% \begin{adjustwidth*}{0.5cm}{0.5cm}

\begin{enumerate}
\item Let {\em attribute} be a numerical argument and
$\mbox{\em FixedSubinterval} = \left[\mbox{\em min,max}\right]$ be the interval that
contains all values in {\em attribute} that are related to $e$. 
Then the genetic algorithm returns a set
of intervals $I_n= \left\{x \mid x = \left[\mbox{\em min',max'}\right]
\wedge \mbox{\em min'} \leq \mbox{\em min} \wedge \mbox{\em max} \leq \mbox{\em max'}\right\}$.
Where each element in $I_n$ represents a chromosome.


To evolve the chromosomes several mutation operators are defined. These
can enlarge or shrink an interval, either on the left side, on the right side, or on both sides.
The fitness function for a chromosome $x$ is based on information gain, see Eq. (\ref{fitness_information_gain}).


\begin{equation}
\mbox{\em IG}(x) = \mbox{\em ent}(\mbox{\em Int}) - \frac{\mbox{\em count}(x)}{\mbox{\em count}(\mbox{\em Int})} \mbox{\em ent}(x)
\label{fitness_information_gain}
\end{equation}

The corresponding entropy for some interval {\em Int} is presented in Eq. (\ref{fitness_information_gain2}):

\begin{equation}
   \mbox{\em ent}(\mbox{\em Int}) = - \sum^{\abs{S}}_{j=1} p(\mbox{\em Int},j) \log_2 (p(\mbox{\em Int},j))
\label{fitness_information_gain2}
\end{equation}

      where:	$\abs{S} = $ number of classes,
      {\em Int}  is the numerical interval of {\em attribute},
			$\mbox{count}(\mbox{\em Int}) =$ the number of numeric values in the interval {\em Int}, and
			$p(\mbox{\em Int},j)$ is the proportion of values in {\em Int} that belong to the {\em jth} class.



\item Let {\em attribute} be a categorical argument and {\em FixedSubset} be the set of all
values related to $e$. In this case the GA searches for the best subsets of categorical values
$C = \left\{y \mid FixedSubset \subseteq y\right\}$.

A chromosome is represented by a subset $y$.
The mutation operators defined add a new categorical value
in $y$, delete an element from the chromosome, or swap an element
between the chromosome and the rest of categorical values, but maintaining the
FixedSubset into each new chromosome. The crossover operator swaps an
element between two chromosomes.

In this case, the fitness function is based on the DIstance Learning in Categorical
Attributes method (DILCA) \cite{IencoPM09}. The fitness for a chromosome $y$ is the total sum
of the distances for each value pair in $y$. In addition to this,
if the sum of distances for two or more chromosomes is the same, then the value
$\frac{1}{\abs{y}}$ is added, because we want to minimize and to favor the subsets
with more values. Unlike the method presented in \cite{omt1},
this function does not need to transform all 
categorical values into numerical type.
The fitness of a chromosome $y$ is shown in Eq. (\ref{categoricalFitness}).



\begin{equation}
Fitness(y) = \sum_{c_i c_j \in y} \sqrt{  \sum_{s_t \in S} (  P ( c_i \mid s_t ) - P( c_j \mid s_t )   )^2 } +\frac{1}{\abs{y}}
\label{categoricalFitness}
\end{equation}

where: $c_i, c_j \in y$ such that $i \neq j$, $S$ is the set of classes defined,			
						$P ( c_i \mid s_t )$ is the conditional probability for $c_i$ given $s_t$, and $P ( c_j \mid s_t )$ is the
						conditional probability for $c_j$ given $s_t$.\\

In both cases the user can give the number of chromosomes to be added.
\end{enumerate}

In addition, for each chromosome new types are defined:

\begin{lstlisting}[language=Prolog]
discr_split(-0.5).       % Interval [-0.5,0.9]    
discr_split(0.9).

discr_split(-0.375).     % Interval [-0.375,0.9]
discr_split(0.9).
:
grp_subset([c,h,o]).
grp_subset([c,h,n]).
grp_subset([c,h,br]).
grp_subset([c,h,o,n,br]).
:
\end{lstlisting}

To use these types in the background knowledge, the following relations as well
as their respective mode and determination declarations are added
to the language: 

\begin{lstlisting}[language=Prolog]
% Relation               Use

grp_member/2     % grp_member(Var,[c,h,o]).
discr_lteq/2     % discr_lteq(Var,0.9).
discr_gteq/2     % discr_gteq(Var,0.9).
discr_between/3  % discr_between(Var,-0.5,0.9).

:-modeb(*,grp_member(+VarType,#grp_subset)).
:-modeb(*,discr_lteq(+VarType,#discr_split)).
            :                        :
:-determination(Goal_Pred/Arity, grp_member/2).
:-determination(Goal_Pred/Arity, discr_lteq/2).
            :                        :
\end{lstlisting}



\bigskip

\item Build a bottom clause that entails the example $e$. The
enriched language is used to build a bottom clause.
The bottom clause can be like this:

% \lstset{basicstyle=\small}
\begin{lstlisting}[language=Prolog]
[Bottom clause]
active(A) :-
      atm(A,B,C,D,E),logp(A,F),
      discr_lteq(E,-0.59),discr_lteq(E,0.97),
      discr_gteq(E,-0.59),discr_gteq(E,0.97),
      discr_lteq(F,0.81),discr_lteq(F,27.4),
      discr_gteq(F,0.81),discr_gteq(F,27.4),
      grp_member(C,[c,h,o]),grp_member(C,[c,h,n]),
      grp_member(C,[c,h,br]),grp_member(C,[c,h,n,br]).
\end{lstlisting}

\item Search. Aleph has several search strategies (best first, depth first search, etc.), refinement operators, 
and evaluation functions. These parameters are defined by the user.
                   
		
\item Remove covered (redundant) examples and add the best clause found to the current theory. Go to step 1.


\end{enumerate}

\subsection{An example}

Consider the Student Loan dataset \cite{uci} and its goal relation \texttt{no\_payment/1}, which is true for those
students who do not need to repay their loan (positive examples) and false
otherwise. In addition, the following relations compose the
background knowledge: 

\begin{lstlisting}[language=Prolog]
male(S).
absence_from_school(S,Int).
enrolled(S,School,Int).  % School={ucsd,ucb,ucla,uci,occ,smc}
filed_for_bankruptcy(S).
unemployed(S).
\end{lstlisting}

As we see, there are two numerical attributes and one categorical attribute.
Extended Aleph finds a theory with the following steps:

\begin{enumerate}

\item[0.] State parameters. In addition to modes and determinations,
the user must add some declarations to indicate the attributes
that will be discretized or grouped. The argument \emph{link} makes it possible to indicate the goal relation
and the class argument (when it is defined).

\begin{lstlisting}[language=Prolog]
:-discretization(no_payment(link),
                 enrolled(link,no,attribute)).								
:-discretization(no_payment(link),
                 absence_from_school(link,attribute)).
:-grouping(no_payment(link),enrolled(link,attribute,no)).
\end{lstlisting}


\item Select an example $e$. This step is normally carried out by Aleph.\\

1.5. Discretization/Grouping.
Extended Aleph tests the best intervals and splits for each numerical attribute, and
the best categorical subsets for each categorical attribute. For example,
if the categorical values: \texttt{ucb,ucla} are related with the example $e$, then
some subsets to be tested are: 

\begin{lstlisting}[language=Prolog]
School = {ucb,ucla}
School = {ucb,ucla,uci}
School = {ucb,ucla,smc}
School = {ucb,ucla,uci,smc}
\end{lstlisting}

On the other hand, if $e$ is related to the integer value $5$ in the relation \texttt{absence\_from\_school},
then Extended Aleph will test other subintervals such that they contain that value. For instance:

\begin{lstlisting}[language=Prolog]
absence_from_school = [4,6]
absence_from_school = [3.7,5]
absence_from_school = [5,5.6]
absence_from_school = [3.4,6.3]
\end{lstlisting}

Later, each subset of categorical values is added into the background knowledge with the relation \texttt{grp\_subset/1}.
For each numerical value in the subintervals, a split is added with the relation \texttt{discr\_split/1}.


\begin{lstlisting}[language=Prolog]
grp_subset([ucb,ucla,uci]).
grp_subset([ucb,ucla,smc]).
:
discr_split(3.7).
discr_split(6.3).
:
\end{lstlisting}


\item Build a bottom clause. Aleph creates the bottom clause with
the new information in background knowledge. The following clause represents
a typical bottom clause in this approach.


\begin{lstlisting}[language=Prolog]
no_payment(A) :-
   unemployed(A),absence_from_school(A,B),enrolled(A,C,D),
   filed_for_bankruptcy(A),discr_gteq(B,4),discr_gteq(B,6),
   discr_gteq(B,3.7),discr_gteq(B,6.3),discr_lteq(B,4),
   discr_lteq(B,6),discr_lteq(B,3.7), discr_lteq(B,6.3),
   grp_member(C,[occ,smc,uci,ucla,ucsd]),
   grp_member(C,[smc,ucb,ucla,ucsd]),
   grp_member(C,[occ,ucb,ucla,ucsd]), 
   grp_member(C,[ucb,uci,ucla,ucsd]).
\end{lstlisting}


\item Search. The proposed approach does not affect the algorithm selected by the user. Thus,
with the enriched bottom clause, Extended Aleph carries out the search.
                   
		
\item Remove covered (redundant) examples and add the best clause found to the current theory. Go to step 1.

Thus, if the discretized/grouped attributes are relevant to the ILP problem treated, then 
the final theory will contain clauses with information obtained during data preprocessing.
The following clauses show this difference.


\begin{lstlisting}[language=Prolog]
[Extended Aleph]                 [Aleph]
[Theory]                         [Theory]

[Rule 1]                         [Rule 1]
[PosCover=98 NegCover=0]         [PosCover=18 NegCover=0]
no_payment_due(A) :-             no_payment_due(A) :-
  enrolled(A,B,C),                   enrolled(A,ucsd,5).
  discr_gteq(C,11.56),                                       
  discr_lteq(C,14.5).     
   
[Rule 7]                          [Rule 7]   
[PosCover=48 Neg cover=3]         [PosCover=12 NegCover=2]
no_payment_due(A) :-              no_payment_due(A) :-
  enrolled(A,B,6),                   enrolled(A,uci,8).
  grp_member(B,[ucb,ucla,uci]).
	           
\end{lstlisting}

\end{enumerate}

In the next section we present the experiments performed to compare the accuracy of our method.



\section{Experimental results}
\label{experimental_results}

We used several datasets to determine whether our
approach improves the accuracy and reduces the
number of rules in the final theories.
The results obtained were compared
with other strategies to deal with numerical attributes:
lazy evaluation in Aleph and discretization in 
the TILDE algorithm.


\subsection{Materials}

The following datasets were used to evaluate the performance
of our approach:

\begin{itemize}
     \item From the UCI machine learning repository \cite{uci}:
		       Australian Credit Approval, Pittsburgh Bridges,
					 Japanese Credit Screening, German Credit,
					 Iris Dataset, Moral Reasoner, Ecoli and
					 Student Loan.
		\item  We also performed tests over the well-known ILP
		       datasets: Carcinogenesis \cite{Srinivasan97carcinogenesispredictions},
					 KRK illegal chess position (KRKi) \cite{Muggleton89anexperimental}, 
					 and mutagenesis \cite{Muggleton94mutagenesis:ilp} (on
	         ``regression friendly'' data).
\end{itemize}

In addition to this, both lazy evaluation and our proposal
are embedded in the Aleph system which is implemented in
YAP Prolog (version 6.2.2.) \cite{journals/tplp/CostaRD12}.
On the other hand, the TILDE learner is included in the ACE
data mining system \cite{ace1}.
All experiments were performed on a modern multicore PC machine.



\subsection{Method}

Since a problem in TILDE can be multi-class and
Aleph deals essentially with binary class problems, we have
divided the datasets used in two types:
those that have examples in two classes, positive and negative;
and those that have examples in three or more classes.

\subsubsection*{Two classes}

In this case we analyzed only datasets with negative and positive
examples: Australian Credit Approval, Carcinogenesis, Japanese
Credit Screening, German Credit, Moral Reasoner, Student Loan,
illegal Chess Position and Mutagenesis.

To compare each analysis made with TILDE and approaches
based on Aleph we defined the following equivalences
in the corresponding declarations.


\begin{lstlisting}[language=Prolog]
Aleph format                 TILDE format

goal(...).                   goal(...,pos). %positive example
:
goal(...).                   goal(...,neg). %negative example
:

modeb(1,lit(+t1,-t2,#t3)).   rmode(1,lit(+T1,-T2,#)).

% types
t1(value1). t1(value2)...    type(lit(t1,t2,t2)).
\end{lstlisting}

It should be noted that, unlike the Aleph format, in TILDE
each example must have a class argument. In this case
there are only two classes: positive and negative.


For each dataset a 10-fold cross validation was executed with
the following settings.


\subsubsection*{Multiple classes}

In this type the following datasets have more than two
classes: Pittsburgh bridges (6 classes), Iris
dataset (3 classes) and Ecoli (8 classes). 
The equivalences for these datasets are as follows:

\begin{lstlisting}[language=Prolog]

Aleph format                TILDE format

set(evalfn,posonly).

% positive examples
goal(...,class1).           goal(...,class1).
goal(...,class2).           goal(...,class2).
:                           :
goal(...,classN).           goal(...,classN).
\end{lstlisting}

The main difference in this case is the use
of positive examples only. Since positive examples, in
the Aleph format, have a class argument, it is not
necessary to add it in TILDE.

In addition to this, the following parameters
are equivalent:

\begin{lstlisting}[language=Prolog]
Aleph format              TILDE format

% Minimum accuracy of a rule (Aleph) or a node (TILDE)
set(minacc,0.80).         accuracy(0.80).

% Lower bound on the number of examples to be covered
set(minpos,2).            minimal_cases(2).
\end{lstlisting}




\subsection{Results}

Table~\ref{tab:twoclasses} shows the accuracy and the number of rules
for each dataset analyzed.


\begin{table}[h]
% table caption is above the table
\caption{This table shows both the accuracy and
the number of rules for each dataset. These results
were obtained with lazy evaluation in Aleph, TILDE
and Extended Aleph. The best values are shown in
bold.}
\label{tab:twoclasses}       % Give a unique label
% For LaTeX tables use
\centering
\scalebox{1.2}[1.1]{
\begin{tabular}{lllllll}
\hline\noalign{\smallskip}
Dataset & \multicolumn{2}{|c|}{Lazy Evaluation} & \multicolumn{2}{|c|}{TILDE} & \multicolumn{2}{|c|}{Extended Aleph}\\
\noalign{\smallskip}\hline\noalign{\smallskip}
       & $A(\%)$ &  {\em Rules} & $A(\%)$ &  {\em Rules} & $A(\%)$ & {\em Rules}\\
\cmidrule(l){2-3} \cmidrule{4-5} \cmidrule{6-7}
%\noalign{\smallskip}\hline\noalign{\smallskip}
Australian & 60 & \bf 5.2 & 72 & 30 & \bf 75.7 & 18 \\

Japanese Credit & 70.4 & 6.7 & 68 & 10.9 & \bf 71.1 & \bf 5.7 \\

German Credit &  71.4 & 60.2 & 71.4 & 69.8 & \bf 71.5 & \bf 54.98 \\

Moral Reasoning & \bf 96 & \bf 1 & 88.1 & 3 & \bf 96 & \bf 1\\

Carcinogenesis & 55.9 & 17.09 & \bf 63 & 34.3 & 59.3 & \bf 14.37 \\

KRKi  & 49 & 94.1 & \bf 50.5 & 134.9 & 49 & \bf 88.18 \\

Student Loan  & 62.6 & \bf 3.9  & 82.1 & 5 & \bf 97.3 & 7.7   \\

Mutagenesis  & 77.7 &  4.7 & \bf 84 & \bf 3.7 & 83.7 & 8.22 \\

Pittsburgh bridges & 92.8 & 7.72 & 62 & 12.5 & \bf 95.5 & \bf 7.6 \\

Iris & 97 & \bf 3 & 93.2 & \bf 3 & \bf 99 & 4.64 \\

Ecoli & \bf 97 & \bf 7.6 & 77.98 & 11.8 & \bf 97 & 9.42 \\
\noalign{\smallskip}\hline
\end{tabular}}
\end{table}



Regarding the accuracy, our method is better than most of the results obtained
with lazy evaluation in Aleph (hereafter called Lazy Aleph) and TILDE. 
For the Carcinogenesis, KRKi and Mutagenesis datasets, TILDE constructs theories with better 
accuracy; however, Extended Aleph decreases the number of rules in two of them
with a competitive accuracy.

Although Extended Aleph and Lazy Aleph discretize the same numerical attributes
in some datasets (without dealing with categorical data), our approach is more
accurate. These results, we believe, are mainly due to the two following reasons:

\begin{itemize}
    \item Lazy Aleph only uses the information in the background knowledge
		      as numerical splits. Instead, Extended Aleph calculates numerical
					or categorical information that is not in the dataset and this is
					used during the learning process.
					
					For instance, suppose that we want to classify the following
					numerical values related to positive and ne\-gative examples:					
					{\small 
					$(1,+)$, $(2,+)$, $(3,+)$, $(4,+)$, $(5,+)$, $(6,+)$, $(7,+)$, $(8,+)$,
					$(9,+)$, $(10,+)$, $(11,-)$, $(12,-)$, $(13,-)$, $(14,-)$, $(15,-)$,
					$(16,-)$, $(17,-)$, $(18,-)$, $(19,-)$, $(20,-)$.}
					
					In this case both Extended and Lazy Aleph find a suitable split\\
					
					Lazy Aleph split $= 11$
					Extended Aleph split $= 10.746$\\
					
					However if the background knowledge does not contain the values
					{\small $(11,-)$, $(12,-)$, $(13,-)$} then only our approach finds a more accurate
					split.\\
					
					Lazy Aleph split $= 14$
					Extended Aleph split $= 10.932$
					
    \item Unlike Lazy Aleph the proposed method can discretize
		      datasets with two or more classes. The Pittsburgh bridges dataset is
					a good example, since it has six classes.
\end{itemize}

TILDE can also discretize multi-class problems, but this process is only
global, namely numerical attributes are discretized once before the learning process.
On the other hand, Extended Aleph can discretize either globally or locally. For this,
the user can use the following parameters:

\begin{lstlisting}[language=Prolog]
set(grouping_scope, grouping_global).
set(grouping_scope, grouping_local).

set(discretization_scope, discretization_global).
set(discretization_scope, discretization_local).
\end{lstlisting}


An essential difference between our approach and
both Lazy Aleph and TILDE is the handling of categorical data;
the SIA01 system attempts to handle them, but
it only has a mutation operator which generalizes
but does not specialize.
Finally, it should be clear that Extended Aleph
is an extension of MVL-ILP, which deals with categorical data
but in a binary way.
 
 
\section{Conclusions and Future Work}\label{conclusions}

With the obtained results,  we can draw some conclusions: 

\begin{itemize}
	\item This method does not guarantee to improve the accuracy
        and simplicity of the theories in all cases. The improvement depends on
        the selected attributes, namely if an attribute is relevant in the theory 
        construction then its discretization/grouping will help to
        improve the final theory.
  \item ILP problems whose final theories do not need constants to be induced on 
        them cannot benefit from this method. In this case, variables are useful 
        to better explain that concept.
        
  \item The size of the search space can be affected with the proposed method. 
        On one hand a discretized/grouped attribute which is not relevant can generate 
        a large amount of unnecessary candidate rules. 
        On the other hand a discretized/grouped attribute which is relevant can 
        decrease significantly	the number of candidate rules.
  \item Users do not know in advance which attributes will be most useful; therefore 
        intuition is an important element to select each attribute.
        Our method makes it easy to experiment with different attributes.

  \item Finally, unlike other genetic approaches like SMART+ \cite{BottaG93}, our method 
        can deal with both categorical and numerical attributes. Moreover, just as 
        systems based on learning from interpretations our approach can discretize/group 
        with regard to more than one class, not only positive and negative examples.
\end{itemize}
					
					

Some considerations for further work are as follows.
Since not all cases were successful, in terms of accuracy and simplicity, it is necessary to implement and test other fitness functions and more
datasets. These tests will help us to identify the factors which affect both the accuracy and the simplicity of the final theories, as well as the
search space size. 
Another path of research is to look into
multivariable predicates, i.e. whether there are correlations between two or more attributes.
Thus, we want to investigate if these correlations are relevant to improve the performance in ILP (accuracy, simplicity and time of execution)
and what kind of problems can be treated with these predicates.
Finally, to identify whether an attribute is relevant or not, a feature selection process could be useful to remove irrelevant literals.
We want to implement these ideas in the Aleph system and compare its performance with other ILP systems that handle data types.


\bibliographystyle{spmpsci}
\bibliography{biblio}



\end{document}





\begin{comment}
\lstdefinelanguage{Prolog}{
basicstyle=\small,
morekeywords={modeb, modeh, discretization, grouping, determination,
               grp_member, discr_lteq, discr_gteq,
               discr_between, grp_subset, discr_split, 
               lteq, gteq, member},           keywordstyle=\color{blue},         
stringstyle=\color{red},
% En este caso l indica que es solo una letra el identificador para los comentarios % .......
morecomment=[l]{\%},
% En este caso s indica que es una cadena encerrada entre dos delimitadores para los comentarios /* ..... */
morecomment=[s]{/*}{*/},  commentstyle=\color{green},
stringstyle=\color{purple},
literate=
    {)}{{\textcolor{red}{)}}}{1}
    {(}{{\textcolor{red}{(}}}{1}
    {[}{{\textcolor{red}{[}}}{1}
    {]}{{\textcolor{red}{]}}}{1}
    {:}{{\textcolor{red}{:}}}{1}
    {,}{{\textcolor{red}{,}}}{1}
    {\{}{{\textcolor{red}{\{}}}{1}
    {\}}{{\textcolor{red}{\}}}}{1}
    {+}{{\textcolor{purple}{+}}}{1}
    {-}{{\textcolor{purple}{-}}}{1}
    {\#}{{\textcolor{purple}{\#}}}{1}
  % breaklines=false
}
\end{comment}