package weka.classifiers.trees.IIDT;

import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;

import weka.core.Attribute;
import weka.core.Capabilities;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.NoSupportForMissingValuesException;
import weka.core.Option;
import weka.core.TechnicalInformation;
import weka.core.TechnicalInformationHandler;
import weka.core.Utils;
import weka.core.Capabilities.Capability;
import weka.core.TechnicalInformation.Field;
import weka.core.TechnicalInformation.Type;

/**
<!-- globalinfo-start -->
 * Class for constructing a decision tree based on the LSID3 algorithm. Can deal with nominal and numeric attributes. No missing values allowed. Empty leaves may result in unclassified instances. For more information see: <br/>
 * <br/>
 * S. Esmeir (2006). Anytime learning of decision trees.
 * <p/>
<!-- globalinfo-end -->
 *
<!-- technical-bibtex-start -->
 * BibTeX:
 * <pre>
 * &#64;article{Esmeir2006,
 *    author = {S. Esmeir},
 *    journal = {Machine Learning},
 *    number = {1},
 *    pages = {897-900},
 *    title = {Anytime learning of decision trees.},
 *    volume = {1},
 *    year = {2006}
 * }
 * </pre>
 * <p/>
<!-- technical-bibtex-end -->
 *
<!-- options-start -->
 * Valid options are: <p/>
 * 
 * <pre> -nts
 *  If set, classifier is run in LSID3 mode, 
 *  otherwise classifier is run in ID3 mode. 
 * </pre>
 * 
<!-- options-end -->
 *
 * @author Semion Arest (semjon.arest@gmail.com)
 */
public class LSID3 extends IIDTTreeClass
implements TechnicalInformationHandler, IIDTTree
{
    /** Serialization identifier. */
    private static final long serialVersionUID = 1L;

    private int m_AttIdx;				//Index of the attribute chosen at this node.
    private double m_splitValue;			//Split threshold for a numeric attribute (NaN for nominal splits).

    private double[] m_Distribution;		//Normalized class distribution at a leaf (one entry per class value).

    private int nts;				//Number of trees in the LSID3 sample per candidate attribute.
    private int numLeaves;				//Cached number of leaves in the tree (see measureNumLeaves()).

    private boolean isLSID3TreeChanged;		//Set to true every time the tree changes;
    //forces numLeaves to be recomputed on the next measureNumLeaves() call.

    private List<String> m_excAttributeNames;	//Names of attributes already used on the path to this node
    //(numeric attributes are stored as "name=splitValue");
    //used to skip an attribute if it is already in the list.

    private Instances m_Examples;			//Examples in the dataset. NOTE(review): never assigned in this file,
    //so getExamples() always returns null — confirm intent.

    /**
     * Creates an LSID3 classifier with the default sample size of one
     * tree per candidate attribute.
     */
    public LSID3()
    {
	// Delegate to the parameterized constructor to avoid duplicating
	// the field initialization; 1 is the default sample size.
	this(1);
    }

    /**
     * Creates an LSID3 classifier with the given sample size.
     *
     * @param sampleSize number of trees to generate per candidate attribute
     *                   (the nts parameter); 0 or less means plain ID3 mode
     */
    public LSID3(int sampleSize)
    {
	super();
	m_Attribute = null;
	m_AttIdx = -1;
	m_splitValue = Double.NaN;	// NaN marks "no numeric split chosen yet"
	m_ClassAttribute = null;
	m_ClassValue = Double.NaN;
	m_Distribution = null;
	//		m_Successors = null;

	nts = sampleSize;
	numLeaves = 0;

	isLSID3TreeChanged = true;	// force leaf-count recomputation
	m_excAttributeNames = new ArrayList<String>();
    }

    /**
     * Returns a string describing the classifier.
     * @return a description suitable for the GUI.
     */
    public String globalInfo() 
    {
	StringBuilder info = new StringBuilder();
	info.append("Class for constructing an unpruned decision tree based on the LSID3 ");
	info.append("algorithm. Can deal with nominal and numeric attributes. No missing values ");
	info.append("allowed. Empty leaves may result in unclassified instances. For more ");
	info.append("information see: \n\n");
	info.append(getTechnicalInformation().toString());
	return info.toString();
    }

    /**
     * Returns an instance of a TechnicalInformation object, containing 
     * detailed information about the technical background of this class,
     * e.g., paper reference or book this class is based on.
     * 
     * @return the technical information about this class
     */
    @Override
    public TechnicalInformation getTechnicalInformation()
    {
	final TechnicalInformation info = new TechnicalInformation(Type.ARTICLE);

	info.setValue(Field.AUTHOR, "S. Esmeir");
	info.setValue(Field.YEAR, "2006");
	info.setValue(Field.TITLE, "Anytime learning of decision trees");
	info.setValue(Field.JOURNAL, "Machine Learning");
	info.setValue(Field.VOLUME, "1");
	info.setValue(Field.NUMBER, "1");
	info.setValue(Field.PAGES, "897-900");

	return info;
    }

    /**
     * Returns default capabilities of the classifier: nominal and numeric
     * attributes, nominal class, no minimum on the number of instances.
     *
     * @return      the capabilities of this classifier
     */
    public Capabilities getCapabilities() 
    {
	Capabilities result = super.getCapabilities();
	result.disableAll();	// start from a clean slate, then enable selectively

	// attributes
	result.enable(Capability.NOMINAL_ATTRIBUTES);
	result.enable(Capability.NUMERIC_ATTRIBUTES);

	// class
	result.enable(Capability.NOMINAL_CLASS);

	// instances
	result.setMinimumNumberInstances(0);

	return result;
    }

    /**
     * Builds LSID3 decision tree classifier.
     *
     * @param data the training data
     * @exception Exception if classifier can't be built successfully
     */
    @Override
    public void buildClassifier(Instances data) throws Exception
    {
	// Make sure this classifier can handle the supplied data.
	getCapabilities().testWithFail(data);

	// Work on a copy so the caller's dataset is not modified, then
	// drop instances whose class value is missing.
	Instances trainingData = new Instances(data);
	trainingData.deleteWithMissingClass();

	makeTree(trainingData);
    }


    /**
     * Builds an LSID3 tree recursively over the given data.
     *
     * Leaf case: stores the normalized class distribution and the majority
     * class at this node. Internal case: splits on the attribute chosen by
     * chooseAtrribute() and recurses into one successor per branch
     * (numValues branches for a nominal attribute, 2 for numeric),
     * accumulating leaf/node counts and height on the way back up.
     *
     * @param data the training data
     * @exception Exception if decision tree can't be built successfully
     */
    private void makeTree(Instances data) throws Exception
    {		
	// Check if no instances have reached this node: empty leaf,
	// classified as "missing" (may leave test instances unclassified).
	if (data.numInstances() == 0) 
	{
	    nodeData = null;
	    m_Attribute = null;
	    m_ClassValue = Instance.missingValue();
	    m_Distribution = new double[data.numClasses()];
	    return;
	}   

	nodeData = data;

	m_Attribute = chooseAtrribute(data);
	if (isLeaf(data, m_Attribute))
	{
	    // Leaf: count class occurrences, normalize to a distribution,
	    // and predict the majority class.
	    m_Attribute = null;
	    m_Distribution = new double[data.numClasses()];

	    Enumeration instEnum = data.enumerateInstances();
	    while (instEnum.hasMoreElements()) 
	    {
		Instance inst = (Instance) instEnum.nextElement();
		m_Distribution[(int) inst.classValue()]++;
	    }


	    Utils.normalize(m_Distribution);

	    m_ClassValue = Utils.maxIndex(m_Distribution);
	    m_ClassAttribute = data.classAttribute();	

	    setNLeaves(1);
	    setNNodes(1);
	    setHeight(0);
	}
	else 
	{	
	    setNLeaves(0);
	    setNNodes(0);
	    setHeight(0);


	    //For nominal attribute split value is NaN.
	    Instances[] splitData = LSID3Utils.splitData(data, m_Attribute, m_splitValue);
	    List<String> exc = addExcludedAttribute(m_Attribute);

	    int numValues = 0;
	    if (m_Attribute.isNominal()) numValues = m_Attribute.numValues();
	    else 
	    {
		numValues = 2; //For numeric attributes, the tree is always binary.
	    }

	    m_Successors = new LSID3[numValues];
	    for (int j = 0; j < numValues; j++) 
	    {
		// Each child inherits the sample size and the (extended)
		// exclusion list so it never re-splits on this attribute.
		m_Successors[j] = new LSID3();
		((LSID3)m_Successors[j]).setNts(this.nts);
		((LSID3)m_Successors[j]).setExcludedAttributes(exc);	      


		m_Successors[j].setParent(this);
		m_Successors[j].setAncestors(findChildrenAncestors());

		((LSID3)m_Successors[j]).makeTree(splitData[j]);


		// Aggregate the subtree statistics into this node.
		setNLeaves(getNLeaves()+m_Successors[j].getNLeaves());
		setNNodes(getNNodes()+m_Successors[j].getNNodes()); 
		setHeight((m_Successors[j].getHeight() > getHeight())?
			m_Successors[j].getHeight(): getHeight());
	    }


	    setNNodes(1+getNNodes()); 
	    setHeight(1+getHeight());
	}
    }

    /**
     * Parses the given option list. Recognizes -nts (number of trees in the
     * sample); when absent, the sample size is set to 0 (plain ID3 mode).
     *
     * @param options the list of options as an array of strings
     * @throws Exception if an option is not supported
     */
    public void setOptions(String[] options) throws Exception
    {
	final String ntsValue = Utils.getOption("nts", options);
	if (ntsValue.length() > 0)
	{
	    setNts(Integer.parseInt(ntsValue));
	}
	else
	{
	    setNts(0);	// no -nts given: fall back to ID3 behavior
	}
	super.setOptions(options);
    }

    /***
     * Chooses the split attribute. When nts <= 0 the attribute is picked
     * ID3-style (maximum information gain); otherwise, for each candidate
     * attribute a sample of nts biased random trees is built and the
     * attribute with the minimal total sample-tree size is chosen.
     * Side effects: sets m_AttIdx and m_splitValue for the chosen attribute.
     * @param data	given data
     * @return	chosen attribute
     */
    private Attribute chooseAtrribute(Instances data)
    {		
	Attribute attr = null;

	//If sample size is 0 then choose attribute like Id3
	if (nts <= 0)
	{
	    attr = id3ChooseAttribute(data);
	}
	//Else then for each attribute generate sample of nts subtrees
	//then choose the attribute x which has the minimal tree size rooted at x.
	else
	{
	    Enumeration attributes = data.enumerateAttributes();

	    int[] sampleTreesMinSizes = new int[data.numAttributes() - 1];
	    double[] splitValues = new double[data.numAttributes() - 1];

	    // Unvisited/excluded attributes keep MAX_VALUE so minIndex skips them.
	    for (int i = 0; i < sampleTreesMinSizes.length; i++) { sampleTreesMinSizes[i] = Integer.MAX_VALUE; }
	    for (int i = 0; i < splitValues.length; i++) { splitValues[i] = Double.NaN; }

	    this.m_AttIdx = 0;
	    while (attributes.hasMoreElements())
	    {
		Attribute a = (Attribute) attributes.nextElement();
		double splitValue = Double.NaN;
		if (a.isNumeric())
		{
		    splitValue = chooseBestValue(data, a);
		    splitValues[a.index()] = splitValue;
		}
		// BUGFIX: 'splitValue != Double.NaN' was always true (NaN
		// comparisons are unordered, so x != NaN holds for every x);
		// use Double.isNaN so numeric attributes with no usable
		// split value are correctly skipped.
		if (a.isNominal() && !m_excAttributeNames.contains(a.name())
			|| (a.isNumeric() && !Double.isNaN(splitValue)))
		{					
		    sampleTreesMinSizes[a.index()] = 0;
		    Instances[] subsets = LSID3Utils.splitData(data, a, splitValue);				
		    for (Instances subset : subsets)
		    {
			//Tree size is computed by summing up sizes of sons' subtrees.
			sampleTreesMinSizes[a.index()] += sampleSID3(subset, nts);										
		    }
		}
	    }

	    //Choose attribute for which tree size is minimal.
	    m_AttIdx = Utils.minIndex(sampleTreesMinSizes);
	    m_splitValue = splitValues[m_AttIdx];

	    attr = (Attribute) data.attribute(m_AttIdx);
	}

	return attr;
    }

    /**
     * Decides whether this node is a leaf: either the class is constant in
     * the data, or the chosen attribute (for numeric attributes: this exact
     * split value) was already used on the path to this node.
     *
     * @param data instances reaching the node
     * @param attr the attribute chosen for the split
     * @return true if the node must become a leaf
     */
    private boolean isLeaf(Instances data, Attribute attr)
    {
	if (LSID3Utils.isConstClass(data))
	{
	    return true;
	}
	if (attr.isNominal())
	{
	    return m_excAttributeNames.contains(attr.name());
	}
	return attr.isNumeric()
		&& m_excAttributeNames.contains(attr.name() + "=" + m_splitValue);
    }



    /**
     * @return the sample size: number of trees generated per candidate attribute
     */
    public int getNts()
    {
	return this.nts;
    }

    /**
     * Sets the sample size.
     * @param nts number of trees per candidate attribute; 0 or less means ID3 mode
     */
    public void setNts(int nts)
    {
	this.nts = nts;
    }

    /**
     * Returns an enumeration describing the available options.
     *
     * Valid options are: <p>
     *
     * -nts &lt;num&gt; <br>
     * Number of trees in the sample.<p>
     *
     * @return an enumeration of all the available options.
     */
    public Enumeration<Option> listOptions() 
    {

	Vector<Option> newVector = new Vector<Option>(1);

	// BUGFIX: setOptions() consumes a value after -nts via
	// Utils.getOption("nts", ...), so the option takes ONE argument;
	// it was declared with zero arguments and a bare "-nts" synopsis.
	newVector.addElement(new Option("\tNumber of trees in the sample.", "nts", 1, "-nts <num>"));

	return newVector.elements();
    }

    /**
     * Gets the current settings of the Classifier.
     *
     * @return an array of strings suitable for passing to setOptions
     */
    public String [] getOptions() 
    {
	List<String> options = new ArrayList<String>();
	options.add("-nts");
	options.add(String.valueOf(getNts()));

	return options.toArray(new String[options.size()]);
    }

    /**
     * A single biased random tree used for LSID3 sampling: at each node the
     * split attribute is drawn at random with probability proportional to
     * its information gain, and the resulting tree's leaf count estimates
     * the size of the subtree a real split would produce.
     */
    class BiasedRandomSampleTree
    {
	private int numLeaves;				//Cached number of leaves in this sample tree.
	private BiasedRandomSampleTree[] children;	//Children of the current root (null for a leaf).
	private Attribute root;				//Attribute chosen at this node (null for an empty tree).
	private double splitValue;			//Split value for a numeric root attribute (NaN otherwise).

	private boolean isTreeChanged;			//Set to true whenever the tree changes;
	//forces numLeaves to be recomputed by computeNumLeaves().

	/** Creates an empty sample tree. */
	public BiasedRandomSampleTree()
	{
	    numLeaves = 0;
	    root = null;
	    isTreeChanged = true;
	    children = null;
	    splitValue = Double.NaN;
	}

	/***
	 * Builds a biased random tree over the data: picks a root attribute
	 * at random (gain-proportional), splits, and recurses into each
	 * subset. The node stays a leaf (children == null) when the class is
	 * constant or the split does not separate the data into more than
	 * one non-empty subset.
	 * @param data	- given dataset
	 */
	public void buildTree(Instances data)
	{		
	    if (data.numInstances() == 0) 
	    {
		root = null;	// empty tree: contributes 0 leaves
		return;
	    }   

	    root = chooseRootAttribute(data);	// also sets this.splitValue for numeric roots

	    Instances[] splitData = LSID3Utils.splitData(data, root, splitValue);

	    if (!isLeaf(data, root)
		    && countNonEmptySubsets(splitData) > 1)
	    {
		int numValues = 0;
		if (root.isNominal()) numValues = root.numValues();
		else numValues = 2;	// numeric splits are always binary

		children = new BiasedRandomSampleTree[numValues];
		for (int i = 0; i < children.length; i++)
		{
		    children[i] = new BiasedRandomSampleTree();
		    children[i].buildTree(splitData[i]);
		}
	    }
	}

	/***
	 * Computes number of leaves in the tree. The count is cached in
	 * numLeaves and only recomputed while isTreeChanged is set; note the
	 * empty-tree (0) and single-leaf (1) cases return directly without
	 * touching the cache or the flag.
	 * @return	number of leaves.
	 */
	public int computeNumLeaves()
	{
	    if (isTreeChanged)
	    {
		if (root == null)
		{
		    return 0;	// empty tree
		}
		if (root != null && children == null)
		{
		    return 1;	// leaf node
		}
		else			  
		{
		    // Internal node: sum the children's leaf counts into the cache.
		    for (BiasedRandomSampleTree child : children)
		    {
			numLeaves += child.computeNumLeaves();				
		    }			   
		}
	    }
	    isTreeChanged = false;
	    return numLeaves;
	}

	/***
	 * Chooses attribute to be root. Based on biased random choosing:
	 * the probability of picking an attribute is proportional to its
	 * information gain. Side effect: stores the numeric split value of
	 * the chosen attribute in this.splitValue.
	 * NOTE(review): infoGains entries default to Integer.MAX_VALUE; every
	 * attribute returned by enumerateAttributes() is overwritten, but the
	 * arrays assume attribute indices fit in numAttributes()-1 slots
	 * (i.e. the class attribute is last) — confirm for this dataset.
	 * @return	chosen attribute
	 */
	private Attribute chooseRootAttribute(Instances data)
	{
	    double[] infoGains = new double[data.numAttributes() - 1];
	    double[] splitValues = new double[data.numAttributes() - 1];

	    for (int i = 0; i < infoGains.length; i++) { infoGains[i] = Integer.MAX_VALUE; }
	    for (int i = 0; i < splitValues.length; i++) { splitValues[i] = Double.NaN; }

	    Attribute attr = null;

	    Enumeration attributes = data.enumerateAttributes();										
	    while (attributes.hasMoreElements())
	    {
		Attribute a = (Attribute) attributes.nextElement();
		double splitValue = Double.NaN;
		if (a.isNumeric())
		{
		    splitValue = chooseBestValue(data, a);
		    //The split value is put in numeric attribute's place.
		    splitValues[a.index()] = splitValue;
		}
		infoGains[a.index()] = LSID3Utils.computeInfoGain(data, a, splitValue);				
	    }

	    //Choose attribute randomly. Probability is proportional to gain.
	    int m_AttIdx = LSID3Utils.computeRandomIndex(infoGains);	// local; shadows the outer field
	    splitValue = splitValues[m_AttIdx];	// assigns the instance field used by buildTree()

	    attr = (Attribute) data.attribute(m_AttIdx);

	    return attr;		    		   
	}

	/**
	 * Decides whether the node holding this data is a leaf, i.e. whether
	 * all instances share the same class value.
	 * @param data instances reaching the node
	 * @param attr candidate split attribute (currently unused)
	 * @return true if the class value is constant across the data
	 */
	private boolean isLeaf(Instances data, Attribute attr)
	{
	    boolean constantClass = LSID3Utils.isConstClass(data);
	    return constantClass;
	}

	/**
	 * Resets the tree (recursively) so it can be rebuilt for the next
	 * round of sampling: clears the cached leaf count, the change flag,
	 * the root attribute, and all children.
	 */
	public void cleanTree()
	{
	    numLeaves = 0;
	    isTreeChanged = true;
	    if (children != null)
	    {
		for (BiasedRandomSampleTree t : children)
		{
		    if (t != null)
		    {
			t.cleanTree();
		    }				
		}
		children = null;
	    }
	    // BUGFIX: clear the root unconditionally; previously it was only
	    // reset inside the children != null branch, so a cleaned leaf
	    // kept its root and computeNumLeaves() still reported 1 for it.
	    root = null;
	}

	/**
	 * Counts how many of the given subsets contain at least one instance.
	 * @param subsets the split subsets to inspect
	 * @return the number of non-empty subsets
	 */
	private int countNonEmptySubsets(Instances[] subsets)
	{
	    int nonEmpty = 0;
	    for (int i = 0; i < subsets.length; i++)
	    {
		if (subsets[i].numInstances() > 0)
		{
		    nonEmpty++;
		}
	    }
	    return nonEmpty;
	}

	/***
	 * Chooses a split value for a numeric attribute at random, with
	 * probability proportional to the value's information gain.
	 * @param data current dataset
	 * @param a current numeric attribute
	 * @return the chosen split value, or NaN when no usable value exists
	 */
	private double chooseBestValue(Instances data, Attribute a)
	{			
	    double bestValue = Double.NaN;

	    Enumeration instances = data.enumerateInstances();
	    List<Double> values = new ArrayList<Double>(data.numDistinctValues(a));

	    for (; instances.hasMoreElements(); )		 
	    {
		Instance instance = (Instance) instances.nextElement();						  
		double v = instance.value(a);
		// BUGFIX: the condition was !(excluded && !seen) — a De Morgan
		// slip that added duplicate and excluded values. Collect each
		// candidate value once, and only if it is not excluded, matching
		// the outer LSID3.chooseBestValue.
		if (!m_excAttributeNames.contains(a.name() + "=" + v)
			&& !values.contains(v))
		{					  					
		    values.add(v);					 
		}
	    }		    

	    if (!values.isEmpty())
	    {
		double[] infoGains = new double[values.size()];
		for (int i = 0; i < values.size(); i++)
		{
		    double v = values.get(i);
		    infoGains[i] = LSID3Utils.computeInfoGain(data, a, v);
		}

		//Choose at random values.
		//The probability of choosing is proportional to values' gain.
		int valueIdx = LSID3Utils.computeRandomIndex(infoGains);

		bestValue = values.get(valueIdx);				  
	    }
	    return bestValue;
	}

	/** @return the attribute at the root of this sample tree, or null for an empty tree */
	public Attribute getRoot() 
	{
	    return root;
	}

	/** @return the child subtrees, or null when this node is a leaf */
	public BiasedRandomSampleTree[] getChildren() 
	{
	    return children;
	}	
    }

    /**
     * Chooses the split attribute ID3-style: the attribute with the maximum
     * information gain (Double.MAX_VALUE is passed as the split value,
     * presumably a sentinel for "no numeric threshold").
     * NOTE(review): infoGains has length numAttributes() including the class
     * attribute, whose slot stays 0.0; if every real attribute has gain
     * <= 0, maxIndex may pick the class attribute — confirm against callers.
     */
    private Attribute id3ChooseAttribute(Instances data)
    {
	// Compute attribute with maximum information gain.
	double[] infoGains = new double[data.numAttributes()];
	Enumeration attEnum = data.enumerateAttributes();
	while (attEnum.hasMoreElements()) 
	{
	    Attribute att = (Attribute) attEnum.nextElement();
	    infoGains[att.index()] = LSID3Utils.computeInfoGain(data, att, Double.MAX_VALUE);
	}

	return (Attribute) data.attribute(Utils.maxIndex(infoGains));
    }

    /***
     * Builds r biased random sample trees over the data (at least one tree
     * is always built, even when r < 1) and returns the smallest leaf count
     * observed among them.
     * @param data data to build the sample trees from
     * @param r number of trees in the sample
     * @return minimal tree size (leaf count) in the sample
     */
    private int sampleSID3(Instances data, int r)
    {
	BiasedRandomSampleTree sampleTree = new BiasedRandomSampleTree();
	int minSize = Integer.MAX_VALUE;
	int treeCount = (r < 1) ? 1 : r;

	for (int i = 0; i < treeCount; i++)
	{
	    sampleTree.buildTree(data);

	    int size = sampleTree.computeNumLeaves();
	    if (size < minSize)
	    {
		minSize = size;
	    }
	    sampleTree.cleanTree();	// reset before the next round
	}

	return minSize;
    }

    //For debug: walks the sample tree, printing one space per depth level.
    private void printTree(BiasedRandomSampleTree node, int numIndents) 
    {
	if (node.getRoot() == null)
	{
	    return;
	}

	int remaining = numIndents;
	while (remaining-- > 0)
	{
	    System.out.print(" ");
	}

	BiasedRandomSampleTree[] childNodes = node.getChildren();
	if (childNodes == null)
	{
	    return;
	}
	for (int i = 0; i < childNodes.length; i++)
	{
	    printTree(childNodes[i], numIndents + 1);
	}
    }	

    /**
     * Classifies a given test instance using the decision tree.
     *
     * @param instance the instance to be classified
     * @return the classification
     * @throws Exception if the instance has missing values
     */
    public double classifyInstance(Instance instance) throws Exception 
    {
	if (instance.hasMissingValue()) 
	{
	    throw new NoSupportForMissingValuesException("LSID3: no missing values, "
		    + "please.");
	}

	// Leaf: return the class value stored at this node.
	if (m_Attribute == null) 
	{
	    return m_ClassValue;
	}

	// Nominal split: follow the branch matching the attribute value.
	if (m_Attribute.isNominal()) 
	{
	    int branch = (int) instance.value(m_Attribute);
	    return m_Successors[branch].classifyInstance(instance);
	}

	// Numeric split: left subtree for values <= threshold, right otherwise.
	if (m_Attribute.isNumeric())
	{
	    int side = (instance.value(m_Attribute) <= m_splitValue) ? 0 : 1;
	    return ((LSID3) m_Successors[side]).classifyInstance(instance);
	}

	// Unreachable for the supported attribute types; preserves the
	// original default classification of 0.0.
	return 0.0;
    }

    /**
     * Main method: runs the classifier from the command line via Weka's
     * standard runner.
     *
     * @param args the options for the classifier
     */
    public static void main(String[] args) 
    {
	runClassifier(new LSID3(), args);
    }

    //	public String toString()
    //	{
    //		return "Number of leaves: " + measureNumLeaves(); 
    //	}

    /**
     * Computes class distribution for instance using decision tree.
     *
     * @param instance the instance for which distribution is to be computed
     * @return the class distribution for the given instance
     * @throws Exception if the instance has missing values
     */
    public double[] distributionForInstance(Instance instance) throws Exception 
    {
	if (instance.hasMissingValue()) 
	{
	    throw new NoSupportForMissingValuesException("LSID3: cannot support data with missing values.");
	}

	// Leaf: return the distribution stored at this node.
	if (m_Attribute == null) 
	{
	    return m_Distribution;
	}

	// Nominal split: follow the branch matching the attribute value.
	if (m_Attribute.isNominal())
	{ 
	    int branch = (int) instance.value(m_Attribute);
	    return m_Successors[branch].distributionForInstance(instance);
	}

	// Numeric split: left subtree for values <= threshold, right otherwise.
	int side = (instance.value(m_Attribute) <= m_splitValue) ? 0 : 1;
	return ((LSID3) m_Successors[side]).distributionForInstance(instance);
    }

    /***
     * Returns a copy of the exclusion list extended with the given attribute:
     * nominal attributes are recorded by name, numeric attributes as
     * "name=splitValue" (using the node's current m_splitValue).
     * @param m_Attribute	the attribute to add
     * @return the extended copy of the exclusion list
     */
    private List<String> addExcludedAttribute(Attribute m_Attribute)
    {
	List<String> extended = new ArrayList<String>(m_excAttributeNames);
	if (m_Attribute.isNominal())
	{
	    extended.add(m_Attribute.name());
	}
	else if (m_Attribute.isNumeric())
	{
	    extended.add(m_Attribute.name() + "=" + m_splitValue);
	}
	return extended;
    }

    /**
     * Sets the list of attribute names excluded from further splits on the
     * path to this node (the list reference is stored, not copied).
     * @param exc the exclusion list to use
     */
    private void setExcludedAttributes(List<String> exc)
    {
	this.m_excAttributeNames = exc;
    }

    /***
     * Chooses best split value for a numeric attribute: samples candidate
     * values at random (probability proportional to information gain), builds
     * one sample tree per chosen value, and returns the value whose tree was
     * smallest.
     * @param data	current dataset
     * @param a	current numeric attribute
     * @return	the best sampled split value, or NaN when no usable value exists
     */
    private double chooseBestValue(Instances data, Attribute a)
    {		  
	double bestValue = Double.NaN;			
	List<Double> values = new ArrayList<Double>();
	Enumeration instances = data.enumerateInstances();

	//Collect the distinct, not-yet-excluded values of the attribute.
	for (; instances.hasMoreElements(); )		 
	{
	    Instance instance = (Instance) instances.nextElement();

	    double v = instance.value(a);
	    if (!(m_excAttributeNames.contains(a.name() + "=" + v))					  
		    && !values.contains(v))
	    {				
		values.add(v);
	    }
	}		
	if (!values.isEmpty())
	{
	    int sampleSize = 3;
	    int[] sampleTreesMinSizes = new int[sampleSize];
	    double[] sampledValues = new double[sampleSize];
	    for (int i = 0; i < sampleTreesMinSizes.length; i++) { sampleTreesMinSizes[i] = Integer.MAX_VALUE; }


	    double[] infoGains = new double[values.size()];			  			  
	    for (int i = 0; i < values.size(); i++)
	    {
		double v = values.get(i);
		infoGains[i] = LSID3Utils.computeInfoGain(data, a, v);
	    }

	    //Choose at random values in sample and compute value which gives minimal tree size.
	    //The probability of choosing is proportional to values' gain.
	    //Sampling now is done by randomly choosing k values.
	    //For each chosen value we generate only one tree.
	    for (int i = 0; i < sampleSize; i++)
	    {
		int valueIdx = LSID3Utils.computeRandomIndex(infoGains);
		double v = values.get(valueIdx);
		sampledValues[i] = v;	// remember which value round i evaluated

		if (!(m_excAttributeNames.contains(a.name() + "=" + v)))						  
		{				  
		    Instances[] subsets = LSID3Utils.splitData(data, a, v);

		    sampleTreesMinSizes[i] = 0;
		    for (Instances subset : subsets)
		    {
			//Tree size is computed by summing up sizes of sons' subtrees.
			sampleTreesMinSizes[i] += sampleSID3(subset, 1);
		    }
		}
	    }		  		  

	    // BUGFIX: Utils.minIndex returns an index into the SAMPLE
	    // (0..sampleSize-1), not into 'values'; the old code indexed
	    // 'values' with it, returning an unrelated value and throwing
	    // IndexOutOfBoundsException when fewer than sampleSize distinct
	    // values existed. Return the value actually sampled in the best
	    // round instead.
	    bestValue = sampledValues[Utils.minIndex(sampleTreesMinSizes)];
	}		

	return bestValue;
    }

    /**
     * @return data that reached this node
     * NOTE(review): m_Examples is never assigned anywhere in this file, so
     * this always returns null — confirm whether nodeData was intended.
     */
    @Override
    public Instances getExamples()
    {
	return m_Examples;
    }

    /**
     * @return the child subtrees of this node (null for a leaf)
     */
    @Override
    public IIDTTreeClass[] getSuccessors()
    {
	return m_Successors;
    }

    /**
     * @return number of times the contract decision tree induction algorithm 
     * was invoked for the node (as described in "Anytime Learning of Decision
     * Trees" by Esmeir and Markovitch).
     */
    @Override
    public int getLastR()
    {
	return nts;
    }

    /**
     * @return the contract parameter to use for the NEXT invocation of the
     * anytime induction algorithm on this node: double the current sample
     * size (as described in "Anytime Learning of Decision Trees" by Esmeir
     * and Markovitch).
     */
    public int getNextR()
    {
	return 2 * nts;
    }

    /***
     * Return number of leaves in the main tree. The result is cached in
     * numLeaves and recomputed only while isLSID3TreeChanged is set.
     * NOTE(review): a leaf node (m_Successors == null) contributes 0, and
     * each non-null child adds 1 + its own count, so this looks closer to a
     * node count than a leaf count — confirm against getNLeaves(), which
     * makeTree() maintains separately.
     * @return	number of leaves.
     */
    public int measureNumLeaves()
    {
	if (isLSID3TreeChanged)
	{
	    if (m_Successors != null)
	    {
		// Cast is safe at runtime: makeTree() allocates new LSID3[].
		for (LSID3 t : (LSID3[])m_Successors)
		{
		    if (t != null)
		    {
			numLeaves += 1 + t.measureNumLeaves();
		    }
		}
	    }
	}

	isLSID3TreeChanged = false;
	return numLeaves;
    }

    /**
     * @return number of nodes in the tree rooted by the node (including the node).
     * NOTE(review): not implemented — always returns -1 (getNNodes() holds the
     * count maintained by makeTree()).
     */
    public int measureNumNodes()
    {
	return -1;
    }

    /**
     * @return height of the tree rooted by the node.
     * NOTE(review): not implemented — always returns -1 (getHeight() holds the
     * value maintained by makeTree()).
     */
    public int measureHeight()
    {
	return -1;
    }

    /**
     * Sets the contract parameter (sample size) for the anytime algorithm.
     * @param r the new sample size
     */
    @Override
    public void setR(int r) {
	nts = r;		
    }
    /**
     * Computes information gain for an attribute.
     *
     * @param data the data for which info gain is to be computed
     * @param att the attribute
     * @return the information gain for the given attribute and data
     * @throws Exception if computation fails
     */
    //	private double computeInfoGain(Instances data, Attribute att) 
    //			throws Exception {
    //
    //		double infoGain = LSID3Utils.computeEntropy(data);
    //
    //		Instances[] splitData = LSID3Utils.splitData(data, att, m_splitValue);
    ////		Instances[] splitData = splitData(data, att);
    //		for (int j = 0; j < att.numValues(); j++) {
    //			if (splitData[j].numInstances() > 0) {
    //				infoGain -= ((double) splitData[j].numInstances() /
    //						(double) data.numInstances()) *
    //						computeEntropy(splitData[j]);
    //			}
    //		}
    //		return infoGain;
    //	}
    //
    //	/**
    //	 * Computes the entropy of a dataset.
    //	 * 
    //	 * @param data the data for which entropy is to be computed
    //	 * @return the entropy of the data's class distribution
    //	 * @throws Exception if computation fails
    //	 */
    //	private double computeEntropy(Instances data) throws Exception {
    //
    //		double [] classCounts = new double[data.numClasses()];
    //		Enumeration instEnum = data.enumerateInstances();
    //		while (instEnum.hasMoreElements()) {
    //			Instance inst = (Instance) instEnum.nextElement();
    //			classCounts[(int) inst.classValue()]++;
    //		}
    //		double entropy = 0;
    //		for (int j = 0; j < data.numClasses(); j++) {
    //			if (classCounts[j] > 0) {
    //				entropy -= classCounts[j] * Utils.log2(classCounts[j]);
    //			}
    //		}
    //		entropy /= (double) data.numInstances();
    //		return entropy + Utils.log2(data.numInstances());
    //	}
    //	
}
