// dtree.c
// -------
// Eric Huang
// Bohao (Dan) Pan

#include <cmath>
#include <cstdio>
#include <string>
#include <cstdlib>
#include <iostream>
#include <sstream>
#include "dtree_util.h"
#include "dtree.h"
#include <time.h>

using namespace std;

// our function declarations
void CrossValidateBoosting(int k, int numRounds, int learnerDepth, int testSetSize, DatasetDescription DD, inst *dataset, int n, double &training_perf, double &testing_perf);
void CrossValidate(int k, int pruneFlag, int valSetSize, int testSetSize, DatasetDescription DD, inst *dataset, int n, double &training_perf, double &testing_perf);
void SingleStump(int testSetSize, DatasetDescription DD, inst *dataset, int n);
DecisionTree* PostPrune(DatasetDescription DD, DecisionTree *node, inst *dataset, int n, inst *valset, int valSetSize);
void ShuffleInst(inst * dataset, int begin, int end);

Hypotheses *AdaBoost(DatasetDescription DD, inst *dataset, int n, int numRounds, int learnerDepth);
int ClassifyBoosting(DatasetDescription DD, Hypotheses *hh, int attribs[]);


// main
// ----
// The main program loop.
// Parses the command line, loads the (clean or noisy) datafile, duplicates
// the dataset so the CV routines can shuffle/rotate a working copy, then
// runs 10 trials of 10-fold cross-validation with boosting, pruning, or
// plain ID3 depending on the flags, and prints average train/test accuracy.
// You should modify this function to run your experiments
int main(int argc, char **argv)
{
  int noisyFlag;	// True iff noisy data is to be used
  int pruneFlag;	// True iff pruning should be done
  int boostFlag;  // Whether or not we should use boosting
  int learnerDepth;  // The depth of weak learners (only used for boosting)
  int valSetSize;	// Size of validation set to use for pruning
  int numRounds;  // The number of rounds to use for boosting
  FILE *datafile;	// The datafile being used
  inst dataset[DATASIZE * 2];	// Two consecutive copies of the dataset
  DatasetDescription DD;		// The DatasetDescription for this dataset

  // Set DD to describe this particular dataset: 9 attribs that go from 
  // 1 to 10, and a boolean classification (0 or 1)
  DD.numattribs = 9;
  DD.range = (int *) malloc(DD.numattribs * sizeof(int));
  for(int i = 0; i < DD.numattribs; i++) {
    DD.range[i] = 10;
  }
  
  DD.numclasses = 2;

  // Parse the command line
  if (ValidateInput(argc, argv,
          noisyFlag, pruneFlag, valSetSize, boostFlag, learnerDepth, numRounds) < 0) {
    cout << "usage: dtree <-n> <-p valSetSize> <-b boostRounds> <-d weakLearnerDepth>" << endl;
	  exit(1);
  }

  // Read in the data file
  if (noisyFlag) {
    datafile = fopen(NOISYFILE, "r");

  } else {
    datafile = fopen(DATAFILE, "r");
  }

  if (datafile == NULL) {
    cout << "Unable to open data file" << endl;
	  exit(1);
  }

  if (ParseInput(DD, dataset, DATASIZE, datafile) < 0) {
    cout << "Error reading data file" << endl;
	  exit(1);
  }
  
  fclose(datafile);

  // Copy the dataset so we have two copies of it.
  // The second copy, at [DATASIZE, 2*DATASIZE), is the working copy that
  // the CV routines shuffle and rotate; the first copy stays in file order.
  for(int i = 0; i < DATASIZE; i++) {
	dataset[i + DATASIZE] = dataset[i];
  }


  /* ====================================
   * WRITE CODE FOR YOUR EXPERIMENTS HERE
   * ====================================
   */
	// constant seed for pseudorandom results but consistency for easy testing
	srand (100);
	
	// cross validation
	// Each trial runs one 10-fold cross-validation; the per-trial averages
	// are accumulated and divided by 10 (the trial count) below.
	int testSetSize = 10;
	double training_perf, testing_perf, training_perf_sum = 0, testing_perf_sum = 0;
	if (boostFlag != 0) {
		for (int i = 0; i < 10; i++) {
			//cout << "\n(Boosting) Trial " << i+1 <<endl;
			CrossValidateBoosting(10, numRounds, learnerDepth, testSetSize, DD, dataset, DATASIZE, training_perf, testing_perf);
			training_perf_sum += training_perf;
			testing_perf_sum += testing_perf;
		}
	}
	else if (pruneFlag != 0) {
		for (int i = 0; i < 10; i++) {
			cout << "\n(Pruning) Trial " << i+1 <<endl;
			CrossValidate(10, pruneFlag, valSetSize, testSetSize, DD, dataset, DATASIZE, training_perf, testing_perf);
			training_perf_sum += training_perf;
			testing_perf_sum += testing_perf;
		}
	}
	else {
		// No boosting, no pruning: plain unbounded-depth ID3
		// (valSetSize of 0 so the whole non-test region is training data)
		for (int i = 0; i < 10; i++) {
			cout << "\n(CV) Trial " << (i+1) <<endl;
			CrossValidate(10, pruneFlag, 0, testSetSize, DD, dataset, DATASIZE, training_perf, testing_perf);
			training_perf_sum += training_perf;
			testing_perf_sum += testing_perf;
		}
	}
    
    // Used to generate tree for No. 4 -- a single decision stump
    //SingleStump(testSetSize, DD, dataset, DATASIZE);
    
	cout << "\nAverage Training Performance: " << training_perf_sum / 10 <<endl;
	cout << "Average Testing Performance: " << testing_perf_sum / 10 <<endl;	
	
  free(DD.range);
  return 0;
}


// SingleStump
// ------------------------
// Used to generate tree for No. 4 -- a single decision stump
void SingleStump(int testSetSize, DatasetDescription DD, inst *dataset, int n) {
    // shuffle dataset
	ShuffleInst(dataset, 100, 199);
	int trainingSetSize = n - testSetSize;
	double training_perf = 0.0;
	double test_perf = 0.0;

	DecisionTree *tree = Learn(DD, dataset+DATASIZE, trainingSetSize, 1);
    // classify instances in training set
    int correct = 0;
    for (int v = 0; v < trainingSetSize; v++) {
        inst instance = dataset[DATASIZE + v];
        if (Classify(DD, tree, instance.attribs) == instance.classifier) {
            correct++;
        }
    }
    training_perf += (double) correct / trainingSetSize;
    
    // classify instances in test set
    correct = 0;
    for (int v = 0; v < testSetSize; v++) {
        inst instance = dataset[2*DATASIZE - testSetSize + v];
        if (Classify(DD, tree, instance.attribs) == instance.classifier) {
            correct++;
        }
    }
    // add up accuracy first
    test_perf += (double) correct / testSetSize;
    
    cout << "Root attribute: " << tree->attrib << endl;
    int attribTest[] = {0, 0, 1, 0, 0, 0, 0, 0, 0};
    for(; attribTest[tree->attrib] <= 10; attribTest[tree->attrib]++) {
        cout << attribTest[tree->attrib] << " -> " << Classify(DD, tree, attribTest) << endl;
    }
    
    FreeTree(DD, tree);
	
	cout << "Training Performance: " << training_perf <<endl;
	cout << "Test Performance: " << test_perf <<endl;
}

// CrossValidate
// -------------
// Perform k-fold cross validation over the second (working) copy of the
// dataset, laid out as:
// | training | validation | test |
// Trains one tree per fold (optionally post-pruned against the validation
// slice), accumulates train/test accuracy, and returns the fold averages
// through the two reference parameters.
void CrossValidate(int k, int pruneFlag, int valSetSize, int testSetSize, DatasetDescription DD, inst *dataset, int n, double &training_perf_ref, double &testing_perf_ref) {
	// shuffle dataset
	// NOTE(review): indices 100-199 are hard-coded; presumably DATASIZE == 100
	// so only the second copy is shuffled -- TODO confirm
	ShuffleInst(dataset, 100, 199);
	
	int trainingSetSize = n - valSetSize - testSetSize;
	double training_perf = 0.0;
	double test_perf = 0.0;
	for (int i = 0; i < k; i++) {
		// -1 depth means "no depth limit" for ID3
		DecisionTree *tree = Learn(DD, dataset+DATASIZE, trainingSetSize, -1);
		if (pruneFlag) {
			// Validation slice sits between the training and test slices
			inst *valset = dataset + 2*DATASIZE - valSetSize - testSetSize;
			tree = PostPrune(DD, tree, dataset+DATASIZE, trainingSetSize, valset, valSetSize);
		}
		
		// classify instances in training set
		int correct = 0;
		for (int v = 0; v < trainingSetSize; v++) {
			inst instance = dataset[DATASIZE + v];
			if (Classify(DD, tree, instance.attribs) == instance.classifier) {
				correct++;
			}
		}
		training_perf += (double) correct / trainingSetSize;
		
		// classify instances in test set
		correct = 0;
		for (int v = 0; v < testSetSize; v++) {
			inst instance = dataset[2*DATASIZE - testSetSize + v];
			if (Classify(DD, tree, instance.attribs) == instance.classifier) {
				correct++;
			}
		}
		// add up accuracy first
		test_perf += (double) correct / testSetSize;
		
		// re-order 2nd copy of datasize for next iteration:
		// rebuild the working copy from the pristine first copy, rotated by
		// i*testSetSize so a different slice lands in the test position.
		// NOTE(review): this rebuilds from the UNshuffled first copy, so the
		// initial shuffle only affects fold 0 -- verify this is intended
		for (int j = 0; j < DATASIZE; j++) {
			dataset[(j + i*testSetSize)%DATASIZE + DATASIZE] = dataset[j];
		}
		
		FreeTree(DD, tree);
	}
	// get average performance
	training_perf /= k;
	test_perf /= k;
	
	cout << "Training Performance: " << training_perf <<endl;
	cout << "Test Performance: " << test_perf <<endl;
	
	training_perf_ref = training_perf;
	testing_perf_ref = test_perf;
}

// CrossValidateBoosting
// -------------
// Perform k-fold cross validation using AdaBoost ensembles over the second
// (working) copy of the dataset, laid out as:
// | training | test |
// Averages of train/test accuracy across the folds are returned through the
// two reference parameters.  (n is expected to equal DATASIZE; the body uses
// the two interchangeably.)
void CrossValidateBoosting(int k, int numRounds, int learnerDepth, int testSetSize, DatasetDescription DD, inst *dataset, int n, double &training_perf_ref, double &testing_perf_ref) {
	// shuffle dataset
	// NOTE(review): indices 100-199 are hard-coded; presumably DATASIZE == 100
	// so only the second copy is shuffled -- TODO confirm
	ShuffleInst(dataset, 100, 199);
	
	int trainingSetSize = n - testSetSize;
	double training_perf = 0.0;
	double test_perf = 0.0;
	for (int i = 0; i < k; i++) {
		Hypotheses *hyp = AdaBoost(DD, dataset+n, trainingSetSize, numRounds, learnerDepth);
        // NULL means numRounds <= 0; nothing to evaluate.
        // NOTE(review): breaking early still divides the sums by k below,
        // understating the averages -- confirm this path is unreachable in use
        if (hyp == NULL) {
            break;
        }
		
		// classify instances in training set
		int correct = 0;
		for (int v = 0; v < trainingSetSize; v++) {
			inst instance = dataset[n + v];
			if (ClassifyBoosting(DD, hyp, instance.attribs) == instance.classifier) {
				correct++;
			}
		}
		training_perf += (double) correct / trainingSetSize;
		
		// classify instances in test set (last testSetSize slots of copy 2)
		correct = 0;
		for (int v = 0; v < testSetSize; v++) {
			inst instance = dataset[2*n - testSetSize + v];
			if (ClassifyBoosting(DD, hyp, instance.attribs)  == instance.classifier) {
				correct++;
           	}
		}
		// add up accuracy first
		test_perf += (double) correct / testSetSize;
		
		// re-order 2nd copy of datasize for next iteration:
		// rebuild the working copy from the pristine first copy, rotated by
		// i*testSetSize so a different slice lands in the test position.
		for (int j = 0; j < DATASIZE; j++) {
			dataset[(j + i*testSetSize)%DATASIZE + DATASIZE] = dataset[j];
		}
		
		// reset weight: AdaBoost mutated the instance weights, so restore
		// the uniform 1/n distribution before the next fold trains
		for (int j = 0; j < n; j++) {
			dataset[n + j].weight = 1.0/n;
		}
		
		FreeHypotheses(DD, hyp);
	}
	// get average performance
	training_perf /= k;
	test_perf /= k;
	
	//cout << "Training Performance: " << training_perf <<endl;
	//cout << "Test Performance: " << test_perf <<endl;
	
	training_perf_ref = training_perf;
	testing_perf_ref = test_perf;
}

// PostPrune
// ---------
// Bottom-up validation set pruning.
// Recursively prunes the children of 'node', then compares the subtree's
// accuracy on the validation set against a single leaf holding the most
// common class of this node's training subset; returns whichever wins
// (ties go to the leaf).  The returned tree replaces 'node'; the caller
// must not use 'node' afterwards, as it may have been freed.
DecisionTree* PostPrune(DatasetDescription DD, DecisionTree *node, inst *dataset, int n, inst *valset, int valSetSize) {
	// if reached leaf node, nothing to prune
	if (node->classifier != NONE)
		return node;
		
	// prune children first
	int attrib = node->attrib;
	
	// Allocate memory for a temporary dataset used to partition the
	// instances by attribute value
	inst *tempDataset = (inst *) malloc(n * sizeof(inst));

	// Loop through each possible value of the attribute
	for(int i = 0; i < DD.range[attrib]; i++) {
		// Store all instances in the dataset where the attribute has the
		// current value in the temporary dataset (values are one-indexed,
		// so value i+1 maps to child i)
		int size = 0;
		for(int j = 0; j < n; j++) {
		  if(dataset[j].attribs[attrib] == i + 1) {
		    tempDataset[size++] = dataset[j];
		  }
		}
		
		node->children[i] = PostPrune(DD, node->children[i], tempDataset, size, valset, valSetSize);
	}
	free(tempDataset);	// was leaked in the original

	// Count the (weighted) number of times each classification appears
	float *classCount = CountClasses(DD, dataset, n);

	// Find the most common classification
	int mostCommonClass = 0;
	for(int i = 1; i < DD.numclasses; i++) {
		if(classCount[i] > classCount[mostCommonClass])
			mostCommonClass = i;
	}
	free(classCount);	// was leaked in the original

	// Compare validation-set performance of the subtree vs a single leaf
	// predicting the most common class
	int correct = 0, correct2 = 0;
	for (int v = 0; v < valSetSize; v++) {
		inst instance = valset[v];
		// performance of subtree
		if (Classify(DD, node, instance.attribs) == instance.classifier) {
			correct++;
		}
		// performance of leaf with most common class
		if (mostCommonClass == instance.classifier)
			correct2++;
	}
	
	// prune when the leaf does at least as well as the subtree
	if (correct2 >= correct) {
		FreeTree(DD, node);
		return MakeLeaf(mostCommonClass);
	}
	else {
		return node;
	}
	
}

// AdaBoost
// --------
// Implementation of AdaBoost: trains numRounds depth-limited weak learners,
// reweighting the training instances after each round so later learners
// focus on previously misclassified examples.  Mutates dataset[].weight
// (callers must reset weights before reusing the data).  Returns a
// heap-allocated Hypotheses the caller frees with FreeHypotheses, or NULL
// when numRounds <= 0.
Hypotheses *AdaBoost(DatasetDescription DD, inst *dataset, int n, int numRounds, int learnerDepth) {
    if (numRounds <= 0) {
        return NULL;
    }

	Hypotheses *hh = (Hypotheses *) malloc(sizeof(Hypotheses));
	Hypothesis *h = (Hypothesis *) malloc(numRounds * sizeof(Hypothesis));
	for (int i = 0; i < numRounds; i++) {
		h[i].tree = Learn(DD, dataset, n, learnerDepth);
		
		// compute weighted training error of this round's learner
		float error = 0.0;
		for (int v = 0; v < n; v++) {
			inst instance = dataset[v];
			if (Classify(DD, h[i].tree, instance.attribs) != instance.classifier) {
				error += instance.weight;
			}
		}
		// A perfect learner: alpha would be infinite, so return this single
		// tree as the whole ensemble.
		if (error == 0.0) {
			Hypothesis *h_k = (Hypothesis *) malloc(sizeof(Hypothesis));
			h_k->tree = h[i].tree;
			// BUG FIX: alpha was left uninitialized here, so ClassifyBoosting
			// read an indeterminate weight.  Any positive value works for a
			// single hypothesis; use 1.
			h_k->alpha = 1.0;
			
			// free previously generated trees
			for (int k = 0; k < i; k++)
				FreeTree(DD, h[k].tree);
			free(h);
			
			hh->h = h_k;
			hh->num_h = 1;
			return hh;
		}
		//cout << "error " <<error <<endl;
		
		// compute alpha = (1/2) ln((1-err)/err)
		float alpha = 1.0/2 * log((1.0-error)/error);
		//cout << "alpha " << alpha <<endl;	
		// store alpha
		h[i].alpha = alpha;
		
		float e_a = exp(alpha);
		float e_na = exp(-alpha);
		
		// update weights: shrink correct instances, grow incorrect ones
		for (int v = 0; v < n; v++) {
			if (Classify(DD, h[i].tree, dataset[v].attribs) == dataset[v].classifier) {
				dataset[v].weight *= e_na;
			}
			else {
				dataset[v].weight *= e_a;
			}
		}
		
		// normalize weights so they again sum to 1
		float Z = 0.0;
		for (int v = 0; v < n; v++) {
			Z += dataset[v].weight;
		}
		for (int v = 0; v < n; v++) {
			dataset[v].weight /= Z;
		}
	}
	
	hh->h = h;
	hh->num_h = numRounds;
	return hh;
}

// ClassifyBoosting
// -----------------------
// Classify an instance using a weighted set of decision trees: each
// hypothesis votes for its predicted class with weight alpha, and the class
// with the largest total vote wins (ties go to the lower class index).
int ClassifyBoosting(DatasetDescription DD, Hypotheses *hh, int attribs[]) {
    
    // Initialize per-class vote totals
    float *classCount = (float *) (malloc(DD.numclasses * sizeof(float)));
    for(int i = 0; i < DD.numclasses; i++) {
        classCount[i] = 0; 
    }
    
    // Each tree adds its alpha to the class it predicts
	for (int i = 0; i < hh->num_h; i++) {
		classCount[Classify(DD, hh->h[i].tree, attribs)] += hh->h[i].alpha;
	}
    
	// Find the class with the highest weighted vote
    int mostCommonClass = 0;
    for(int i = 1; i < DD.numclasses; i++) {
        if(classCount[i] > classCount[mostCommonClass]) {
            mostCommonClass = i;
        }
    }

	free(classCount);	// was leaked on every call in the original
	return mostCommonClass;
}

// ShuffleInst
// -------------------
// Randomly reorders instances [begin ... end] of dataset (inclusive) with a
// Fisher-Yates shuffle driven by rand(); callers seed via srand().
void ShuffleInst(inst * dataset, int begin, int end) {
	for (int idx = begin; idx <= end; idx++) {
		// Pick a uniformly random partner in [idx, end] and swap
		int pick = idx + rand() % (end - idx + 1);
		inst held = dataset[idx];
		dataset[idx] = dataset[pick];
		dataset[pick] = held;
	}
}


//////////////////////////////////////////////////////////////////////////

// Classify
// --------
// Use a decision tree to classify a data instance: walk from 'node' down
// the child matching each split attribute's value until a leaf is reached,
// and return that leaf's classification.  Exits the program on a malformed
// tree (invalid split attribute).
int Classify(DatasetDescription DD, DecisionTree *node, int attribs[])
{
  // If the current node is a leaf, return its classification
  if(node->classifier != NONE) {
    return node->classifier;
  
  // Check that the attribute is valid.  Valid indices are
  // 0 .. numattribs-1, so attrib == numattribs is also an error
  // (the original used '>', allowing an out-of-bounds read of attribs[]).
  } else if(node->attrib < 0 || node->attrib >= DD.numattribs) {
    cout << "Classify: Error in decision tree" << endl;
	exit(1);
  
  // Get the value of the attrib that the node splits on (subtract 
  // 1 to get the zero-indexed value) and call Classify recursively 
  // on this child
  } else {
    int value = attribs[node->attrib] - 1;
	return Classify(DD, node->children[value], attribs);
  }
}

// Learn
// -----
// Learn a decision tree from the given dataset by running ID3 with all
// attributes initially unused.  learnerDepth bounds the tree depth
// (negative means unbounded).  Caller frees the tree with FreeTree.
DecisionTree *Learn(DatasetDescription DD, inst *dataset, int n, int learnerDepth)
{
  // calloc gives us the all-attributes-unused (all-zero) marker array
  int *usedAttribs = (int *) calloc(DD.numattribs, sizeof(int));

  // Delegate tree construction to the ID3 algorithm
  DecisionTree *tree = ID3(DD, dataset, usedAttribs, n, learnerDepth);

  free(usedAttribs);
  return tree;
}

// ID3
// ---
// Implementation of the ID3 algorithm.  Recursively builds a decision tree:
// stop with a leaf when the data is pure, attributes or depth run out, or
// no attribute has positive information gain; otherwise split on the
// highest-gain attribute and recurse on each value's subset.
// 'depth' counts remaining levels (negative means unbounded, since it never
// reaches 0 by decrementing).
DecisionTree *ID3(DatasetDescription DD, inst *dataset, 
int *usedAttribs, int n, int depth)
{
  // Count the (weighted) number of times each classification appears
  float *classCount = CountClasses(DD, dataset, n);

	// get N, the total weight of the dataset
	double N = 0.0;
	for(int i = 0; i < DD.numclasses; i++) {
		N += classCount[i];
  }
  
  // If all classifications are the same (one class holds all the weight),
  // return a leaf with that classification
  for(int i = 0; i < DD.numclasses; i++) {
    if(classCount[i] == N) {
          free(classCount);
          return MakeLeaf(i);
	}
  }

  // Find the most common classification
  int mostCommonClass = 0;
  for(int i = 1; i < DD.numclasses; i++) {
    if(classCount[i] > classCount[mostCommonClass]) {
	  mostCommonClass = i;
	}
  }

  // Count the number of usable attribs
  int attribCount = 0;
  for(int i = 0; i < DD.numattribs; i++) {
    if(!usedAttribs[i]) {
	  attribCount++;
	}
  }

  // If there are no attribs left or depth reached,
	// return a leaf with the most common classification
  if(attribCount == 0 || depth == 0) {
    free(classCount);	// was leaked on this path in the original
    return MakeLeaf(mostCommonClass);
  }
  
  // Find the best attrib to split on 
  int attrib = BestAttrib(DD, dataset, usedAttribs, n);

  // If no attribute had positive gain, return the most common
  // classification
  if(attrib == NONE) {
    free(classCount);	// was leaked on this path in the original
    return MakeLeaf(mostCommonClass);
  }

  // Make a node that splits on this attrib 
  DecisionTree *node = MakeNode(DD, attrib);
	
  // Copy the list of used attributes into a temporary list so siblings
  // are not affected by this branch's choice
  int *tempUsedAttribs = (int *) malloc(DD.numattribs * sizeof(int));
  for(int i = 0; i < DD.numattribs; i++) {
    tempUsedAttribs[i] = usedAttribs[i];
  }

  // Mark the attribute we are splitting on as used
  tempUsedAttribs[attrib] = 1;

  // Allocate memory for a temporary dataset
  inst *tempDataset = (inst *) malloc(n * sizeof(inst));

  // Loop through each possible value of the attribute
  for(int i = 0; i < DD.range[attrib]; i++) {
	// Store all instances in the dataset where the attribute
	// has the current value in the temporary dataset
	// (Remember that we need to subtract one from each value in
	// order to make them zero-indexed)
		int size = 0;
		for(int j = 0; j < n; j++) {
		  if(dataset[j].attribs[attrib] == i + 1) {
		    tempDataset[size++] = dataset[j];
		  }
		}
	
		// If the temporary dataset is empty, create a child leaf 
		// with the most common classification
		if(size == 0) {
		  node->children[i] = MakeLeaf(mostCommonClass);
	  
		// Otherwise, create a child node by calling ID3 recursively
		} else {
		  node->children[i] = ID3(DD, tempDataset, tempUsedAttribs, size, depth-1);
		}	
  }
	
  free(classCount);
  free(tempUsedAttribs);
  free(tempDataset);
  return node;
}

// CountClasses
// ------------
// Generate an array that stores the weighted frequency of each
// classification within the given dataset.  The returned array has
// DD.numclasses entries; the caller must free it.
float *CountClasses(DatasetDescription DD, inst *dataset, int n)
{
  // calloc zero-initializes every per-class tally
  float *tally = (float *) calloc(DD.numclasses, sizeof(float));

  // Each instance contributes its weight to its class's tally
  for(int i = 0; i < n; i++) {
    tally[dataset[i].classifier] += dataset[i].weight;
  }

  return tally;
}

// BestAttrib
// ----------
// Find the best attribute to split on for a dataset: the unused attribute
// with the highest information gain.  Returns NONE when no unused attribute
// has strictly positive gain.
int BestAttrib(DatasetDescription DD, inst *dataset, int *usedAttribs, int n)
{
  // Gain is measured against the entropy of the whole dataset
  double baseEntropy = Entropy(DD, dataset, n);

  int best = NONE;
  double bestGain = 0;

  for(int a = 0; a < DD.numattribs; a++) {
    // Skip attributes already consumed higher up the tree
    if(usedAttribs[a]) {
      continue;
    }

    // gain = entropy before split - expected entropy after split
    double gain = baseEntropy - Remainder(a, DD, dataset, n);

    if(gain > bestGain) {
      bestGain = gain;
      best = a;
    }
  }

  // Return the attribute with the highest gain (NONE if none was positive)
  return best;
}

// Remainder
// ---------
// Calculate the remainder (expected entropy) after splitting on an
// attribute: the weighted average of the entropy of each value's subset,
// weighted by that subset's share of the total instance weight.
double Remainder(int attrib, DatasetDescription DD, inst *dataset, int n)
{
  // Allocate memory for a temporary dataset used to hold each subset
  inst *tempDataset = (inst *) malloc(n * sizeof(inst)); 
  
  double remainder = 0; 

  // Total weight of the whole dataset is loop-invariant; the original
  // recomputed it inside every value iteration.  Compute it once.
  double N = 0.0;
  for(int j = 0; j < n; j++) {
    N += dataset[j].weight;
  }

  // Loop through each possible value of the attribute
  for(int i = 0; i < DD.range[attrib]; i++) {
	// Collect the instances where the attribute has value i+1 (attribute
	// values are one-indexed) and accumulate their weight N_x
	int size = 0;
	double N_x = 0.0;
	for(int j = 0; j < n; j++) {
	  if(dataset[j].attribs[attrib] == i + 1) {
	    tempDataset[size++] = dataset[j];
			N_x += dataset[j].weight;
	  }
	} 
	
	// Check that this subset is not empty
	if(size > 0) {
	  // Calculate the entropy of the subset
	  double entropy = Entropy(DD, tempDataset, size); 
	  
	  // Update the remainder with this subset's weighted contribution
	  remainder += entropy * N_x / N;
	}
  } 
  
  free(tempDataset);
  return remainder;
}


// Entropy
// -------
// Calculate the (weighted) Shannon entropy of a dataset:
//   H = -sum_i p_i * log2(p_i)
// where p_i is class i's share of the total instance weight.
double Entropy(DatasetDescription DD, inst *dataset, int n) 
{
  // Weighted frequency of each classification
  float *classCount = CountClasses(DD, dataset, n);

  // Total weight across all classes
  double total = 0.0;
  for(int c = 0; c < DD.numclasses; c++) {
    total += classCount[c];
  }

  // Accumulate -p*log2(p); classes with zero weight contribute nothing
  double result = 0;
  for(int c = 0; c < DD.numclasses; c++) {
    if(classCount[c] > 0) {
      double p = (double) classCount[c] / total;
      result -= p * log2(p);
    }
  }

  free(classCount);
  return result;
}


// MakeLeaf
// --------
// Create a leaf node carrying the given classification.  Leaves have no
// split attribute (NONE) and no children; caller frees via FreeTree.
DecisionTree *MakeLeaf(int classifier) 
{
  DecisionTree *leaf = (DecisionTree *) malloc(sizeof(DecisionTree));
  leaf->classifier = classifier;
  leaf->attrib = NONE;
  leaf->children = NULL;
  return leaf;
}	


// MakeNode
// --------
// Create an internal tree node that splits on 'attrib', with one child
// slot per possible attribute value, all initialized to NULL.  Internal
// nodes carry no classification (NONE); caller frees via FreeTree.
DecisionTree *MakeNode(DatasetDescription DD, int attrib) 
{
  DecisionTree *node = (DecisionTree *) malloc(sizeof(DecisionTree)); 

  node->classifier = NONE;
  node->attrib = attrib;

  // One child pointer per attribute value, each starting empty
  int numChildren = DD.range[attrib];
  node->children = (DecisionTree **) malloc(numChildren * sizeof(DecisionTree *));
  for(int c = 0; c < numChildren; c++) {
    node->children[c] = NULL;
  }

  return node;
}

// FreeTree
// --------
// Free all memory allocated to a decision tree (recursively frees the
// children array of internal nodes, then the node itself).  NULL is a
// safe no-op.
void FreeTree(DatasetDescription DD, DecisionTree *tree) {
  if(tree == NULL) {
    return;
  }

  // Internal nodes (classifier == NONE) own a child per attribute value
  if(tree->classifier == NONE) {
    for(int c = 0; c < DD.range[tree->attrib]; c++) {
      FreeTree(DD, tree->children[c]);
    }
    free(tree->children);
  }

  free(tree);
}

// FreeHypothesis
// --------------
// Free an array of num_trees Hypothesis entries: each entry's tree, then
// the array itself.
void FreeHypothesis(DatasetDescription DD, Hypothesis *h, int num_trees) {
	for (int t = 0; t < num_trees; t++)
		FreeTree(DD, h[t].tree);
	free(h);
}


// FreeHypotheses
// --------------
// Free a complete ensemble: every member tree, the hypothesis array, and
// the Hypotheses wrapper itself.
void FreeHypotheses(DatasetDescription DD, Hypotheses *hh) {
	for (int t = 0; t < hh->num_h; t++)
		FreeTree(DD, hh->h[t].tree);
	free(hh->h);
	free(hh);
}
