"""
	As a matter of convention, each of these methods will take in two parameters.

	dataset - A Dataset object.
"""
import math
from collections import Counter

def iterative_dichotomiser_3(decisionTree, dataset, attributeNames):
	"""Select the attribute with the highest information gain (ID3).

	decisionTree - unused here; kept so the signature matches the other
		selection methods in this module.
	dataset - A Dataset object: an iterable of records where record[0]
		is the sequence of attribute values for that record.
	attributeNames - attribute names, parallel to the values in record[0].

	Returns (bestAttributeName, gain), or None when there are no
	attributes or the best gain falls below the 1.5 cutoff.
	"""
	entropyValues = []
	# Compute the informational gain of each attribute
	for atrIndex in range(len(attributeNames)):
		subsetEntropy = 0.0
		entropy = _calculate_entropy(dataset, atrIndex)

		# Determine the frequencies of each value of the targeted attribute.
		# Reset per attribute: previously this dict lived outside the loop,
		# so counts leaked from one attribute into the next and every
		# probability below was computed against contaminated totals.
		valueFrequencies = {}
		for record in dataset:
			if not(record[0][atrIndex] in valueFrequencies):
				valueFrequencies[record[0][atrIndex]] = 0.0
			valueFrequencies[record[0][atrIndex]] += 1.0

		# Total count is invariant over the key loop -- compute it once.
		totalCount = sum(valueFrequencies.values())

		# Compute the weighted entropy of the partitions induced by the
		# targeted attribute.
		# NOTE(review): each subset is filtered to a single value of
		# atrIndex, so measuring the entropy of atrIndex itself within it
		# is always 0 -- this probably should measure the class label's
		# entropy instead. Preserved as-is; confirm the intended target.
		for key in valueFrequencies:
			probability = valueFrequencies[key] / totalCount
			subset = [item for item in dataset if item[0][atrIndex] == key]
			subsetEntropy += probability * _calculate_entropy(subset, atrIndex)

		# Compute and record the informational gain of the targeted attribute
		entropyValues.append(entropy - subsetEntropy)

	# No attributes -> nothing to select (max() would raise on an empty list)
	if not entropyValues:
		return None

	# Ignore gain below 1.5
	bestGain = max(entropyValues)
	if bestGain < 1.5:
		return None

	bestAttributeName = attributeNames[entropyValues.index(bestGain)]
	return bestAttributeName, bestGain

def _calculate_entropy(dataset, targetAtrIndex):
	valueFrequencies = {}
	entropy = 0.0

	# Count the frequencies of the values of targeted attribute in the dataset
	for record in dataset:
		if (not (record[0][targetAtrIndex] in valueFrequencies)):
			valueFrequencies[record[0][targetAtrIndex]] = 0.0
		valueFrequencies[record[0][targetAtrIndex]] += 1.0

	# Aggregate the partial entropy values of each value of the targeted attribute
	for frequency in valueFrequencies.values():
		entropy += (-frequency/len(dataset)) * math.log(frequency/len(dataset), 2) 
		
	return entropy


def TestMethod(decisionTree, dataset, attributeNames):
	"""Trivial selection method for testing: always pick the first attribute.

	decisionTree and dataset are accepted only to match the signature of
	the other selection methods in this module; neither is consulted.
	"""
	firstAttribute = attributeNames[0]
	return firstAttribute