import math, random
import sys,os

import hmm, numpy #for HMM
import pickle

def weigh(sample):
	"""Return the total boosting weight of a collection of examples."""
	return sum(instance.weight for instance in sample)

#returns a reinstantiated classifier read from file
def getClassifierFromFile(filename):
	"""Load a pickled classifier from `filename` and return it.

	NOTE(review): pickle.load executes arbitrary code from the file --
	only use on trusted, locally written classifier files.
	"""
	#pickle data is binary: 'rb' (instead of 'r') is required on Python 3 and on
	#Windows, and the with-statement closes the file even if unpickling fails
	with open(filename, 'rb') as fr:
		return pickle.load(fr)

#writes a trained classifier to file
def writeClassifierToFile(classifier, filename):
	"""Serialize a trained classifier to `filename` with pickle."""
	#binary mode matches the binary read used when loading the pickle back,
	#and the with-statement guarantees the handle is closed on error
	with open(filename, 'wb') as fh:
		pickle.dump(classifier, fh)

def normalizeWeights(dataSet):
	"""Rescale every example's weight in place so the weights sum to 1.0."""
	total = weigh(dataSet)
	for example in dataSet:
		example.weight = example.weight / total

#represents an example in all the classifiers
class Example:
	"""A single (possibly unlabelled) feature vector with a boosting weight.

	features -- list of feature values (the classifiers assume 0/1 ints)
	label    -- class label, or None for an unclassified instance
	weight   -- boosting weight, defaults to 1.0
	"""
	def __init__(self, features, label = None, weight = 1.0):
		#isinstance is the idiomatic type check (also accepts list subclasses)
		assert isinstance(features, list)
		self.features = features
		self.label = label
		self.weight = weight
	
	def __str__(self):
		#label, a tab, then the features concatenated with no separator --
		#the exact line format DataSource parses back in
		return str(self.label) + "\t" + ("".join([str(feature) for feature in self.features]))

#loads data from the store
class DataSource:
	"""Loads the example store: one line per example, "label<TAB>digitstring"."""
	def __init__(self, path):
		self.priorsByLabel = {}
		self.examples = set()
		if os.path.isfile(path):
			#with-statement guarantees the file is closed even if a line fails to parse
			with open(path, 'r') as memories:
				for memory in memories:
					pieces = memory.strip().split("\t")
					label = pieces[0]
					if label not in self.priorsByLabel: self.priorsByLabel[label] = None
					#each character of the feature string is one 0/1 feature
					self.examples.add(Example([int(ch) for ch in pieces[1]], label))
		#prior of a label = fraction of examples carrying it
		for label in self.priorsByLabel:
			matching = [example for example in self.examples if example.label == label]
			self.priorsByLabel[label] = float(len(matching)) / float(len(self.examples))
	
	def __str__(self): return "\n".join([str(example) for example in self.examples])

### ABSTRACT SUPERCLASSES ###
class Classifier:
	"""Abstract base class: subclasses implement train() and classify()."""
	def classify(self, instance):
		#the base implementation knows nothing: no label, zero confidence
		return None, 0.0
	
	def train(self, trainingSet):
		assert type(trainingSet) == type(set())
	
	def test(self, testingSet):
		"""Weighted accuracy: weight of correctly classified examples over total weight."""
		assert type(testingSet) == type(set())
		correct = sum([example.weight if self.classify(example)[0] == example.label else 0.0 for example in testingSet])
		return correct / weigh(testingSet)
	
	def __str__(self): return "Classifier"

#never got better than 60%
class NaiveBayesClassifier(Classifier):
	"""Naive Bayes over binary features, using per-example boosting weights."""
	def train(self, trainingSet):
		print("TRAINING BAYES... " + str(len(trainingSet)))
		
		normalizeWeights(trainingSet)
		#priors: total weight per label (weights sum to 1.0 after normalization)
		self.priorsByLabel = {}
		for example in trainingSet:
			if example.label not in self.priorsByLabel: self.priorsByLabel[example.label] = 0.0
			self.priorsByLabel[example.label] += example.weight
		assert abs(sum(self.priorsByLabel.values()) - 1.0) < 1e-8
		
		assert len(self.priorsByLabel) > 0
		
		#per label, the summed weight of its examples with feature[a] == 1
		#NOTE(review): these are joint weights P(feature=1, label), not conditionals
		#P(feature=1 | label) -- dividing by the prior may have been intended; confirm
		self.posteriorsByAttribute = {}
		instanceLength = len(list(trainingSet)[0].features)
		for label in self.priorsByLabel:
			examplesMatching = [example for example in trainingSet if example.label == label]
			self.posteriorsByAttribute[label] = [sum([example.weight for example in examplesMatching if example.features[a] == 1]) for a in range(instanceLength)]
			assert False not in map(lambda v: not v < 0.0 and not v > 1.0, self.posteriorsByAttribute[label])
	
	def classify(self, instance):
		"""Return (label, score) for the highest-scoring label; ties go to the later label."""
		best = None
		for label in self.priorsByLabel:
			candidate = (label, self.getLabelProbabilityConditional(label, instance))
			#strict '>' means a tie moves on to the later candidate, as the original reduce did
			if best is None or not best[1] > candidate[1]: best = candidate
		return best
	
	def getLabelProbabilityConditional(self, label, instance):
		"""prior(label) times the product over features of the stored per-feature weight
		(or its complement when the instance lacks the feature)."""
		probability = self.priorsByLabel[label]
		for hasAttribute, attributePosterior in zip(instance.features, self.posteriorsByAttribute[label]):
			probability *= attributePosterior if hasAttribute == 1 else (1.0 - attributePosterior)
		return probability
	
	def __str__(self):
		#fixed: the original referenced a bare 'priorsByLabel' (NameError) and a
		#misspelled 'proir' variable, so __str__ could never run
		return "priorsByLabel\t" + " ".join([str(label) + ":" + str(prior) for label, prior in self.priorsByLabel.items()])

class HMMClassifier(Classifier):
	"""One HMM per label; classifies by the model with the greatest log-likelihood."""
	def __init__(self, statesCount, emissionsCount):
		self.states = range(statesCount)
		self.emmisions = range(emissionsCount)
		self.hmms = set()
	
	def train(self, trainingSet):
		print("TRAINING HMM... " + str(len(trainingSet)))
		#break training data into lists of numpy observation arrays, one list per label
		trainingSetFeatureArraysByLabel = {}
		for example in trainingSet: 
			if example.label not in trainingSetFeatureArraysByLabel: trainingSetFeatureArraysByLabel[example.label] = []
			trainingSetFeatureArraysByLabel[example.label].append(numpy.array(example.features))
		#create and train an HMM for each label - trains on obs sequences via BaumWelch
		self.hmms = set([hmm.HMM(self.states, self.emmisions, label, trainingSetFeatureArraysByLabel[label]) for label in trainingSetFeatureArraysByLabel])
	
	def classify(self, instance):
		instanceFeaturesArray = numpy.array(instance.features)
		likelihoodsByLabel = {}
		#loop variable renamed to 'model': the original 'for hmm in ...' shadowed the hmm module
		for model in self.hmms: likelihoodsByLabel[model.label] = model.log_prob_of_sequence(instanceFeaturesArray)
		
		#label whose model assigns the greatest log-likelihood; first wins ties,
		#matching the original reduce's 'not <' comparison
		labelMostLikely = max(likelihoodsByLabel, key=likelihoodsByLabel.get)
		
		#NOTE(review): log-likelihoods are typically negative, so this ratio is an unusual
		#'confidence' measure -- preserved from the original; confirm intent before relying on it
		return labelMostLikely, likelihoodsByLabel[labelMostLikely] / sum(likelihoodsByLabel.values())

class NearestCentroidClassifier(Classifier):
	#this is an adaptation of k-means using a cluster assignment to classify
	
	def train(self, trainingSet):
		"""Compute one weighted centroid per label."""
		print("TRAINING CENTROID... " + str(len(trainingSet)))
		examplesByLabel = {}
		featuresCount = len(list(trainingSet)[0].features)
		#separate by label
		for example in trainingSet: 
			if example.label not in examplesByLabel: examplesByLabel[example.label] = set()
			examplesByLabel[example.label].add(example)
		
		self.centroidsByLabel = {}
		#centroid for each label: weighted mean of THAT label's examples only
		#(fixed: the original summed over the whole trainingSet, so every centroid
		#mixed in examples of all the other labels)
		for label in examplesByLabel: 
			members = examplesByLabel[label]
			self.centroidsByLabel[label] = \
			[sum([example.weight * float(example.features[i]) for example in members]) / weigh(members) for i in range(featuresCount)]
	
	def classify(self, instance):
		def cartesianDistanceNormalized(vector1, vector2):
			assert len(vector1) == len(vector2)
			#squared distance divided by the dimension; monotone in the true
			#distance, so the ranking of centroids is unaffected
			return sum([float(c1 - c2) ** 2.0 for c1, c2 in zip(vector1, vector2)]) / float(len(vector1))
		
		#distance from the instance to each label's centroid
		distancesByLabel = {}
		for label in self.centroidsByLabel: distancesByLabel[label] = cartesianDistanceNormalized(self.centroidsByLabel[label], instance.features)
		
		#closest centroid wins; ties keep the earlier label, matching the original reduce
		labelBest = min(distancesByLabel, key=distancesByLabel.get)
		
		#return a tuple of best label and 'confidence' - the confidence calc only makes sense for binary classifications
		return labelBest, 1.0 - (distancesByLabel[labelBest] / max(distancesByLabel.values()))

class ID3Classifier(Classifier):
	"""Binary-feature ID3 decision tree; maxDepth=None means unlimited depth."""
	def __init__(self, maxDepth = 1, defaultLabel = None):
		self.maxDepth = maxDepth
		self.majorityLabel = defaultLabel
		self.prune()
	
	def prune(self):
		#reset to an untrained leaf
		self.splitFeature = None
		self.branches = None
		self.majorityWeight = 0.0
		self.isLeaf = True
	
	def train(self, trainingSet):
		print("TRAINING ID3... %s %s" % (len(trainingSet), self.maxDepth if self.maxDepth != None else "(NO MAX)"))
		self.prune()
		
		if len(trainingSet) == 0: return
		
		def entropy(sample, labels):
			#weighted entropy with log base len(labels), so the maximum is 1.0
			assert len([problem for problem in sample if problem.weight < 0.0]) == 0
			if len(sample) == 0 or len(labels) == 0: return 0.0
			proportions = [weigh([match for match in sample if match.label == label])/weigh(sample) for label in labels]
			return sum([-proportion * math.log(proportion, len(labels)) if proportion > 0.0 else 0.0 for proportion in proportions]) 
		
		labels = set([example.label for example in trainingSet])
		examplesByLabel = {}
		for label in labels: examplesByLabel[label] = set([example for example in trainingSet if example.label == label])
		#majority label by total weight (fixed: the original compared against the stale
		#loop variable 'label' instead of the running 'majority' accumulator);
		#ties move to the later candidate, matching the intended reduce
		firstCandidate = True
		for candidate in labels:
			if firstCandidate or not weigh(examplesByLabel[self.majorityLabel]) > weigh(examplesByLabel[candidate]):
				self.majorityLabel = candidate
			firstCandidate = False
		self.majorityWeight = weigh(examplesByLabel[self.majorityLabel]) / weigh(trainingSet)
		#a node is a leaf when the sample is pure or the depth budget is spent
		self.isLeaf = (len(labels) == 1 or (type(self.maxDepth) == type(int()) and not self.maxDepth > 0))
		
		if self.isLeaf: return
		
		#for each feature, the subset of examples where that feature is set
		examplesByFeature = [set([example for example in trainingSet if example.features[f] == 1]) for f in range(len(list(trainingSet)[0].features))]
		
		entropyExamples = entropy(trainingSet, labels)
		
		#information gain of splitting on each feature
		gainsBySplitFeature = [entropyExamples - (entropy(examplesByFeature[f], labels) * weigh(examplesByFeature[f]) / weigh(trainingSet) + entropy((trainingSet - examplesByFeature[f]), labels) * weigh(trainingSet - examplesByFeature[f]) / weigh(trainingSet)) for f in range(len(examplesByFeature))]
		
		#split on the best-gain feature; ties go to the later feature, as before
		self.splitFeature = 0
		for f in range(len(gainsBySplitFeature)):
			if not gainsBySplitFeature[self.splitFeature] > gainsBySplitFeature[f]: self.splitFeature = f
		
		if len(examplesByFeature[self.splitFeature]) == 0 or len(examplesByFeature[self.splitFeature]) == len(trainingSet):
			#a degenerate split separates nothing: stay a leaf
			self.isLeaf = True
			self.splitFeature = None
		else:
			#branch 0 takes feature==0 examples, branch 1 takes feature==1 examples
			self.branches = ( \
			self.__class__(None if type(self.maxDepth) != type(int()) else self.maxDepth - 1, self.majorityLabel), \
			self.__class__(None if type(self.maxDepth) != type(int()) else self.maxDepth - 1, self.majorityLabel) \
			)
			
			self.branches[0].train(trainingSet - examplesByFeature[self.splitFeature])
			self.branches[1].train(examplesByFeature[self.splitFeature])
	
	def classify(self, instance):
		"""Walk the tree; return (label, weight-fraction of the majority at the leaf)."""
		if self.isLeaf: return self.majorityLabel, self.majorityWeight
		
		assert self.branches != None
		assert instance.features[self.splitFeature] in [0, 1]
		
		return self.branches[instance.features[self.splitFeature]].classify(instance)

#encapsulates a transformed classifier
class ClassifierTransformed(Classifier):
	"""Wraps a classifier so every example is run through a Transformer first."""
	def __init__(self, classifier, transformer):
		self.classifier = classifier
		self.transformer = transformer
	
	def classify(self, instance):
		#fixed: the original referenced a bare 'transformer' (NameError at runtime)
		return self.classifier.classify(self.transformer.transform(instance))
	
	def train(self, trainingSet):
		#fixed: same missing 'self.' on 'transformer' as in classify
		trainingSet = set([self.transformer.transform(example) for example in trainingSet])
		self.classifier.train(trainingSet)

#reduces an example to only certain features (presumably, the significant ones!)
class Transformer:
	"""Applies a feature-selecting transformation to an Example."""
	@classmethod
	def rowSelector(cls, r, rowLength):
		"""Transformation keeping only row r of a rowLength-wide feature grid."""
		def pick(instance):
			return Example(instance.features[r*rowLength : (r+1)*rowLength], instance.label, instance.weight)
		return pick
	
	@classmethod
	def columnSelector(cls, c, rowLength):
		"""Transformation keeping only column c of a rowLength-wide feature grid."""
		def pick(instance):
			kept = [feature for i, feature in enumerate(instance.features) if i % rowLength == c]
			return Example(kept, instance.label, instance.weight)
		return pick
	
	@classmethod
	def componentsSelector(cls, components):
		"""Transformation keeping only the listed feature indices, in order."""
		def pick(instance):
			return Example([instance.features[i] for i in components], instance.label, instance.weight)
		return pick
	
	def __init__(self, transformation):
		self.transformation = transformation
	
	def transform(self, instance):
		return self.transformation(instance)

class BoostedCommittee(Classifier):
	"""AdaBoost-style committee: trains members on re-weighted data, votes by weighted confidence."""
	def __init__(self, classifiers = None):
		#fixed: the original default 'classifiers = set()' was a shared mutable default;
		#boost()'s in-place '-=' would mutate it and leak members across instances
		if classifiers is None: classifiers = set()
		assert type(classifiers) == type(set())
		self.classifiers = classifiers
		
	def train(self, trainingSet):
		assert type(trainingSet) == type(set())
		#normally, we boost over only the training set, anyway ...
		self.boost(trainingSet, [trainingSet])
	
	#... but we give the option of boosting over two dataSets
	def boost(self, trainingSet, testingSets):
		print("BOOSTING... %s %s" % (len(trainingSet), len(self.classifiers)))
		assert type(trainingSet) == type(set()) and type(testingSets) == type(list())
		assert False not in [type(testingSet) == type(set()) for testingSet in testingSets]
		
		#consumes the last testing set (intentionally mutates the caller's list)
		testingSet = testingSets.pop()
		normalizeWeights(testingSet)
		
		for classifier in self.classifiers:
			normalizeWeights(trainingSet)
			
			#propagates boosting to subsets, if we have any testing sets left
			if isinstance(classifier, BoostedCommittee) and len(testingSets) > 0: classifier.boost(trainingSet, testingSets)
			else: classifier.train(trainingSet)
			
			#re-weight the classifier (using the testingSet, not the trainingSet)
			classifierError = (1.0 - classifier.test(testingSet)) + 1e-6
			if classifierError > 0.5: classifier.weight = 0.0
			else: classifier.weight = (1.0/2.0) * math.log((1.0 - classifierError) / classifierError)
			
			#re-weight the examples: up-weight mistakes, down-weight correct answers
			for example in trainingSet: example.weight *= math.exp(classifierError if (classifier.classify(example)[0] != example.label) else -classifierError)
			print("%s %s" % (-classifierError, classifier.weight))
		
		#drop members that did worse than chance (weight zeroed above)
		self.classifiers -= set([classifier for classifier in self.classifiers if classifier.weight == 0.0])
		assert len(self.classifiers) > 0
	
	def classify(self, instance):
		"""Weighted vote: returns (label, normalized confidence), or (None, 0.0) for an empty committee."""
		labelProbabilities = {}
		for classifier in self.classifiers:
			label, probability = classifier.classify(instance)
			if label not in labelProbabilities: labelProbabilities[label] = 0.0
			labelProbabilities[label] += probability * classifier.weight
		if len(labelProbabilities) == 0: return None, 0.0
		#highest combined score wins; ties go to the later label, as the original reduce did
		classification = None
		first = True
		for label in labelProbabilities:
			if first or not labelProbabilities[classification] > labelProbabilities[label]: classification = label
			first = False
		totalProbability = sum(labelProbabilities.values())
		if totalProbability > 0.0: return classification, labelProbabilities[classification] / totalProbability
		return None, 0.0

#still got no better than 60%
#formerly 'Cohort'
class TransformersCommittee(BoostedCommittee):
	"""A BoostedCommittee with one transformed classifier per supplied transformer."""
	def __init__(self, ClassifierClass, transformers = None):
		#fixed: default was a shared mutable 'set()' default argument
		if transformers is None: transformers = set()
		assert type(transformers) == type(set())
		self.transformers = transformers
		#one fresh ClassifierClass instance per transformer, each wrapped so it
		#only ever sees transformed examples
		self.classifiers = set([ClassifierTransformed(ClassifierClass(), transformer) for transformer in self.transformers])