

#!/usr/bin/env python
#from svmutil import *
from classifiers.SVM import SVM
from dataset.DatasetImplementations import DatasetForLibSVM

class GSVM_RU:
	"""Granular SVM with Repetitive Undersampling (GSVM-RU).

	Reduces an imbalanced training set: for every (majority, minority)
	class pair it repeatedly trains an SVM, maps the support vectors
	back to the original training claims and accumulates those
	"informative" examples while accuracy on a held-out validation
	set keeps improving.
	"""

	def __init__(self):
		# claim id -> original category; reserved for the (removed)
		# disabled re-labelling helpers, kept for compatibility.
		self.category_for_claim = {}

	def otra_forma(self, training_claim_list, validation_claim_list):
		"""Build and return the undersampled training set.

		For each (majority, minority) class pair, reduce the majority
		class via reduce_dataset() and merge the surviving claims into
		one de-duplicated dataset.

		:param training_claim_list: full (imbalanced) training claims.
		:param validation_claim_list: claims used to score candidate
			subsets inside reduce_dataset().
		:return: de-duplicated list of informative claims.
		"""
		majority_classes, minority_classes = self.get_majority_minority(training_claim_list)
		new_dataset = []
		for major in majority_classes:
			for minority in minority_classes:
				# BUGFIX: the original passed the undefined name
				# 'validation_set' here (NameError at runtime).
				subset = self.reduce_dataset(majority_classes[major],
											 minority_classes[minority],
											 validation_claim_list)
				new_dataset.extend(subset)
				# drop duplicates introduced by overlapping subsets
				# (claims must be hashable, as in the original code)
				new_dataset = list(set(new_dataset))

		print('longitud de new dataset %d' % len(new_dataset))
		return new_dataset

	def reduce_dataset(self, majority_class, minority_class, validation_claim_list):
		"""Shrink majority_class down to its informative examples.

		Loop: train an SVM on the remaining training pool, map its
		support vectors back to claims, tentatively add them to the
		aggregated dataset, and keep them only while accuracy on
		validation_claim_list improves over the previous round.

		:param majority_class: claims of one majority label.
		:param minority_class: claims of one minority label.
		:param validation_claim_list: held-out claims for scoring.
		:return: minority_class plus the accumulated informative claims.
		"""
		print('length majority class %d' % len(majority_class))
		print('length minority class %d' % len(minority_class))
		agregation_dataset = list(minority_class)
		previous_accuracy = -1.0
		new_accuracy = 0.0
		training_set = list(majority_class) + list(minority_class)
		while new_accuracy > previous_accuracy:
			previous_accuracy = new_accuracy
			svm_example_selector = SVM()
			print('training svm example selector')
			svm_example_selector.train(training_set)
			SVs = svm_example_selector.get_support_vectors()
			print('converting feature to claims')
			informative_claims = self.convert_feature_to_claim(training_set, SVs)
			# negative support vectors: those carrying the majority label
			nlsv = [sv for sv in informative_claims if sv.y == majority_class[0].y]
			print('length nlsv %d' % len(nlsv))
			# NOTE(review): nlsv is computed but only logged; all
			# informative claims (any label) are aggregated below.
			# Confirm whether only nlsv was meant to be added.
			temp = agregation_dataset + informative_claims
			svm_evaluator = SVM()
			print('training svm evaluator')
			svm_evaluator.train(temp)
			new_accuracy = svm_evaluator.test(validation_claim_list)
			print('new accuracy %s' % new_accuracy)
			if new_accuracy > previous_accuracy:
				print('agregando %d a agregation dataset' % len(informative_claims))
				agregation_dataset.extend(informative_claims)
				# membership test relies on claim equality semantics
				training_set = [example for example in training_set
								if example not in informative_claims]

		return agregation_dataset

	def get_majority_minority(self, training_claim_list, majority_threshold=50):
		"""Split claims by label into majority and minority classes.

		:param training_claim_list: claims, each exposing a label ``y``.
		:param majority_threshold: a label with more than this many
			examples is considered a majority class (default 50,
			matching the original hard-coded value).
		:return: (majority_classes, minority_classes) dicts mapping
			label -> list of claims.
		"""
		categories = {}
		for claim in training_claim_list:
			categories.setdefault(claim.y, []).append(claim)

		majority_classes = {}
		minority_classes = {}
		for label, claims in categories.items():
			if len(claims) > majority_threshold:
				majority_classes[label] = claims
			else:
				minority_classes[label] = claims

		return majority_classes, minority_classes

	def convert_feature_to_claim(self, training_set, SVs):
		"""Map support vectors (sparse feature dicts) back to claims.

		A claim matches a support vector when every (index, value)
		pair of the claim's sparse features ``X`` appears in the
		support vector; the first match per SV is kept.

		:param training_set: claims exposing a sparse-dict ``X``.
		:param SVs: support vectors as {feature_index: value} dicts.
		:return: list of matched claims, one per matched SV.
		"""
		output = []
		for sv in SVs:
			# .items() works on both Py2 and Py3 (was .iteritems())
			sv_items = set(sv.items())
			for claim in training_set:
				if not set(claim.X.items()) - sv_items:
					output.append(claim)
					break
		return output


if __name__ == '__main__':
	# Baseline: train and score an SVM on the full (imbalanced) set.
	data = DatasetForLibSVM()
	training_set = data.get_training_set()
	print('length training set %d' % len(training_set))
	validation_set = data.get_validation_set()
	svm = SVM()
	svm.train(training_set)
	accuracy = svm.test(data.get_test_set())
	print(' previous acurracy: %s' % accuracy)

	# Undersample with GSVM-RU, then retrain on the reduced set and
	# compare test accuracy against the baseline.
	data_selector = GSVM_RU()
	new_training_set = data_selector.otra_forma(training_set, validation_set)
	print('length new training set %d' % len(new_training_set))
	#data.save_dataset('new_training_set.dat', new_training_set)

	svm = SVM()
	svm.train(new_training_set)
	accuracy = svm.test(data.get_test_set())
	print('new acurracy: %s' % accuracy)
