'''
Created on 12/01/2013

@author: Jorge
'''
from Classifier import Classifier
from sklearn import svm
import sklearn
import sys
import numpy as np
np.set_printoptions(threshold=sys.maxint)
from VectorizedClaim import Example
from dataset.DatasetImplementations import CustomDataSet
from classifiers.SVMTreeExample import SVMTreeExample
import scipy
from classifiers.OptimizateProbabilityParam import OptimizateProbabilityParam
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score

class MULTISVM(Classifier):
    '''
    classdocs
    '''


    def __init__(self):
        '''
        Constructor
        '''
        self.models = {}
        
        self.sigmoid_param={}
    def train(self, train_set):
        print 'training...'
        
        X,Y,upper_Y = self._get_X_Y_upper_Y_vectors(train_set)
        
        init_A=0
        init_B=0 #initial param for optimization
        
        classes = {}
        for i in range(len(Y)):
            try:
                (classes[Y[i]]).append(X[i])
            except KeyError:
                classes[Y[i]] = [ X[i] ]
        
        categories = classes.keys()
        num_classes = len(categories)
        X_by_class = {}
        for c in categories:
            
            rest_class = classes.keys()
            rest_class.remove(c)
            rest = []
            for list_claims in [classes[t] for t in rest_class ]:
                for claim in list_claims:
                    rest.append(claim)
            
            X_submodel =  [t for t in classes[c] ] + rest
            Y_submodel = [1]*len(classes[c]) + [0]*len(rest)
            print 'original features len ', len(X_submodel[0])
            X_submodel = self.select_features(X_submodel,c)
            X_by_class[c]=X_submodel
            print 'features len', len(X_submodel[0])
            """for x in X:
                print np.isfinite(sum(x))
                print np.isfinite(x)"""
            self.models[c] = self.get_optimal_model(X_submodel, Y_submodel)
            #self.test_model_jaja(self.models[c], X_submodel, Y_submodel)
            opt = OptimizateProbabilityParam(self.models[c], X_submodel,Y_submodel)
            print 'antes de optimizar ', str(opt.minimaze([init_A,init_B]))
            hessian = opt.hessian_matriz([init_A,init_B])
            grad = opt.gradient([init_A,init_B])
            grad.shape = (2,1)
            print 'hessian ', hessian
            print 'hessian determinate',  np.linalg.det (hessian)
            print 'gradient ', grad.shape
            d= -1*np.matrix(hessian).I*np.matrix(grad)
            t1  = np.array(grad)
            t1.shape= (2,1)
            t2 = np.array(d)
            t2.shape = (2,1)
            print 'esto! ', np.sum(t1*t2)
            A, B = scipy.optimize.fmin_ncg(f=opt.minimaze, x0=np.array([init_A,init_B]), fprime=opt.gradient, fhess=opt.hessian_matriz)
            self.sigmoid_param[c]= (A, B)
    
        
    def test(self, test_set):
        
        print 'testing...'
        X,Y,upper_Y = self._get_X_Y_upper_Y_vectors(test_set)

        classes = self.models.keys()
        #num_classes = [  for c in classes]
        matrix = np.zeros([len(classes), len(X)])
        i=0
        for model_name in classes:
            A, B = self.sigmoid_param[model_name]
            X_temp = [ list(t) for t in X]
            print 'original features len ', len(X_temp[0])
            X_temp = self.select_features(X_temp,model_name)
            print 'features len' , len(X_temp[0])
            temp = self.models[model_name].predict_proba(X_temp)[:,1]
            temp = (1+0.0)/(1+ np.exp(A*temp+B))
            matrix[i,:] = temp
            i+=1
        #print 'prob matriz ',matrix.shape,'\n', matrix
        for i in range(matrix.shape[0]):
            matrix[:,i] = (matrix[:,i]+0.0)/sum(matrix[:,i])
        
        index_maxs= matrix.argmax(axis=0) # obtenemos mayor elemento por columna
        predictions = []
        for index in index_maxs:
            predictions.append(classes[index])
        
        print ' f1: ', sklearn.metrics.f1_score(Y, predictions)
        label= [Example.categories[i] for i in classes]
        print ' confusion matrix \n' , sklearn.metrics.confusion_matrix(Y, predictions)
        print classification_report(Y, predictions)
        
        
    def select_features(self, X, num_category):
        
        c = SVMTreeExample.categories[num_category]
        print 'c=', c
        indexes = self.get_index_features(c)
        new_X = []
        for e in X:
            new_X.append( [f for index,f in enumerate(e) if index in indexes ] )
        return new_X
            
    def get_index_features(self, c):
        input = open('../data/index_features/'+c+'.dat', "r")
        line = input.readline()
        indexes = [int(i)  for i in line[1: len(line)-1].split(',')]
        input.close()
        return indexes
    
    def test_model_jaja(self, model, X, Y):
        pred = model.predict(X)
        print 'f1 score ',sklearn.metrics.f1_score(Y, pred)
        
    def get_optimal_model(self, X, Y):
        C = 2.0**np.array(range(-5,16,2))
        #gamma = 2.0**np.array(range(-15,4,2))
        tuned_parameters = [{'kernel': ['linear'],
                     'C': C}]
        clf = GridSearchCV(svm.SVC(C=1, probability=True), tuned_parameters, score_func=f1_score)
        clf.fit(X, Y, cv=5)
        print classification_report(Y, clf.predict(X))
        return clf.best_estimator_
        
        
    
    
    
if __name__ == '__main__':
    # Train on the training + validation splits, evaluate on the held-out
    # test split.
    multi_svm = MULTISVM()

    dataset = CustomDataSet()
    train_examples = dataset.get_training_set()
    train_examples.extend(dataset.get_validation_set())
    held_out = dataset.get_test_set()

    train_examples, held_out = Classifier.convert_example(train_examples, held_out)

    multi_svm.train(train_examples)
    multi_svm.test(held_out)