# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 15:13:34 2014

@author: ben
"""
import sklearn.naive_bayes as bayes
import sklearn.metrics as metrics
import numpy as np
import itertools
from Timm.stats import *
from Timm.about import Num
import scipy.stats as ss
from library import getAttributeValuesFromSplits2
import matplotlib.pyplot as plt
from itertools import cycle

# Colour/marker rotations shared by the plotting code.  The cycle() iterators
# are module-level globals so successive plots continue the rotation;
# plot_temporal_results resets colorcycle explicitly before each figure.
colors=['r', 'g', 'b', 'c', 'm', 'k', 'y']
markers=['^','s', 'p', 'h', '8', 'o']
colorcycle=cycle(colors)
markercycle=cycle(markers)

def FI_disc(train, test, label='class'):
    """Fayyad-Irani (entropy) discretization of numeric attributes.

    Split points are learned from *train* only and applied to both sets.
    Returns (discTrain, discTest): fresh data-set copies whose numeric,
    non-class attributes have been made discrete, with all instances
    discretized accordingly.
    """
    # Fresh (empty) copies of both data sets; instances are added back at
    # the end, once the attribute splits are in place.
    discTrain = train.createDataSet()
    discTest=test.createDataSet()
    for i in range(len(train.attributes)):
        discAttribute = discTrain.attributes[i]
        testAttribute=discTest.attributes[i]
        # Clear any splits the copies inherited.
        discAttribute.splits = None
        testAttribute.splits = None
        train.attributes[i].checkAttributeId(discAttribute)
        test.attributes[i].checkAttributeId(testAttribute)
        # The last attribute is assumed to be the class column and is never
        # discretized -- TODO confirm against the data-set layout.
        if train.attributes[i].isNumeric and i != len(train.attributes)-1:
            pairs = train.GetPairs(i, label, True)
            # Sorts descending by value.  NOTE(review): EW_disc sorts
            # ascending -- confirm getColumSplitsForFayyadIrani really
            # expects descending order.
            pairs.sort(key = lambda pair: 0-pair.x)
            discAttribute.splits = train.getColumSplitsForFayyadIrani(pairs)
            testAttribute.splits=discAttribute.splits[:]  # own copy for the test side
            discAttribute.isNumeric = False
            testAttribute.isNumeric=False
            discAttribute.attributeValues = getAttributeValuesFromSplits2(discAttribute.splits)
            testAttribute.attributeValues = getAttributeValuesFromSplits2(testAttribute.splits)

    # Re-add instances, discretized by their own (train/test) data set.
    for instance in train.instances:
        discInstance = discTrain.discretizeInstance(instance)
        discTrain.addInstance(discInstance)
        
    for instance in test.instances:
        discInstance = discTest.discretizeInstance(instance)
        discTest.addInstance(discInstance)

    return discTrain, discTest

def EW_disc(train, test, nrBins, label='class'):
    """Equal-width discretization of numeric attributes into *nrBins* bins.

    Bin boundaries are learned from *train* only and applied to both sets.
    Mirrors FI_disc: returns (dataSet, testSet) copies whose numeric,
    non-class attributes are now discrete and whose instances have been
    discretized with the shared splits.
    """
    dataSet = train.createDataSet()
    testSet=test.createDataSet()
    for i in range(len(train.attributes)):
        discAttribute = dataSet.attributes[i]
        testAttribute=testSet.attributes[i]
        # Clear inherited splits on BOTH copies (FI_disc does this too;
        # previously only the train-side attribute was reset here).
        discAttribute.splits = None
        testAttribute.splits = None
        train.attributes[i].checkAttributeId(discAttribute)
        test.attributes[i].checkAttributeId(testAttribute)
        # The last attribute is assumed to be the class column and is never
        # discretized -- TODO confirm against the data-set layout.
        if train.attributes[i].isNumeric and i != len(train.attributes)-1:
            pairs = train.GetPairs(i, label, True)
            pairs.sort(key = lambda pair: pair.x)
            discAttribute.splits = train.getColumnSplitsForEqualWidth(pairs, nrBins)
            # Give the test attribute its own copy of the split list so a
            # later mutation of one cannot silently alter the other
            # (matches FI_disc, which also copies).
            testAttribute.splits=discAttribute.splits[:]
            discAttribute.isNumeric = False
            testAttribute.isNumeric = False
            discAttribute.attributeValues = getAttributeValuesFromSplits2(discAttribute.splits)
            testAttribute.attributeValues = getAttributeValuesFromSplits2(testAttribute.splits)

    for instance in train.instances:
        discInstance = dataSet.discretizeInstance(instance)
        dataSet.addInstance(discInstance)

    for instance in test.instances:
        # BUG FIX: test instances were previously discretized with dataSet
        # (the train-side copy) instead of testSet; FI_disc correctly uses
        # the test-side data set.
        discInstance = testSet.discretizeInstance(instance)
        testSet.addInstance(discInstance)

    return dataSet, testSet

class ParamTuneLearner:
    """Base class for tunable learners used by ParamTuneStudy.

    Subclasses must set learnerName / params / default_params and override
    callLearner().
    """
    learnerName=""
    # params: grid spec, name -> ['values', v1, ...] or ['range', lo, hi]
    params={}
    # default_params: concrete values used when no tuning is requested
    default_params={}
    active_params={}
    
    def __init__(self):
        pass
        
    def callLearner(self, runParams, test_set, train_set):
        """Train on train_set with runParams, evaluate on test_set.

        Must return an ExperimentalResults; subclasses override this.
        """
        # NotImplementedError subclasses RuntimeError/Exception, so any
        # caller catching Exception still works.  (Also fixes the
        # "implimented" typo in the message.)
        raise NotImplementedError("callLearner not implemented for "+self.learnerName+" please extend ParamTuneLearner class.")
        
    def callLearnerDefault(self, test_set, train_set):
        """Run the learner with its default (untuned) parameters."""
        return self.callLearner(self.default_params, test_set, train_set)
        
    
 
class ParamTuneStudy(object):
    learners={}
    default_cross_val_folds=2
    default_cross_val_repeats=2
    default_param_permutations=2
    default_percent_test=0.333
    number_of_threads=4
    results=None  
    
    def __init__(self, dataSet, setname):
        self.learners={'BernoulliBayes':BernoulliBayes(),
                  'MultinomialBayes':MultinomialBayes(),
                  'GaussianBayes':GaussianBayes(),
                  'BenBayes':BenBayes()}
        self.dataSet=dataSet
        self.setName=setname
    
    def printResults(self, params=['rank', 'pD', 'pF', 'f', 'g', 'precision', 'recall', 'ROC-AUC'], results=None, setName=None):
        if not setName:
            setName=self.setName
        if type(setName)==list:
            setName=setName[0]
        phil=open('logs/'+setName+'.txt', 'a')
        phil2=open('logs/all.txt', 'a')
        print setName
        phil.write(setName+'\n')
        phil2.write(setName+'\n')
        phil.write(' '.join([i.ljust(10) for i in params]+['Learner'.ljust(10)])+'\n')
        phil2.write(' '.join([i.ljust(10) for i in params]+['Learner'.ljust(10)])+'\n')
        print(' '.join([i.ljust(10) for i in params]+['Learner'.ljust(10)]))
        if results:
            for result in results:
                phil.write(result.getPrint(params)+result.learnerName+' '+str(result.learnerParams)+'\n')
                phil2.write(result.getPrint(params)+result.learnerName+' '+str(result.learnerParams)+'\n')
                print result.getPrint(params)+result.learnerName+' '+str(result.learnerParams)

        else:
            r1=self.results[0].metrics['rank']
            to_be_printed=[i for i in self.results if i.learnerName=="SciKit-Learn Gaussian NB"]
            to_be_printed+=[i for i in self.results ]#if i.metrics['rank']==r1]
            for result in to_be_printed:
                phil.write(result.getPrint(params)+result.learnerName+' '+str(result.learnerParams)+'\n')
                phil2.write(result.getPrint(params)+result.learnerName+' '+str(result.learnerParams)+'\n')
                print result.getPrint(params)+result.learnerName+' '+str(result.learnerParams)
        phil.close()
        phil2.write('\n')
        phil2.close()
        print " "
                  
    def gridSearch(self, gridDefinition=None, cross_val_folds=0, cross_val_repeats=0, param_permutations=0, percent_test=0, ranking_param='g'):
        if not cross_val_folds:    cross_val_folds=   self.default_cross_val_folds
        if not cross_val_repeats:  cross_val_repeats= self.default_cross_val_repeats
        if not param_permutations: param_permutations=self.default_param_permutations
        if not percent_test:       percent_test=      self.default_percent_test
        if not gridDefinition:
            gridDefinition={}
            for key, value in self.learners.iteritems():
                gridDefinition.update({key:value.params.copy()})
            for learnerName, learnerParams in gridDefinition.iteritems():
                for param_name, param in learnerParams.iteritems():
                    if param[0]=='values':
                        param=param[1:]
                    elif param[0]=='range':
                        param=np.linspace(param[1],param[2],param_permutations)
                    else:
                        raise Exception("Unknown parameter definition type: "+str(param[0]))
                    learnerParams[param_name]=param
                #gridDefinition[learnerName]=learnerParams
                param_names=learnerParams.keys()
                param_values=[[]]*len(param_names)
                for index, param_name in enumerate(param_names):
                    param_values[index]=learnerParams[param_name]
                all_permutations=[i for i in itertools.product(*param_values)]
                for index, perm in enumerate(all_permutations):
                    param_dict={}
                    for pindex, name in enumerate(param_names):
                        param_dict.update({name:perm[pindex]})
                    all_permutations[index]=param_dict
                gridDefinition[learnerName]=all_permutations
        results={}
        for fold in range(0,cross_val_folds):
            test, train = self.dataSet.randomTestTrainSplit(percent_test)
            for repeat in range(0, cross_val_repeats):
                for learnerName, learner in self.learners.iteritems():
                    tempResults={}
                    for permutation in gridDefinition[learnerName]:
                        result=learner.callLearner(permutation, test, train)
                        tempResults.update({str(permutation):result})
                    if not results.has_key(learnerName):
                        results.update({learnerName:tempResults.copy()})
                    else:
                        for treatment, result in tempResults.iteritems():
                            results[learnerName][treatment].addPoint(result)
        
       
        allresults=[]
        for learner in results.values():
            for treatment in learner.values():
                treatment.computeMetrics()
                allresults.append(treatment.copy())
        
        

        for item in allresults: item.makeNums();
        allresults.sort(reverse=False, key=lambda x: x.metrics[ranking_param])
        scottknott([item.constituent_num[ranking_param] for item in allresults])
        for item in allresults: 
            item.metrics.update({'rank':item.constituent_num[ranking_param].rank})
            if item.metrics['pD']==1 and item.metrics['pF']==1:
                item.metrics['rank']==0
        
        allresults.sort(reverse=False, key=lambda x: x.constituent_num[ranking_param].rank)
        allresults.reverse()
        self.results=[i.copy() for i in allresults]
        
        return self.results
                
            
class TemporalParamTuneStudy(ParamTuneStudy):
    """Temporal tuning study: parameters are tuned on one software version
    and the winning treatments are then evaluated on the next version,
    against a default GaussianBayes straw man."""
    default_cross_val_folds=4
    default_cross_val_repeats=4
    default_param_permutations=5
    default_percent_test=1.0/default_cross_val_folds
    
    def __init__(self, dataSets, setnames):
        # dataSets: chronologically ordered data sets.
        # setnames: matching names; index 0 appears to be the project name
        #           used for plot titles/filenames -- TODO confirm.
        self.learners={
                  'BenBayes':BenBayes()}
        self.temporalDataSets=dataSets
        self.setName=setnames
        
    def printResults(self, params=['pD', 'pF', 'f', 'g', 'precision', 'recall', 'ROC-AUC']):        
        """Print optimized + straw-man results for every version transition."""
        for study_ind in range(len(self.temporal_optimized_result)):
            super(TemporalParamTuneStudy, self).printResults(params, results=self.temporal_optimized_result[study_ind]+self.temporal_straw_result[study_ind], setName=self.setName[study_ind+1])
            
    def gridSearch(self, gridDefinition=None, cross_val_folds=0, cross_val_repeats=0, \
                    param_permutations=0, percent_test=0, ranking_param='g', \
                    include_detlas=False, num_top_learners=5):
        """For each consecutive pair of data sets, grid-search on the earlier
        one (via the parent class), then re-run the num_top_learners best
        treatments -- and a default GaussianBayes straw man -- on the later
        one.  Fills self.temporal_optimized_result / temporal_straw_result.

        NOTE(review): 'include_detlas' (sic, presumably include_deltas) is
        accepted but never used.
        """
        num_studies=len(self.temporalDataSets)-1
        strawLearner=GaussianBayes()
        if not cross_val_folds:    cross_val_folds=   self.default_cross_val_folds
        if not cross_val_repeats:  cross_val_repeats= self.default_cross_val_repeats
        if not param_permutations: param_permutations=self.default_param_permutations
        if not percent_test:       percent_test=      self.default_percent_test
        if not gridDefinition:
            # Expand parameter specs into concrete permutation dicts.
            # NOTE(review): duplicated from ParamTuneStudy.gridSearch; the
            # two copies must be kept in sync by hand.
            gridDefinition={}
            for key, value in self.learners.iteritems():
                gridDefinition.update({key:value.params.copy()})
            for learnerName, learnerParams in gridDefinition.iteritems():
                for param_name, param in learnerParams.iteritems():
                    if param[0]=='values':
                        param=param[1:]
                    elif param[0]=='range':
                        param=np.linspace(param[1],param[2],param_permutations)
                    else:
                        raise Exception("Unknown parameter definition type: "+str(param[0]))
                    learnerParams[param_name]=param
                #gridDefinition[learnerName]=learnerParams
                param_names=learnerParams.keys()
                param_values=[[]]*len(param_names)
                for index, param_name in enumerate(param_names):
                    param_values[index]=learnerParams[param_name]
                all_permutations=[i for i in itertools.product(*param_values)]
                for index, perm in enumerate(all_permutations):
                    param_dict={}
                    for pindex, name in enumerate(param_names):
                        param_dict.update({name:perm[pindex]})
                    all_permutations[index]=param_dict
                gridDefinition[learnerName]=all_permutations        
        optimized_results=[]
        unoptimized_results=[]
        for study_ind in range(num_studies):
            studyResults=[]
            studyStrawResults=[]
            # Tune on version i, evaluate on version i+1.
            learnSet=self.temporalDataSets[study_ind]
            testSet=self.temporalDataSets[study_ind+1]
            self.dataSet=learnSet
            super(TemporalParamTuneStudy, self).gridSearch(gridDefinition, cross_val_folds,\
                  cross_val_repeats, param_permutations, percent_test, ranking_param)
            super(TemporalParamTuneStudy, self).printResults(setName=self.setName[study_ind+1])
            # Parent gridSearch leaves self.results sorted best first.
            learnerResults=self.results[:num_top_learners]
            
            for result in learnerResults:
                learner=[i for i in self.learners.values() if i.learnerName==result.learnerName]
                learner=learner[0]
                params=result.learnerParams
                new_result=learner.callLearner(params, testSet, learnSet)
                # NOTE(review): because the loop below was commented out, the
                # addPoint() call is still indented under this 'if', so the
                # optimized learner is re-run and merged a second time ONLY
                # on the iteration that creates the straw-man result --
                # confirm this indentation is intended.
                if not len(studyStrawResults):
                    studyStrawResults=[strawLearner.callLearner({}, testSet, learnSet)]
                #for i in range(cross_val_repeats-1):
                    new_result.addPoint(learner.callLearner(params, testSet, learnSet))
                    #straw_result_temp.addPoint(strawLearner.callLearner({}, testSet, learnSet))
                new_result.computeMetrics()
                #straw_result_temp.computeMetrics()
                studyResults.append(new_result.copy())
                #studyStrawResults.append(straw_result_temp.copy())
                    
            optimized_results.append(studyResults)
            unoptimized_results.append(studyStrawResults)
        self.temporal_optimized_result=optimized_results
        self.temporal_straw_result=unoptimized_results
        return self
    
    
    def plot_temporal_results(self):
        """Scatter-plot optimized vs. straw-man results for every version
        transition: first in (pD, 1-pF) space, then in (f, g) space; PNGs
        are saved under graphs/.  Assumes setName[0] ends in a 4-character
        extension that is stripped for titles/filenames -- TODO confirm.
        """
        global colorcycle, colors
        # Restart the colour rotation so each call colours versions identically.
        colorcycle=cycle(colors)
        fig1=plt.figure(1)
        ax1=fig1.add_subplot(111)
        ax1.hold(True)   # NOTE(review): Axes.hold was removed in matplotlib 3.0
        for version_ind in range(len(self.temporal_optimized_result)):
            optimized=self.temporal_optimized_result[version_ind]
            unoptimized=self.temporal_straw_result[version_ind]
            color=colorcycle.next()   # Python 2 iterator protocol
            X=[i.metrics['pD'] for i in optimized]
            Y=[1.0-i.metrics['pF'] for i in optimized]
            ax1.scatter(X, Y, color=color, marker='+', s=30, label=self.setName[version_ind+1]+' optimized')
            X=[i.metrics['pD'] for i in unoptimized]
            Y=[1.0-i.metrics['pF'] for i in unoptimized]
            ax1.scatter(X, Y, color=color, marker='x', s=30, label=self.setName[version_ind+1]+' Gaussian')
        ax1.set_xlabel('pD')
        ax1.set_ylabel('1-pF')
        xlim=ax1.get_xlim()
        ylim=ax1.get_ylim()
        #ax1.set_xlim(min(xlim[0],ylim[0]),max(xlim[1],ylim[1]))
        #ax1.set_ylim(min(xlim[0],ylim[0]),max(xlim[1],ylim[1]))
        ax1.set_xlim(0,1)
        ax1.set_ylim(0,1)
        ax1.set_aspect(1)
        ax1.set_title(self.setName[0][:-4])
        ax1.legend(loc=4, fontsize=12)
        fig1.show()
        fig1.savefig('graphs/%spDpF.png'%self.setName[0][:-4])
        
        # Second figure: same comparison in (f, g) space.
        colorcycle=cycle(colors)
        fig2=plt.figure(2)
        ax2=fig2.add_subplot(111)
        ax2.hold(True)
        for version_ind in range(len(self.temporal_optimized_result)):
            optimized=self.temporal_optimized_result[version_ind]
            unoptimized=self.temporal_straw_result[version_ind]
            color=colorcycle.next()
            X=[i.metrics['f'] for i in optimized]
            Y=[i.metrics['g'] for i in optimized]
            ax2.scatter(X, Y, color=color, marker='+', s=30, label=self.setName[version_ind+1]+' optimized')
            X=[i.metrics['f'] for i in unoptimized]
            Y=[i.metrics['g'] for i in unoptimized]
            ax2.scatter(X, Y, color=color, marker='x', s=30, label=self.setName[version_ind+1]+' Gaussian')
        ax2.set_xlabel('f')
        ax2.set_ylabel('g')
        xlim=ax2.get_xlim()
        ylim=ax2.get_ylim()
        #ax2.set_xlim(min(xlim[0],ylim[0]),max(xlim[1],ylim[1]))
        #ax2.set_ylim(min(xlim[0],ylim[0]),max(xlim[1],ylim[1]))
        ax2.set_xlim(0,1)
        ax2.set_ylim(0,1)
        ax2.set_aspect(1)
        ax2.set_title(self.setName[0][:-4])
        ax2.legend(loc=4, fontsize=12)
        fig2.show()
        fig2.savefig('graphs/%sFG.png'%self.setName[0][:-4])
        plt.close(fig1)
        plt.close(fig2)
            
        
            
        
            
            
            
                
                

 
class ExperimentalResults:
    """Actual/predicted labels for one learner run, plus derived metrics.

    A fresh instance computes its metrics straight from the label vectors;
    once addPoint() has merged further runs, computeMetrics() instead
    reports the mean of the constituent runs' metrics.
    """
    # Template of reported metrics; __init__ copies it so instances do not
    # share the class-level dict.
    metrics={'accuracy':None,
             'precision':None,
             'recall':None,
             'pD':None,
             'pF':None,
             'confusion matrix':None,
             'f':None,
             'g':None,
             'Jaccard Score':None,
             'ROC-AUC':None
             }
    constituent_metrics=None  # metric name -> list of per-run values
    constituent_num=None      # metric name -> Timm Num object (see makeNums)
    
    def __init__(self, learner, params, actual, predicted):
        self.learnerName=learner[:]
        self.learnerParams=params.copy()
        self.actual=actual[:]
        self.predicted=list(predicted[:])
        self.metrics=self.metrics.copy()   # de-alias from the class template
        self.computeMetrics()
        
    def copy(self):
        """Copy with fresh metric dicts.

        NOTE(review): constituent_num is not copied and starts as None on
        the copy -- confirm callers always re-run makeNums().
        """
        new_one=ExperimentalResults(self.learnerName, self.learnerParams, self.actual, self.predicted)
        new_one.metrics=self.metrics.copy()
        new_one.constituent_metrics=self.constituent_metrics.copy()
        return new_one
        
    def getPrint(self, params):
        """Return the requested metrics as fixed-width (10-char) columns.

        Integral values print as integers, other numerics as floats;
        missing or non-numeric metrics are silently skipped.
        """
        outstr=""
        for item in params:
            try:
                if int(self.metrics[item])==self.metrics[item]:
                    outstr+=("%u"%self.metrics[item]).ljust(10)+' '
                    continue
            except Exception:
                pass
            try:
                outstr+=("%1f"%self.metrics[item]).ljust(10)+' '
            except Exception:
                pass
        return outstr
        
    def makeNums(self):
        "Adds a bunch of Timm's num objects to represent constituent metrics"
        self.computeMetrics()
        self.constituent_num={}
        for measure, values in self.constituent_metrics.iteritems():
            try:
                float(values[0])   # skip non-scalar metrics (confusion matrix)
            except Exception:
                continue
            self.constituent_num.update({measure:Num(name=measure, inits=values[:])})
            
    def addPoint(self,newResult):
        """Merge another run: record its metrics as constituents, extend the
        pooled label vectors, and refresh the averaged metrics."""
        for key in newResult.metrics.keys():
            # assumes every metric value supports .copy() (numpy scalars and
            # arrays do) -- TODO confirm for all sklearn return types
            self.constituent_metrics[key]+=[newResult.metrics[key].copy()]
        self.actual=self.actual+newResult.actual[:]
        self.predicted=self.predicted+newResult.predicted[:]
        self.computeMetrics()

    def computeMetrics(self):
        """(Re)compute self.metrics.

        Once constituent runs exist, each metric is the mean over runs
        (Jaccard and per-class precision are skipped).  Otherwise metrics
        come straight from self.actual/self.predicted; assumes binary 0/1
        labels with 1 as the positive (defective) class.
        """
        if self.constituent_metrics and len(self.constituent_metrics)>1:
            self.metrics={}
            for key in self.constituent_metrics.keys():
                if key=='Jaccard Score' or key =='class precision':
                    continue
                self.metrics.update({key:np.mean(self.constituent_metrics[key])})
        else:
            self.metrics={'accuracy':metrics.accuracy_score(self.actual,self.predicted),
                                 'precision':metrics.precision_score(self.actual,self.predicted),
                                 'recall':metrics.recall_score(self.actual,self.predicted),
                                 'confusion matrix':metrics.confusion_matrix(self.actual,self.predicted),
                                 'Jaccard Score':metrics.jaccard_similarity_score(self.actual,self.predicted),
                                 # BUG FIX: 'ROC-AUC' was previously computed
                                 # with accuracy_score; use the real AUC.
                                 'ROC-AUC':metrics.roc_auc_score(self.actual,self.predicted),
                                 'class precision':metrics.precision_score(self.actual,self.predicted, average=None)
                                 }
            cm=self.metrics['confusion matrix']
            # Binary confusion-matrix cells: a=TN, b=FN, c=FP, d=TP.
            d=cm[1][1]
            b=cm[1][0]
            c=cm[0][1]
            a=cm[0][0]
            pd   = 1.0*d         / (b+d)       # probability of detection (recall)
            pf   = 1.0*c         / (a+c)       # probability of false alarm
            pn   = 1.0*(b+d)     / (a+c)       # actual positive/negative ratio
            prec = 1.0*d         / (c+d)
            g    = 2.0*(1-pf)*pd / (1-pf+pd)   # harmonic mean of pd and 1-pf
            f    = 2.0*prec*pd/(prec+pd)       # F1 measure
            # Sanity check: row 1 of cm counts actual positives, which for
            # 0/1 labels equals sum(self.actual).
            assert(sum(cm[1])==sum(self.actual))
            self.metrics.update({'pD':pd,
                                 'pF':pf,
                                 'g':g,
                                 'f':f,
                                 'pN':pn})
        # First computation: seed the constituent lists with this run.
        if not self.constituent_metrics:
            self.constituent_metrics=self.metrics.copy()
            for key, item in self.constituent_metrics.iteritems():
                self.constituent_metrics[key]=[item.copy()]
            
    def __repr__(self):
        """Learner name followed by every scalar metric as key=value."""
        outstr=self.learnerName[:]+': '
        for key, value in self.metrics.iteritems():
            try:
                float(value)
            except Exception:
                continue
            outstr+=key+"="+str(value)+', '
        if outstr.endswith(', '):
            outstr=outstr[:-2]
        return outstr
        

   
     
class BernoulliBayes(ParamTuneLearner):
    """Wraps scikit-learn's Bernoulli naive Bayes for the tuning study."""
    learnerName="SciKit-Learn Bernoulli NB"
    params={'alpha':['range', 0.01, 1.0], 
            'binarize':['range', 0.0, 1.0], 
            'fit_prior':['values', True, False]}
    default_params={'alpha':1.0, 
                    'binarize':0.5, 
                    'fit_prior':True}    

    def callLearner(self, runParams, test_set, train_set):
        """Fit a BernoulliNB with the supplied parameter dict, predict the
        test set, and package actual vs. predicted labels into an
        ExperimentalResults."""
        model = bayes.BernoulliNB(alpha=runParams['alpha'],
                                  binarize=runParams['binarize'],
                                  fit_prior=runParams['fit_prior'])
        # Feature columns are everything but the last attribute; the last
        # attribute is the class label.
        train_X = [[float(v) for v in inst.attributeValues[:-1]] for inst in train_set.instances]
        train_y = [int(inst.attributeValues[-1]) for inst in train_set.instances]
        model.fit(train_X, train_y)
        test_X = [[float(v) for v in inst.attributeValues[:-1]] for inst in test_set.instances]
        predicted = model.predict(test_X)
        actual = [int(inst.attributeValues[-1]) for inst in test_set.instances]
        return ExperimentalResults(self.learnerName, runParams, actual, predicted)
    
                
        
class GaussianBayes(ParamTuneLearner):
    """Straw-man learner: scikit-learn GaussianNB with no tunable parameters
    (inherits the empty params dict, yielding a single empty permutation)."""
    learnerName="SciKit-Learn Gaussian NB"
    
    def callLearner(self, runParams, test_set, train_set):
        """Train GaussianNB on train_set and evaluate on test_set."""
        clf=bayes.GaussianNB()
        # BUG FIX: the labels were built as [map(int, i.attributeValues[-1])
        # ...], which maps int over each label *string* and yields a list per
        # instance; build a flat list of ints as BernoulliBayes does.
        clf.fit([map(float,i.attributeValues[:-1]) for i in train_set.instances], 
                 [int(i.attributeValues[-1]) for i in train_set.instances])
        predicted=clf.predict([map(float,i.attributeValues[:-1]) for i in test_set.instances])
        true=[int(i.attributeValues[-1]) for i in test_set.instances]
        return ExperimentalResults(self.learnerName, runParams, true, predicted)

    
class MultinomialBayes(ParamTuneLearner):
    """Wraps scikit-learn's multinomial naive Bayes for the tuning study."""
    learnerName="SciKit-Learn Multinomial NB"
    params={'alpha':['range', 0.0, 1.0], 
            'fit_prior':['values', True, False]}
    default_params={'alpha':1.0, 
                    'fit_prior':True}
            
    def callLearner(self, runParams, test_set, train_set):
        """Fit MultinomialNB with runParams and evaluate on test_set."""
        clf=bayes.MultinomialNB(alpha=runParams['alpha'], 
                              fit_prior=runParams['fit_prior'])
        # BUG FIX: the labels were built as [map(int, i.attributeValues[-1])
        # ...], which maps int over each label *string* and yields a list per
        # instance; build a flat list of ints as BernoulliBayes does.
        clf.fit([map(float,i.attributeValues[:-1]) for i in train_set.instances], 
                 [int(i.attributeValues[-1]) for i in train_set.instances])
        predicted=clf.predict([map(float,i.attributeValues[:-1]) for i in test_set.instances])
        true=[int(i.attributeValues[-1]) for i in test_set.instances]
        return ExperimentalResults(self.learnerName, runParams, true, predicted)
        

class BenBayes(ParamTuneLearner):
    """Hand-rolled naive Bayes with m-estimate smoothing and optional
    discretization: 'FI' = Fayyad-Irani splits, an int = equal-width bin
    count, False = Gaussian likelihoods on the raw numeric features."""
    learnerName="Ben's NB Implementation"
    params={'k':['range' ,0.0 , 3.0],
            'm':['range' ,0.0 , 5.0],
            'discretize':['values', False, 3, 5, 10, 'FI']}
    default_params={'k':1.0,
                    'm':2.0,
                    'discretize':False}

    def makeGaussian(self, values):
        """Return a Gaussian pdf fitted to *values* (sample mean/std).

        NOTE(review): if all values are equal, sd is 0 and the returned
        function divides by zero (numpy yields inf/nan, not an exception).
        """
        m=np.mean(values)
        sd=np.std(values)
        a = 1.0/np.sqrt(2*np.pi*sd**2)
        # 'lambda x:' replaces the Python-2-only 'lambda(x):' tuple-parameter
        # syntax; behavior is identical.
        return lambda x: a*np.exp(-(x-m)**2/(2*sd**2))
    
    
    def callLearner(self, runParams, test_set, train_set):
        """Train on train_set, classify test_set, return ExperimentalResults."""
        # --- optional discretization ------------------------------------
        if runParams['discretize']=='FI':
            train_set, test_set = FI_disc(train_set, test_set)
        elif type(runParams['discretize'])==int:
            bins=runParams['discretize']
            train_set, test_set = EW_disc(train_set, test_set, bins)
            
        # --- training ----------------------------------------------------
        outcomes=[int(i.attributeValues[-1]) for i in train_set.instances]
        outcomes=set(outcomes)
        evidenceDict={}   # outcome -> feature vectors of instances with that label
        priorDict={}      # outcome -> k-smoothed class prior
        gaussDict={}      # outcome -> per-feature Gaussian pdfs (numeric mode)
        for outcome in outcomes:
            if runParams['discretize']:
                evidenceDict.update({outcome: \
                [i.attributeValues[:-1] for i in train_set.instances \
                if str(i.attributeValues[-1])==str(outcome)]})
            else:
                evidenceDict.update({outcome: \
                [map(float,i.attributeValues[:-1]) for i in train_set.instances \
                if str(i.attributeValues[-1])==str(outcome)]})
                gaussDict.update({outcome:[self.makeGaussian([i[j] for i in evidenceDict[outcome]])\
                    for j in range(len(evidenceDict[outcome][0]))]})
            # k boosts the class count; NOTE(review): the denominator is not
            # adjusted by k*|outcomes|, so priors need not sum to 1.
            priorDict.update({outcome:float(len(evidenceDict[outcome])+runParams['k'])/train_set.NrInstances()})
        # --- testing -----------------------------------------------------
        true=[int(i.attributeValues[-1]) for i in test_set.instances]
        predicted=[]
        if runParams['discretize']:
            for testFeats in [i.attributeValues[:-1] for i in test_set.instances]:
               best=0
               bestClass=None
               for outcome in outcomes:
                   # m-estimate of P(feature=value | outcome) per feature.
                   temp=np.prod([(len([i for i in evidenceDict[outcome] if i[j]==testFeats[j]])+ \
                       runParams['m'])/(len(evidenceDict[outcome])+runParams['m']) for j in range(len(testFeats))])
                   temp*=priorDict[outcome]
                   # '>=' means ties go to the last outcome examined.
                   if temp>=best:
                        best=temp
                        bestClass=outcome
               predicted.append(bestClass)
        else:
            for testFeats in [map(float,i.attributeValues[:-1]) for i in test_set.instances]:
                best=0
                bestClass=None
                for outcome in outcomes:
                    gauss=gaussDict[outcome]
                    # NOTE(review): adding m to each Gaussian likelihood is an
                    # unusual smoothing scheme -- confirm it is intended.
                    temp=np.prod([gauss[i](testFeats[i])+runParams['m'] for i in range(len(testFeats))])
                    temp*=priorDict[outcome]
                    if temp>=best:
                        best=temp
                        bestClass=outcome
                predicted.append(bestClass)
        return ExperimentalResults(self.learnerName, runParams, true[:], predicted[:])
        
        
        
        
        
        