# nems.py

from library import *
from mucker import *
#from ParamTune import *
import os
import math
import numpy as np
from datetime import *
from scipy.stats import wilcoxon
from scipy.stats.mstats import mquantiles
#import matplotlib.pyplot as plt
from itertools import cycle
import Queue
import multiprocessing
#import sklearn.naive_bayes as bayes

# Class label treated as the "positive" (defective) class by the evaluations.
target_class= '1'
# Matplotlib color letters and marker glyphs used by the plotting helpers.
colors=['r', 'g', 'b', 'c', 'm', 'k', 'y']
markers=['^','s', 'p', 'h', '8', 'o']
# Endless cyclic iterators over the palettes (Py2: advanced with .next()).
colorcycle=cycle(colors)
markercycle=cycle(markers)
# Scratch globals for interactive use: `a` holds the last experiment result
# (see main()); `wtl` presumably a win/tie/loss summary — never written here.
a=None
wtl=None

def main():
    import sys
    from os import listdir
    from os.path import isfile, join
    from datetime import datetime
    from time import sleep

    if len(sys.argv)==2:
             print eval(sys.argv[1] + '()')
    else:
        
        global a
        #print('Reading HOG')
        data_hog=ReadDataSet('hog.arff')
        #print('discretizing HOG')
        data_hog2=data_hog.EqualWidthDiscretization('class',8)
        #print('Selecting HOG features')
        feat_hog=data_hog2.featureReductionViaInfoGain('class',0.005)
        data_hog3=data_hog.applyFSS(feat_hog)
        #print('Exporting reduced HOG')
        data_hog3.exportDataSetToArff('hog_selected.arff')
        print ','.join([str(i) for i in feat_hog])
        
        #print('Reading wav')
        data_wav=ReadDataSet('wav.arff')      
        #print('discretizing wav')
        data_wav2=data_wav.EqualWidthDiscretization('class',8)
        #print('Selecting wav features')
        feat_wav=data_wav2.featureReductionViaInfoGain('class',0.005)
        data_wav3=data_wav.applyFSS(feat_wav)
        #print('Exporting reduced wav')
        data_wav3.exportDataSetToArff('wav_selected.arff')
        print ','.join([str(i) for i in feat_wav])
        
        #a=experimentTune()
        #a=buisnessCaseforAllSets()

def experimentTune(srcDir = "data_arff/"):
    """Run a temporal grid-search parameter-tuning study on every
    multi-release 'ivy' project found in srcDir.

    Returns the grid-search result of the LAST group processed, or None
    when no project has at least three releases.

    Fixes vs. the original:
      * `srcDir` was unconditionally reassigned to "data_arff/", silently
        defeating the parameter;
      * `result` raised UnboundLocalError when no group had >= 3 releases;
      * two log files were opened (truncating logs/log_[12].txt), never
        written and never closed — removed.
    """
    dataSets=[i for i in os.listdir(srcDir) if i.startswith('ivy')]
    dataSets.sort()
    # A set is "multi-release" when another set shares its 4-char prefix.
    multiSets=[]
    for s in dataSets:
        diff=dataSets[:]
        diff.remove(s)
        diff=[i[:4] for i in diff]
        if s[:4] in diff:
            multiSets.append(s)
    # Partition the multi-release sets into per-project groups.
    nestedSets=[]
    while multiSets:
        group=[multiSets[0]]
        multiSets=multiSets[1:]
        group+=[i for i in multiSets if i.startswith(group[0][:4])]
        multiSets=[i for i in multiSets if not i.startswith(group[0][:4])]
        nestedSets.append(group)
    result=None
    for group in [i for i in nestedSets if len(i)>=3]:
        versions=[ReadDataSet(srcDir + dataSet) for dataSet in group]
        version_names=['.'.join(i.split('.')[:-1]) for i in group]
        study=TemporalParamTuneStudy(versions, version_names)
        result=study.gridSearch()
        study.printResults()
        study.plot_temporal_results()
    return result
    
def experimentTuneOld(srcDir = "data_arff/"):
    """Legacy per-set tuning study restricted to the BenBayes learner.

    Returns the grid-search result of the last data set, or None when the
    directory yields no sets.

    Fixes vs. the original: `dataSets` was undefined (NameError) for any
    srcDir other than the default — other directories now fall back to
    every .arff file found there; `result` is pre-initialized so an empty
    list no longer raises at the return.
    """
    if srcDir=="data_arff/":
        dataSets=['ant-1.7.arff','ivy-1.1.arff', 'jedit-4.1.arff', 'log4j-1.1.arff', 'lucene-2.4.arff', 'poi-3.0.arff', 'synapse-1.2.arff', 'velocity-1.6.arff', 'xalan-2.6.arff', 'xerces-1.4.arff']
    else:
        dataSets=sorted(i for i in os.listdir(srcDir) if i.endswith('.arff'))
    result=None
    for setname in dataSets:
        dataSet = ReadDataSet(srcDir+setname)
        # setname[:-5] strips the ".arff" extension for the study label.
        study=ParamTuneStudy(dataSet, setname[:-5])
        study.learners={'BenBayes':BenBayes()}
        result=study.gridSearch()
        study.printResults()
    return result

def buisnessCaseforAllSets():
    """Run the business-case analysis on every multi-release product in
    data_csv/ (excluding 'ivy' sets), write AB-then-CD sorted summaries to
    the two general log files, and return the per-product WTL dicts.

    Fix vs. the original: a multiprocessing.Pool(processes=4) was created
    but never used (results were computed with a plain map) and never
    closed, leaking four idle worker processes — the pool is removed.
    """
    srcDir="data_csv/"
    dataSets=os.listdir(srcDir)
    dataSets=[i for i in dataSets if not i.startswith('ivy')]
    dataSets.sort()
    args=[]
    generalLogFile =open('logs/log_1.txt', 'w')
    generalLogFile2=open('logs/log_2.txt', 'w')
    logfiles=[generalLogFile, generalLogFile2]
    # A set is "multi-release" when another set shares its 4-char prefix.
    multiSets=[]
    for s in dataSets:
        diff=dataSets[:]
        diff.remove(s)
        diff=[i[:4] for i in diff]
        if s[:4] in diff:
            multiSets.append(s)
    # Partition the multi-release sets into per-project groups.
    nestedSets=[]
    while multiSets:
        group=[multiSets[0]]
        multiSets=multiSets[1:]
        group+=[i for i in multiSets if i.startswith(group[0][:4])]
        multiSets=[i for i in multiSets if not i.startswith(group[0][:4])]
        nestedSets.append(group)
    # Only projects with more than 3 releases are analysed.
    for group in [i for i in nestedSets if len(i)>3]:
        versions=[]
        for dataSet in group: versions.append(readCK_CSV(srcDir + dataSet))
        version_names=['.'.join(i.split('.')[:-1]) for i in group]
        args.append((versions, version_names, logfiles))
    outs=map(buisnessCaseforSingleProductThread, args)
    WTL={}
    for out in outs: WTL.update(out)
    # Summaries: all AB cases first, then all CD cases, each alphabetical.
    WTL_sort=sorted([i for i in WTL.keys() if i.endswith('AB')])+sorted([i for i in WTL.keys() if i.endswith('CD')])
    for name in WTL_sort:
        # NOTE(review): `versions` is the LAST group's list here; presumably
        # isClassNumeric is identical across all sets — confirm.
        PrintSummary(versions[0].isClassNumeric, generalLogFile, generalLogFile2, name, WTL[name])
    generalLogFile.close()
    generalLogFile2.close()
    return outs

def buisnessCaseforSingleProductThread(arg_tup):
    """Adapter for pool/map callers: unpack a (version_sets, version_names,
    logfiles) tuple and delegate to newBuisnessCaseForSingleProduct."""
    version_sets, version_names, logfiles = arg_tup
    return newBuisnessCaseForSingleProduct(version_sets, version_names, logfiles)

def newBuisnessCaseForSingleProduct(version_sets, version_names, logfiles):
    print "Loading files: "+ ", ".join(version_names)
    WTLproduct={}
    learner=None
    for release_ind in range(2,len(version_names)):
        training_sets=version_sets[0:release_ind]
        training_sets_names= version_names[0:release_ind]
        test_set=version_sets[release_ind]
        test_set_name=version_names[release_ind]
        out, learner =buisnessCaseforSingleRelease(training_sets, test_set, training_sets_names, test_set_name, logfiles, learner)
        WTLproduct.update(out)
    return WTLproduct

def buisnessCaseforSingleProduct(version_sets, version_names, logfiles):
    
    print "Loading files: "+ ", ".join(version_names)
    WTLproduct={}
    for release_ind in range(2,len(version_names)):
        training_sets=version_sets[0:release_ind]
        training_sets_names= version_names[0:release_ind]
        test_set=version_sets[release_ind]
        test_set_name=version_names[release_ind]
        out, _=buisnessCaseforSingleRelease(training_sets, test_set, training_sets_names, test_set_name, logfiles)
        WTLproduct.update(out)
        #return out #shortcut for one eval
#    for name, WTL in WTLproduct.iterItems():
#        PrintSummary(test_set.isClassNumeric, logfiles[0], logfiles[1], test_set_name, WTL) #write a summary of results on the general log file(s)        
    #plotWTL(WTLproduct, re.sub('[^a-zA-Z]+', '', test_set_name))
    return WTLproduct
    

    

def buisnessCaseforSingleReleaseX2(training_sets, test_set, training_sets_names, test_set_name, logfiles):
    """Parallel variant of buisnessCaseforSingleRelease: evaluate the AB and
    CD half-sets in two worker processes and merge their queued results.

    Fixes vs. the original:
      * a thread-local Queue.Queue was paired with multiprocessing.Process —
        a child process cannot put() into a thread queue, so q.get()
        deadlocked; a multiprocessing.Queue is required;
      * the workers are now join()ed after both results are collected.
    """
    train_delta_set=createDeltaSet(training_sets, extras=True)
    test_delta_set= createDeltaSet([training_sets[-1], test_set], extras=True)
    ABtrain, CDtrain = train_delta_set.AbCdSplit()
    ABtest,  CDtest  =  test_delta_set.AbCdSplit()
    q=multiprocessing.Queue()
    tAB=multiprocessing.Process(target=buisnessHalfCaseThread, args=(q, ABtrain, ABtest, test_set_name+'_AB', logfiles[0], logfiles[1]))
    tCD=multiprocessing.Process(target=buisnessHalfCaseThread, args=(q, CDtrain, CDtest, test_set_name+'_CD', logfiles[0], logfiles[1]))
    tAB.daemon=True; tCD.daemon=True
    WTLrelease={}
    tAB.start(); tCD.start()
    # Two puts are expected, one per half-set; order does not matter.
    WTLrelease.update(q.get()); WTLrelease.update(q.get())
    tAB.join(); tCD.join()
    return WTLrelease

def buisnessCaseforSingleRelease(training_sets, test_set, training_sets_names, test_set_name, logfiles, learner=None):
    """Evaluate one test release.

    Builds delta sets (training releases vs. last-training-to-test), splits
    both into AB/CD halves, runs each half, and returns
    ({'<name>_AB': result, '<name>_CD': result}, [learner_AB, learner_CD]).
    When `learner` is given it is the [AB, CD] pair to reuse; otherwise the
    learners chosen by the half-case runs are returned for carry-over.
    """
    train_deltas=createDeltaSet(training_sets, extras=True)
    test_deltas =createDeltaSet([training_sets[-1], test_set], extras=True)
    ab_train, cd_train = train_deltas.AbCdSplit()
    ab_test,  cd_test  = test_deltas.AbCdSplit()
    if learner:
        ab_learner, cd_learner = learner[0], learner[1]
    else:
        ab_learner = None
        cd_learner = None
    ab_result, ab_chosen = buisnessHalfCase(ab_train, ab_test, test_set_name+'_AB', logfiles[0], logfiles[1], ab_learner)
    cd_result, cd_chosen = buisnessHalfCase(cd_train, cd_test, test_set_name+'_CD', logfiles[0], logfiles[1], cd_learner)
    if not ab_learner:
        ab_learner = ab_chosen
        cd_learner = cd_chosen
    release_results = {}
    release_results[test_set_name+'_AB'] = ab_result
    release_results[test_set_name+'_CD'] = cd_result
    print("Finished %s half-sets"%test_set_name)
    return release_results, [ab_learner, cd_learner]

def buisnessHalfCaseThread(q, trainDs, testDs, test_set_name, generalLogFile, generalLogFile2):
    """Worker wrapper: run one half-case and queue {'AB'|'CD': result},
    keyed by the last two characters of the test-set name."""
    half_key = test_set_name[-2:]
    result = buisnessHalfCase(trainDs, testDs, test_set_name)
    q.put({half_key: result})

def buisnessHalfCase(trainDs, testDs, test_set_name, generalLogFile=None, generalLogFile2=None, learner=None):   
    """Evaluate learner(s) on one AB/CD half of a release's delta set.

    NOTE(review): this function is mid-refactor and cannot run as written:
      * the CompareLearnersShort() call references `dataSet`, `classValue`,
        `repeats` and `folds`, none of which are defined in this scope
        (NameError at the call);
      * `winTieLoss` is only assigned inside the commented-out block, so
        the final `return` also raises NameError;
      * `logFile` is opened but never closed on this live code path.
    """
    #idea: sample towards most recent
    logFile=open('logs/'+test_set_name+'_log.txt', 'w')    
    learners = []
    #for k in [1, 2, 3, 4]
    if not learner:
        # NOTE(review): `learners` becomes a plain string here, so the
        # len() below counts its 10 characters — presumably this was meant
        # to be the list ['GaussianNB']; confirm before re-enabling.
        learners='GaussianNB'
#        for j in np.linspace(0.05, 0.95, 7):
#            learners+=['PEEKER(C, 3, %1.2f, %1.2f, 0)'%(j, i) for i in np.linspace(0.05, 0.95, 7)]
    else:
        learners=[learner]
    print "%s: %u learner(s)"%(test_set_name, len(learners))
    noiseLevels=[0]
    performances = InitializePerformances(learners, noiseLevels)
    InitializePerformanceForRepeatStep(performances, 1)
    # NOTE(review): undefined names — see docstring.
    CompareLearnersShort(dataSet, learners, classValue, repeats, folds, performances)
#    RunLearners(learners, noiseLevels, trainDs, testDs, performances, run=1, fold=1)
#    PrintPerformancesForExperimentalRun(logFile, performances, trainDs, testDs, '1', run=1, fold=1)
#    winTieLoss = ComparePerformances(testDs.isClassNumeric, performances) #compute the Win,Tie,Loss values for the collected measures applying Wilcoxon statistical tests
#    if not learner:    
#        for treatment, result in winTieLoss['f'][0].iteritems():
#            if result==max(winTieLoss['f'][0].values()):
#                learner=treatment
#                break
#    PrintComparisons(testDs.isClassNumeric, logFile, winTieLoss, target_class) #write the Win,Tie,Loss values in the specific log file    
#    logFile.close()    
#    if False and generalLogFile and generalLogFile2:    
#        PrintSummary(testDs.isClassNumeric, generalLogFile, generalLogFile2, test_set_name, winTieLoss) #write a summary of results on the general log file(s)
    return winTieLoss, learner

def plotWTL(WTLproduct, setname):  
    """Plot per-release pd/pf curves for every AB/CD case and learner found
    in WTLproduct, saving the figure to graphs/<setname>.png.

    NOTE(review): the matplotlib import at the top of the file is commented
    out, so `plt` is undefined — calling this as the module stands raises
    NameError.  Also uses Py2-only idioms (dict.has_key, iterator.next).
    """
    halves={}
    fig=plt.figure()
    fig.hold(True)
    ax=fig.add_subplot(111)
    # Fresh color cycle so colors are stable per call, not per module load.
    colorcycle=cycle(colors)
    
    # Group result series by case label (e.g. 'xxx_AB') across releases.
    # NOTE(review): WTLproduct is indexed both by [0] and iterated — it is
    # presumably a sequence of per-release dicts; confirm against callers.
    for case in WTLproduct[0].keys():
        if halves.has_key(case):
            halves[case].append([i[case] for i in WTLproduct])
        else:
            halves.update({case: [[i[case] for i in WTLproduct]]})
    for case_label in halves.keys():
        results=halves[case_label][0]
        # pd/pf: probability of detection / false alarm, one dict per release.
        pd=[i['pd'][0] for i in results]
        pf=[i['pf'][0] for i in results]
        markercycle=cycle(['x', '+', 's', 'o'])
        # One color per case; solid line = pd, dashed = pf.
        color=colorcycle.next()
        for learner_label in pd[0].keys():
            learnerPD=[i[learner_label] for i in pd]
            learnerPF=[i[learner_label] for i in pf]
            pdLabel=' '.join([case_label, 'pd', learner_label])
            pfLabel=' '.join([case_label, 'pf', learner_label])
            ax.plot(learnerPD, c=color, ls='-',  marker=markercycle.next(), label=pdLabel)
            ax.plot(learnerPF, c=color, ls='--', marker=markercycle.next(), label=pfLabel)
    ax.set_ylim(0,1)
    handles, labels=ax.get_legend_handles_labels()
    fig.legend(handles, labels, loc=2, prop={'size':7})
    fig.suptitle(setname)
    fig.savefig('graphs/'+setname+'.png')
    plt.close(fig)
      
            
            
            
def mapTemporalCSV():
    srcDir="data_csv/"
    dataSets=os.listdir(srcDir)
    dataSets.sort()
    multiSets=[]
    outs=[]
    for s in dataSets:
        diff= dataSets[:]
        diff.remove(s)
        diff=[i[:4] for i in diff]
        if diff.__contains__(s[:4]):
            multiSets.append(s)
    nestedSets=[]
    while multiSets:
        group=[multiSets[0]]
        multiSets=multiSets[1:]
        group+=[i for i in multiSets if i.startswith(group[0][:4])]
        multiSets=[i for i in multiSets if not i.startswith(group[0][:4])]
        nestedSets.append(group)
    for group in nestedSets:
        print group
        out=mapTemporalSets(srcDir, group, group[0][:-8])
#        out.exportDataSetToArff("output/"+group[0][:-8]+'.arff')
#        outs.append(out)
    
        
def mapTemporalSets(srcDir, dataSets, savefigname=None):
    """FastMap-project consecutive CSV releases of one product and plot:
    (1) a per-release scatter with module trajectories coloured by defect
    transition (A unfixed / B fixed / C broken / D fine), and (2) a scatter
    of the projected delta set.  Returns the projected per-release sets.

    dataSets: ordered list of CSV file names, oldest release first.
    savefigname: when given, figures go to graphs/<name>.png and <name>2.png.

    NOTE(review): `plt` is undefined at module level (the matplotlib import
    is commented out) — this raises NameError as the module stands.
    """
    versions=[]
    for dataSet in dataSets:
        versions.append(readCK_CSV(srcDir + dataSet))
    version_names=['.'.join(i.split('.')[:-1]) for i in dataSets]
    #Select features from delta set and project
    delta_set=createDeltaSet(versions)
    delta_set_extras=createDeltaSet(versions, extras=True)
    delta_discrete=delta_set.FayyadIraniDiscretizer("class")
    overall=mergeDataSets(versions)
    overall=overall.FayyadIraniDiscretizer("class")
    # Features are selected on the DELTA set but applied to the originals.
    features=delta_discrete.featureReductionViaInfoGain("class", 0.50)
    #features=overall.featureReductionViaInfoGain("class", 0.5)
    #print [i.estimateID() for i in versions]
    versions=[i.applyFSS(features) for i in versions]
    versions=[i.NormalizeDataSet(False) for i in versions]
    #print [i.estimateID() for i in versions]
    projected=[ProjectViaFastMap(i) for i in versions]
    
    #Plot
    fig=plt.figure()
    fig.hold(True)
    ax=fig.add_subplot(111)
    X = []
    Y = []
    for ind, ver in enumerate(projected):
        # Two passes per release: defective instances first, then clean
        # (last attribute value presumably 0/1 defect label — confirm).
        for instances in [i for i in ver.instances if int(i.attributeValues[-1])],[i for i in ver.instances if not int(i.attributeValues[-1])]:
        
            X.append([i.x for i in instances])
            Y.append([i.y for i in instances])
            #maxs=max([i.nrInstances for i in clusters[ind]])
            #S=[300*np.sqrt(i.nrInstances/maxs) for i in clusters[ind]]
            S=15
            C=colors[ind]
            # 'x' marks defective instances, '+' clean ones.
            mlam=lambda i: 'x' if int(i.attributeValues[-1]) else '+'
            M=[mlam(i) for i in instances]
            ax.scatter(X[-1],Y[-1],S,C,label=version_names[ind], marker=M[0])
        if ind<len(projected)-1:
            # Link each module to its counterpart in the NEXT release
            # (matched by name at attributeValues[2]) and colour the edge
            # by defect transition.
            for row in ver.instances:
                ver2=projected[ind+1]
                for row2 in ver2.instances:
                    if row2.attributeValues[2]==row.attributeValues[2]:
                        break
                # No match found: row2 is just the last instance — skip.
                if not row2.attributeValues[2]==row.attributeValues[2]:
                    continue
                def1=int(row.attributeValues[-1])
                def2=int(row2.attributeValues[-1])
                if def1:
                    if def2 : deftype="A-Unfixed"; C='b'
                    else    : deftype="B-Fixed"; C='g'
                else:
                    if def2 : deftype="C-Broken"; C='r'
                    else    : deftype="D-Fine"; C=None
                
                # D (clean->clean) edges are deliberately not drawn (C=None).
                if C: ax.plot([row.x,row2.x],[row.y,row2.y], marker=None, color=C, label=deftype)
    # Clip axes to the 1st..99th percentile of all plotted coordinates.
    X_list=[]; Y_list=[]
    for i in X: X_list+=i
    for i in Y: Y_list+=i
    X=sorted(X_list); Y=sorted(Y_list)
    ax.set_xlim(X[int(len(X)*0.01)], X[int(len(X)*0.99)])            
    ax.set_ylim(Y[int(len(Y)*0.01)], Y[int(len(Y)*0.99)])
    # De-duplicate legend entries by label (Py2: keys() returns a list).
    handles, labels = ax.get_legend_handles_labels()
    HLdict={}
    for h, l in zip(handles, labels): HLdict.update({l:h})
    labels = HLdict.keys()
    labels.sort()
    handles=[HLdict[label] for label in labels]
    fig.legend(handles, labels)
    fig.show()
    if savefigname:
        fig.savefig('graphs/'+savefigname+'.png')
        plt.close(fig)
    
    
    # Second figure: the projected (extras) delta set itself.
    fig2=plt.figure(2)
    fig2.hold(True)
    ax2=fig2.add_subplot(111)
    delta_discrete=delta_set_extras.FayyadIraniDiscretizer("class")
    features=delta_discrete.featureReductionViaInfoGain("class", 0.25)
    delta_set=delta_discrete.applyFSS(features)
    delta_set=delta_set.NormalizeDataSet(False)
    delta_set=ProjectViaFastMap(delta_set)
    # NOTE(review): delta_set is a single DataSet here yet is iterated —
    # presumably ProjectViaFastMap returns something iterable; the
    # quadrants/clusters/centroids results are never used below. Confirm.
    quadrants=[SplitIntoQuadrants(i, "C2") for i in delta_set]
    clusters=[]
    for ver in quadrants:
        clusters.append(GridClusteringWithNoFilter(ver))
        clusters[-1]=[i for i in clusters[-1] if i.nrInstances!=0]
    centroids=[GetClusterCentroids(i) for i in clusters]
    X=[i.x for i in delta_set.instances]
    Y=[i.y for i in delta_set.instances]
    #wiggle
    # Random jitter (half a std-dev wide) so overlapping points separate.
    X=[i+(np.random.random()-0.5)*np.std(X)/2 for i in X]
    Y=[i+(np.random.random()-0.5)*np.std(Y)/2 for i in Y]
    classes=[i.attributeValues[-1] for i in delta_set.instances]
    C=['b' if i=='A' else 'g' if i=='B' else 'r' if i=='C' else 'c' for i in classes]
    for x, y, col, clas in zip(X, Y, C, classes):
        ax2.scatter(x,y,c=col, marker='x', label=clas)
    # De-duplicate legend entries by label, as above.
    handles, labels = ax2.get_legend_handles_labels()
    HLdict={}
    for h, l in zip(handles, labels): HLdict.update({l:h})
    labels = HLdict.keys()
    labels.sort()
    handles=[HLdict[label] for label in labels]
    fig2.legend(handles, labels)
    fig2.show()
    
    if savefigname:
        fig2.savefig('graphs/'+savefigname+'2.png')
        plt.close(fig2)
    
    return projected
###### Note: add DataSet.splitByClass() and apply it to the fig2 stuff
######       also, find a way to work ID in        
###########################################################################

def runCSV():
    srcDir="data_csv/"
    dataSets=os.listdir(srcDir)
    dataSets.sort()
    multiSets=[]
    outs=[]
    for s in dataSets:
        diff= dataSets[:]
        diff.remove(s)
        diff=[i[:4] for i in diff]
        if diff.__contains__(s[:4]):
            multiSets.append(s)
    nestedSets=[]
    while multiSets:
        group=[multiSets[0]]
        multiSets=multiSets[1:]
        group+=[i for i in multiSets if i.startswith(group[0][:4])]
        multiSets=[i for i in multiSets if not i.startswith(group[0][:4])]
        nestedSets.append(group)
    for group in nestedSets:
        print group
        out=experimentTemporal(srcDir, group)
        out.exportDataSetToArff("output/"+group[0][:-8]+'.arff')
        outs.append(out)

    
def plotouts(outs, savefigname=None): #not working
    """(Author-marked "not working".)  Intended to FastMap-project the given
    data sets, grid-cluster each projection, plot the cluster centroids and
    return per-transition translation/scale statistics.

    NOTE(review): `plt` is undefined at module level (matplotlib import is
    commented out); the loop below also reads `ver.instances` where `ver`
    ranges over `centroids` — whether GetClusterCentroids returns an object
    with .instances is not visible here; confirm before reuse.
    """
    overall=mergeDataSets(outs)
    overall_discrete=overall.FayyadIraniDiscretizer("class")
    features=overall_discrete.featureReductionViaInfoGain('class', 0.5) 
    #print [i.estimateID() for i inouts]
    #outs=[i.applyFSS(features) for i inouts]
    outs=[i.applyFSS(features) for i in outs]
    outs=[i.NormalizeDataSet(False) for i in outs]
    #print [i.estimateID() for i inouts]
    projected=[ProjectViaFastMap(i) for i in outs]
    quadrants=[SplitIntoQuadrants(i, "C2") for i in projected]
    clusters=[]
    for ver in quadrants:
        clusters.append(GridClusteringWithNoFilter(ver))
        # Drop empty clusters before computing centroids.
        clusters[-1]=[i for i in clusters[-1] if i.nrInstances!=0]
    centroids=[GetClusterCentroids(i) for i in clusters]
    fig=plt.figure()
    fig.hold(True)
    ax=fig.add_subplot(111)
    X = []
    Y = []
    for ind, ver in enumerate(centroids):
        X.append([i.x for i in ver.instances])
        Y.append([i.y for i in ver.instances])
        # Marker area scaled by relative cluster population.
        maxs=max([i.nrInstances for i in clusters[ind]])
        S=[300*np.sqrt(i.nrInstances/maxs) for i in clusters[ind]]
        # NOTE(review): this repeats one color LETTER len(X) times, giving a
        # string like 'rrr' — presumably colors[ind] alone was intended.
        C=colors[ind]*len(X)
#        ax.scatter(X[-1],Y[-1],S,C,label=dataSets[ind][:-5])
#        if ver!=centroids[-1]:
#            for inst in ver.instances:
#                fnn=centroids[ind+1].instances
#                dists=[ver.squareDistanceOnProjected(inst,i) for i in fnn]
#                fnn=[i for i in fnn if ver.squareDistanceOnProjected(inst,i)==min(dists)]
#                fnn=fnn[0]
#                X=[inst.x, fnn.x]; Y=[inst.y, fnn.y]
#                ax.plot(X,Y,C[0])
   
    # Per-release centroid statistics, then release-to-release differences.
    meanX=[np.mean(X[i]) for i in range(len(X))]
    meanY=[np.mean(Y[i]) for i in range(len(X))]
    sdX=[np.std(X[i]) for i in range(len(X))]
    sdY=[np.std(Y[i]) for i in range(len(X))]
    
    # Translation = centroid-mean shift; scale = % change in spread.
    transX=np.diff(meanX)
    scaleX=[i*100 for i in np.divide(np.diff(sdX),sdX[:-1])]
    transY=np.diff(meanY)
    scaleY=[i*100 for i in np.divide(np.diff(sdY),sdY[:-1])]
        
        
    handles, labels = ax.get_legend_handles_labels()                
    fig.legend(handles, labels)
    fig.show()
    if savefigname:
        fig.savefig('graphs/'+savefigname+'.png')
        plt.close(fig)
    
    return transX, transY, scaleX, scaleY

def createDeltaSet(versions, extras=False):
    """Build a cross-release "delta" data set for feature selection.

    For every module (identified by attributeValues[2], presumably the
    module name — confirm against readCK_CSV) present in two consecutive
    releases, one instance is emitted whose numeric attributes are the
    differences release(i+1) - release(i); with extras=True the raw values
    of both releases are appended as well.  The class attribute
    "DefectType" encodes the defect transition:
        A: defective->defective  B: defective->clean
        C: clean->defective      D: clean->clean

    Fix vs. the original: cloned attributes' value lists were assigned via
    the misspelled name `attributValues`, so the clones silently kept
    their source value lists instead of being cleared (compare the correct
    spelling used for the class attribute below).
    """
    # Collect every module name seen in any release.
    modnames=[]
    for ver in versions:
        modnames+=[inst.attributeValues[2] for inst in ver.instances]
    # Map module name -> list of per-release instances (None where absent).
    module_instance_dict={}
    for modname in modnames:
        module_instance_dict.update({modname:[None]*len(versions)})
    for ver_ind, ds in enumerate(versions):
        for row in ds.instances:
            name=row.attributeValues[2]
            module_instance_dict[name][ver_ind]=row.cloneInstance()
    delta_set=DataSet(False)

    def _append_clones(attrs, suffix):
        # Clone each source attribute, rename with `suffix`, clear its
        # nominal value list and give it the next sequential Id.
        for attr in attrs:
            newattr=attr.cloneAttribute()
            newattr.isNumeric=attr.isNumeric
            newattr.attributeName+=suffix
            newattr.attributeValues=[]  # fixed: was `attributValues` (typo)
            newattr.Id=len(delta_set.attributes)
            delta_set.attributes.append(newattr)

    _append_clones(versions[0].attributes[:-1], "_delta")
    if extras:
        _append_clones(versions[0].attributes[3:], "_A")
        _append_clones(versions[0].attributes[3:-1], "_B")

    classattr=Attribute()
    classattr.attributeName="DefectType"
    classattr.attributeValues=["A", "B", "C", "D"]
    classattr.Id=len(delta_set.attributes)
    delta_set.attributes.append(classattr)

    for version_list in module_instance_dict.values():
        for ind, row in enumerate(version_list[:-1]):
            row2=version_list[ind+1]
            if not (row and row2):
                continue  # module missing from one of the two releases
            def1=int(row.attributeValues[-1])
            def2=int(row2.attributeValues[-1])
            if def1:
                deftype="A" if def2 else "B"
            else:
                deftype="C" if def2 else "D"
            newinst=row.cloneInstance()
            newinst.attributeValues=newinst.attributeValues[:-1]
            # Numeric deltas between the consecutive releases.
            newinst.attributeValues[3:]=[str(float(b)-float(a)) for a,b in zip(row.attributeValues[3:-1],row2.attributeValues[3:-1])]
            if extras:
                newinst.attributeValues+=row.attributeValues[3:]
                newinst.attributeValues+=row2.attributeValues[3:-1]
            newinst.attributeValues.append(deftype)
            delta_set.addInstance(newinst)
    return delta_set

def experimentTemporal(srcDir="data_csv/", dataSets=['camel-1.0.csv', 'camel-1.2.csv', 'camel-1.4.csv', 'camel-1.6.csv'], savefigname=None):
    """Build a combined temporal data set across consecutive releases: per
    matched module, the attributes of release i ("_A"), release i+1 ("_B")
    and their numeric deltas ("_delta"), with a binary class marking
    whether the defect transition equals the module-level `target_class`.
    Prints the A/B/C/D/no-match transition percentages per release pair
    and returns the assembled DataSet.

    NOTE(review): three internal inconsistencies to confirm before reuse:
      * the class attribute declares values A-D but instances get "1"/"0";
      * `attributValues` below is a typo (cf. `attributeValues` above), so
        that attribute's value list is never actually cleared;
      * `row.createInstance()` — everywhere else the code uses
        cloneInstance(); confirm createInstance exists on Instance.
    """
    versions=[]
    for dataSet in dataSets:
        ind = dataSet.rfind(".")
        dataSet = dataSet[0:ind]
        versions.append(readCK_CSV(srcDir + dataSet + ".csv"))
    # Module names (attributeValues[2]) per release, for cross-matching.
    modnames=[[j.attributeValues[2] for j in i.instances] for i in versions]
###print common names
#    for ver, lst in enumerate(modnames):
#        if ver==0: continue
#        for item in lst:
#            if not modnames[ver-1].__contains__(item):
#                print ver, item

###count common names
#    allnames=[]
#    for name in modnames[0]:
#        found=True
#        for ver in modnames[1:]:
#            if not ver.__contains__(name): 
#                found=False
#        if found: allnames.append(name)
#    print "Name counts:"
#    print [100*len(allnames)/len(i) for i in modnames]
    newset=DataSet(False)
    # "_A" attributes: the older release's raw values.
    for attr in versions[0].attributes[3:]:
        newattr=attr.cloneAttribute()    
        newattr.isNumeric=attr.isNumeric
        newattr.attributeName+="_A"
        newattr.attributeValues=[]
        newattr.Id=len(newset.attributes)
        newset.attributes.append(newattr)
    newset.attributes[-1].attributeValues=['0','1']
    # "_B" attributes: the newer release's raw values (class excluded).
    for attr in versions[0].attributes[3:-1]:
        newattr=attr.cloneAttribute()      
        newattr.isNumeric=attr.isNumeric    
        newattr.attributeName+="_B"
        newattr.attributeValues=[]
        newattr.Id=len(newset.attributes)
        newset.attributes.append(newattr)
    # "_delta" attributes: newer minus older, per metric.
    for attr in versions[0].attributes[3:-1]:
        newattr=attr.cloneAttribute()      
        newattr.isNumeric=attr.isNumeric    
        newattr.attributeName+="_delta"
        # NOTE(review): typo — `attributValues` creates a stray field and
        # leaves the clone's real attributeValues untouched.
        newattr.attributValues=[]
        newattr.Id=len(newset.attributes)
        newset.attributes.append(newattr)

    classattr=Attribute()
    classattr.attributeName="DefectType"
    classattr.attributeValues=["A", "B", "C", "D"]
    #classattr.attributeValues=['0','1']
    classattr.Id=len(newset.attributes)
    newset.attributes.append(classattr)
    for ind, ds in enumerate(versions[:-1]):  
        types=[]
        ns=versions[ind+1]
        for row in ds.instances:
            deftype="none"
            # Only modules present in BOTH releases contribute an instance.
            if modnames[ind+1].__contains__(row.attributeValues[2]):
                ind2=modnames[ind+1].index(row.attributeValues[2])
                row2=ns.instances[ind2]
                def1=int(row.attributeValues[-1])
                def2=int(row2.attributeValues[-1])
                if def1:
                    if def2 : deftype="A"
                    else    : deftype="B"
                else:
                    if def2 : deftype="C"
                    else    : deftype="D"
            
                newinst=row.createInstance()
                newinst.attributeValues=row.attributeValues[3:]
                # Clamp multi-defect counts to the binary label '1'.
                if not ['0','1'].__contains__(newinst.attributeValues[-1]):
                    newinst.attributeValues[-1]='1'
                newinst.attributeValues+=(row2.attributeValues[3:-1])
                newinst.attributeValues+=[str(float(b)-float(a)) for a,b in zip(row.attributeValues[3:-1],row2.attributeValues[3:-1])]
                newinst.attributeValues.append("1" if deftype==target_class else "0")
                newset.addInstance(newinst)
            types.append(deftype)
        # Transition distribution for this release pair, in percent.
        counts=[100.0*types.count(i)/len(types) for i in ['A', 'B', 'C', 'D', 'none']]
        print "Type A: %u%%    B: %u%%    C: %u%%    D: %u%%    NoMatch: %u%%"%tuple(counts)
    print ""    
    return newset
    
    
  
def runVersions():
    srcDir="data_arff/"
    dataSets=os.listdir(srcDir)
    dataSets.sort()
    multiSets=[]
    for s in dataSets:
        diff= dataSets[:]
        diff.remove(s)
        diff=[i[:4] for i in diff]
        if diff.__contains__(s[:4]):
            multiSets.append(s)
    nestedSets=[]
    while multiSets:
        group=[multiSets[0]]
        multiSets=multiSets[1:]
        group+=[i for i in multiSets if i.startswith(group[0][:4])]
        multiSets=[i for i in multiSets if not i.startswith(group[0][:4])]
        nestedSets.append(group)
        TX=[] 
        TY=[]
        SX=[] 
        SY=[]
    for group in nestedSets:
        print group
        transX, transY, scaleX, scaleY = plotVersions(srcDir, group, group[0].split('-')[0])
        TX.append(transX)
        TY.append(transY)
        SX.append(scaleX)
        SY.append(scaleY)
    fig=plt.figure()
    fig.hold(True)
    ax1=fig.add_subplot(211)
    ax2=fig.add_subplot(212)

    for ind, (tx, ty, sx, sy) in enumerate(zip(TX,TY,SX,SY)):
        if len(sx)<3: continue
        color=colorcycle.next()
        ax1.plot(tx,ty, c=color, marker='.', markersize=10.0, ls='-', label=nestedSets[ind][0].split('-')[0])
        ax2.plot(sx,sy, c=color, marker='.', markersize=10.0, ls='-')
        ax1.set_xlabel("Translation in X")
        ax1.set_ylabel("Translation in Y")
        ax2.set_xlabel("Scale in X")
        ax2.set_ylabel("Scale in Y")
    lines, labels = ax1.get_legend_handles_labels()   
    fig.legend(lines, labels, 'upper left')
    fig.show()
    return fig
        

    
        
        
            
  
       
def plotVersions(srcDir, dataSets, savefigname=None):
    """FastMap-project each ARFF release of one project, grid-cluster the
    projections, scatter-plot the cluster centroids (marker size scaled by
    cluster population), and return release-to-release translation and
    scale statistics of the centroid clouds.

    NOTE(review): `plt` is undefined at module level (matplotlib import is
    commented out) — this raises NameError as the module stands.
    """
    versions=[]
    for dataSet in dataSets:
        # Strip the extension; the .arff suffix is re-added explicitly.
        ind = dataSet.rfind(".")
        dataSet = dataSet[0:ind]
        versions.append(ReadDataSet(srcDir + dataSet + ".arff"))
    # Feature selection on the merged, discretized data; applied per release.
    overall=mergeDataSets(versions)
    overall_discrete=overall.FayyadIraniDiscretizer("class")
    features=overall_discrete.featureReductionViaInfoGain('class', 0.5) 
    #print [i.estimateID() for i in versions]
    #versions=[i.applyFSS(features) for i in versions]
    versions=[i.applyFSS(features) for i in versions]
    versions=[i.NormalizeDataSet(False) for i in versions]
    #print [i.estimateID() for i in versions]
    projected=[ProjectViaFastMap(i) for i in versions]
    quadrants=[SplitIntoQuadrants(i, "C2") for i in projected]
    clusters=[]
    for ver in quadrants:
        clusters.append(GridClusteringWithNoFilter(ver))
        # Drop empty clusters before computing centroids.
        clusters[-1]=[i for i in clusters[-1] if i.nrInstances!=0]
    centroids=[GetClusterCentroids(i) for i in clusters]
    fig=plt.figure()
    fig.hold(True)
    ax=fig.add_subplot(111)
    X = []
    Y = []
    for ind, ver in enumerate(centroids):
        X.append([i.x for i in ver.instances])
        Y.append([i.y for i in ver.instances])
        # Marker area scaled by relative cluster population.
        maxs=max([i.nrInstances for i in clusters[ind]])
        S=[300*np.sqrt(i.nrInstances/maxs) for i in clusters[ind]]
        # NOTE(review): repeats one color LETTER len(X) times ('rrr...') —
        # presumably colors[ind] alone was intended; scatter tolerates it.
        C=colors[ind]*len(X)
        ax.scatter(X[-1],Y[-1],S,C,label=dataSets[ind][:-5])
#        if ver!=centroids[-1]:
#            for inst in ver.instances:
#                fnn=centroids[ind+1].instances
#                dists=[ver.squareDistanceOnProjected(inst,i) for i in fnn]
#                fnn=[i for i in fnn if ver.squareDistanceOnProjected(inst,i)==min(dists)]
#                fnn=fnn[0]
#                X=[inst.x, fnn.x]; Y=[inst.y, fnn.y]
#                ax.plot(X,Y,C[0])
   
    # Per-release centroid statistics, then release-to-release differences.
    meanX=[np.mean(X[i]) for i in range(len(X))]
    meanY=[np.mean(Y[i]) for i in range(len(X))]
    sdX=[np.std(X[i]) for i in range(len(X))]
    sdY=[np.std(Y[i]) for i in range(len(X))]
    
    # Translation = centroid-mean shift; scale = % change in spread.
    transX=np.diff(meanX)
    scaleX=[i*100 for i in np.divide(np.diff(sdX),sdX[:-1])]
    transY=np.diff(meanY)
    scaleY=[i*100 for i in np.divide(np.diff(sdY),sdY[:-1])]
        
        
    handles, labels = ax.get_legend_handles_labels()                
    fig.legend(handles, labels)
    fig.show()
    if savefigname:
        fig.savefig('graphs/'+savefigname+'.png')
        plt.close(fig)
    
    return transX, transY, scaleX, scaleY
    
       
       
def experimentID():
    srcDir="data_arff/"
    dataSets=os.listdir(srcDir)
    

    for dataSet in dataSets:
        ind = dataSet.rfind(".")
        dataSet = dataSet[0:ind]
        data=ReadDataSet(srcDir + dataSet + ".arff")
        ID, cert = data.estimateID()
        print "%s: %1.1f"%(dataSet, ID)
    
     
       

def experiment(srcDir = "data_arff/"):
    """Run the repeats x folds cross-validation benchmark on every configured data set.

    srcDir -- directory holding the .arff files. The default directory selects
    the ten defect-prediction data sets listed below and sets the global
    target_class to '1'; any other directory runs on 'temp.arff'.

    Writes one detailed log per data set plus two summary logs under logs/,
    all tagged with the experiment start timestamp.
    """
    logDir = "logs/"
    folds = 3
    repeats = 3
    global target_class

    noiseLevels = [0]  # training-set noise injection levels; 0 = no noise
    #dataSets=["camel.arff", "ivy.arff", "jedit.arff", "log4j.arff", "lucene.arff", "synapse.arff", "velocity.arff", "xalan.arff", "xerces.arff", "ant.arff"]
    dataSets=['temp.arff']    
    if srcDir=="data_arff/":
        dataSets=['ant-1.7.arff','ivy-1.1.arff', 'jedit-4.1.arff', 'log4j-1.1.arff', 'lucene-2.4.arff', 'poi-3.0.arff', 'synapse-1.2.arff', 'velocity-1.6.arff', 'xalan-2.6.arff', 'xerces-1.4.arff']
        target_class='1'
    #dataSets = ['cocomo81.arff', 'cocomo81e.arff', 'cocomo81o.arff', 'cocomo81s.arff', 'nasa93.arff', 'nasa93c1.arff', 'nasa93c2.arff', 'nasa93c5.arff', 'china.arff', 'miyazaki.arff']
    
    # learner specs parsed by ParseLearner; PEEKER's arguments are
    # (stopingCriteria, k, fssPercentage, theta, normalize)
    #learners = ['MyNB', 'PEEKER(C, 2, 0.25, 0.5, 1)']#, 'NB', 'RF', 'LR', 'M5P',   
    learners=['PEEKER(C, 2, 0.25, 9.5, 1)']
    #learners=['GaussianNB', 'MyNB']
#    for alpha in np.linspace(0,1,6):
#        for discretize in [True, False]:
#            for binarize in np.linspace(0,1,6):
#                learners+=['BernoulliNB(%f, %f, %s)'%(alpha, binarize, str(discretize))]
#            #learners+=['MultinomialNB(%f, %s)'%(alpha, str(discretize))]
    strD = getCurrentDate()  # timestamp id shared by all log files of this run
    generalLogFile = open(logDir + "log_" + strD + "_1.txt", "w")
    generalLogFile2 = open(logDir + "log_" + strD + "_2.txt", "w")

    for dataSet in dataSets:
        ind = dataSet.rfind(".")
        dataSet = dataSet[0:ind]  # strip the ".arff" extension
        CompareLearners(srcDir + dataSet+".arff", learners, noiseLevels, strD, generalLogFile, generalLogFile2, target_class, repeats, folds)
        
    generalLogFile.close()
    generalLogFile2.close()

def export():
    """Export the PEEKER intermediate artefacts (quadrants, projection, centroids)
    for each configured data set into output/."""
    sourceDir = "data_arff/"
    targetDir = "output/"
    # configuration: 25% info-gain feature selection, normalization with
    # outlier removal (1), size-based stopping criterion "C"
    for name in ['miyazaki.arff']:
        ExportPeeker(sourceDir + name, targetDir, 0.25, 1, "C")

def CompareLearnersShort(dataSet, learners, classValue, repeats, folds, performances, noiseLevels=(0,)):
    """Run a repeats x folds cross-validation of the learners, accumulating
    results into the supplied performances dict (no log-file output).

    dataSet -- an already-loaded data set object (supports .split)
    learners -- learner spec strings (see ParseLearner)
    classValue -- target class (kept for signature compatibility; unused here)
    performances -- dict created by InitializePerformances, filled in place
    noiseLevels -- noise-injection levels; default is a single clean run.
        Fix: the original body read a free variable `noiseLevels` that is not
        defined at module level, so every call raised NameError; it is now an
        explicit parameter with a backward-compatible default.
    """
    for run in range(repeats):
        # shuffle and split the data once per repeat
        crossValDataSets = dataSet.split(folds)
        InitializePerformanceForRepeatStep(performances, run)
        for fold in range(folds):
            print(str(run) + " " + str(fold))
            (trainDs, testDs) = GetTrainAndTestSets(crossValDataSets, fold=fold)
            # train/test every learner; results accumulate in `performances`
            RunLearners(learners, noiseLevels, trainDs, testDs, performances, run=run, fold=fold)

def CompareLearners(dataSetName, learners, noiseLevels, strD, generalLogFile, generalLogFile2, classValue, repeats, folds):
    """Cross-validate every learner on one data set and log the results.

    dataSetName -- path of the .arff file to benchmark
    learners -- learner spec strings, e.g. "PEEKER(C, 2, 0.25, 9.5, 1)"
    noiseLevels -- noise-injection levels applied to the training folds (0 = clean)
    strD -- experiment start timestamp, used as id in every log file name
    generalLogFile, generalLogFile2 -- experiment-wide summary log files
    classValue -- target class whose classification measures are reported
    repeats, folds -- cross-validation configuration

    Per-(run, fold) metrics go to a data-set specific log; Win/Tie/Loss
    comparisons (Wilcoxon tests) are appended at the end.
    """
    (procFileName, logFileName) = DefineFileNames(dataSetName,strD)
    logFile = open("logs/" + logFileName, 'w')
    print procFileName
    dataSet = ReadDataSet(dataSetName)
    performances = InitializePerformances(learners, noiseLevels) #holds every metric collected per learner/run/fold
    for i in range(repeats):
        crossValDataSets = dataSet.split(folds) #randomize the order of instances and split the data set by the number of folds
        InitializePerformanceForRepeatStep(performances, i)
        for j in range(folds):
            print str(i) + " " + str(j)        
            (trainDs,testDs) = GetTrainAndTestSets(crossValDataSets, fold=j)
            #train and test each learner and store the corresponding results on the performances object
            RunLearners(learners, noiseLevels, trainDs, testDs, performances, run=i, fold=j)
            #write the performances of the current experimental run to this data set's log file
            PrintPerformancesForExperimentalRun(logFile, performances, trainDs, testDs, classValue, run=i, fold=j)
    winTieLoss = ComparePerformances(dataSet.isClassNumeric, performances) #Win/Tie/Loss via Wilcoxon statistical tests
    PrintComparisons(dataSet.isClassNumeric, logFile, winTieLoss, classValue) #write the Win/Tie/Loss values to the data-set log
    PrintSummary(dataSet.isClassNumeric, generalLogFile, generalLogFile2, procFileName, winTieLoss) #summary row in the general log(s)
    logFile.close()


def ReadDataSet(dataSetName):
    """Load an ARFF file and return the data set with class statistics precomputed."""
    loaded = readArff(dataSetName)
    loaded.computeClassStatistics()
    return loaded

def GetTrainAndTestSets(crossValDataSets, fold):
    """Pick fold `fold` as the test set and merge the remaining folds into the
    training set. Returns (trainDs, testDs)."""
    remaining = crossValDataSets[:]
    testDs = remaining[fold]
    remaining.remove(testDs)  # drop the held-out fold from the copy
    return (mergeDataSets(remaining), testDs)

def RunLearners(learners, noiseLevels, trainDs, testDs, performances, run, fold):
    """Train and evaluate every learner at every noise level, recording results
    in the performances dict."""
    for level in noiseLevels:
        # corrupt a copy of the training data; level 0 returns it untouched
        noisyTrain = InjectNoise(trainDs, level)
        for learnerSpec in learners:
            name = GetLearnerName(learnerSpec, level, noiseLevels)
            RunLearner(name, noisyTrain, testDs, performances, run, fold)

def SKAdapter(learner, trainDs, testDs, params=[]):
    """Train and test a scikit-learn naive-Bayes variant on library data sets.

    learner -- class name inside sklearn.naive_bayes, e.g. "BernoulliNB"
    params -- constructor arguments as strings, joined verbatim into the call
    Returns (predictions, trainDs, trainTime, testTime, totalTime) where
    predictions is a list of (actual, predicted) pairs.

    NOTE(review): this relies on the alias `bayes`
    (`import sklearn.naive_bayes as bayes`), which is commented out at the top
    of the file -- calling this as-is raises NameError; re-enable that import.
    HACK: `eval` executes the learner/params string as code; learner specs must
    never come from untrusted input.
    """
    t = datetime.now()    
    # flatten instances to plain float feature vectors; the last column is the class
    features=[[float(j) for j in i.attributeValues[:-1]] for i in trainDs.instances]
    classes=[i.attributeValues[-1] for i in trainDs.instances]
    # e.g. learner="BernoulliNB", params=["0.5","0.25","True"] -> bayes.BernoulliNB(0.5, 0.25, True)
    learner=eval('bayes.'+learner+'('+', '.join(params)+')')
    learner.fit(features,classes)
    trainingRunTime = datetime.now() - t

    #testing process
    t = datetime.now()
    predictions = []
    for instance in testDs.instances:
        predClass = learner.predict([float(j) for j in instance.attributeValues[:-1]])
        predictions.append((instance.classVal, predClass))
    testingRunTime = datetime.now() - t
    return (predictions, trainDs, trainingRunTime, testingRunTime, trainingRunTime + testingRunTime)

def RunLearner(learner, trainDs, testDs, performances, repeat, fold):
    """Parse a learner spec, dispatch to the matching implementation, and record
    the run's performance. Raises Exception for unknown learner names."""
    (learnerName, params) = ParseLearner(learner)

    def matches(base):
        # a learner runs under its plain name or a "<name>_noisy_<level>" alias
        return learnerName == base or learnerName.startswith(base + "_noisy_")

    if learnerName.startswith("BernoulliNB"):
        outcome = SKAdapter(learnerName, trainDs, testDs, params)
    elif learnerName.startswith("GaussianNB"):
        outcome = SKAdapter(learnerName, trainDs, testDs)
    elif learnerName.startswith("MultinomialNB"):
        outcome = SKAdapter(learnerName, trainDs, testDs, params)
    elif matches("NB"):
        outcome = NaiveBayes(trainDs, testDs)
    elif matches("MyNB"):
        outcome = MyNaiveBayes(trainDs, testDs)
    elif matches("RF"):
        outcome = RandomForest(trainDs, testDs)
    elif matches("NN"):
        outcome = NearestNeighbor(trainDs, testDs, k=int(params[0]))
    elif matches("PEEKER"):
        outcome = PEEKER(trainDs, testDs, stopingCriteria=params[0], k=int(params[1]),
                         fssPercentage=float(params[2]), theta=float(params[3]),
                         normalize=int(params[4]))
    elif matches("LR"):
        outcome = LinearRegression(trainDs, testDs)
    elif matches("M5P"):
        outcome = M5P(trainDs, testDs)
    else:
        raise Exception("RunLearner - unknown learner: " + str(learner) + "!")

    (predictions, dataSet, trainTime, testTime, allRunTime) = outcome
    EstimatePerformanceForExperimentalRun(performances, predictions, dataSet, trainTime, testTime, allRunTime, repeat, fold, learner)

def ParseLearner(command):
    """Split a learner spec like "NN(3)" into ("NN", ["3"]).

    A spec without a parenthesised argument list yields an empty parameter
    list; each parameter is whitespace-stripped.
    """
    command = command.strip()
    openIdx = command.find("(")
    closeIdx = command.find(")")
    if openIdx == -1 or closeIdx == -1:
        return (command, [])
    args = [arg.strip() for arg in command[openIdx + 1:closeIdx].split(',')]
    return (command[0:openIdx], args)

def PEEKER(trainDs, testDs, stopingCriteria, k, fssPercentage, theta, normalize):
    """Train the PEEKER condensed nearest-centroid model and classify testDs.

    Returns (predictions, condensedTrainSet, trainTime, testTime, totalTime).
    """
    start = datetime.now()
    condensed = PEEKER_Train(trainDs, stopingCriteria, fssPercentage, normalize)
    trained = datetime.now()
    predictions = PEEKER_Test(testDs, condensed, k, theta, normalize)
    finished = datetime.now()
    return (predictions, condensed, trained - start, finished - trained, finished - start)

def PEEKER_Train(trainDs1, stopingCriteria, fssPercentage, normalize):
    """Build PEEKER's condensed training set:
    feature selection -> normalization -> FastMap projection -> quadrant
    dendrogram -> grid clustering -> cluster centroids."""
    reduced = FeatureSelectionViaInfoGain(fssPercentage, trainDs1)
    normalized = NormalizeTrainDataSet(normalize, reduced)
    projected = ProjectViaFastMap(normalized)
    dendrogram = SplitIntoQuadrants(projected, stopingCriteria)
    clusters = GridClusteringWithNoFilter(dendrogram)
    return GetClusterCentroids(clusters)

def PEEKER_Test(testDs1, condensed, k, theta, normalize):
    """Classify each test instance from its k nearest centroids using
    exponential distance weighting; returns (actual, predicted) pairs."""
    reduced = ApplyFeatureSelectionOnTestData(testDs1, condensed.attributes)
    normalized = NormalizeTestDataSet(normalize, reduced, condensed.attributes)
    return [(inst.classVal,
             condensed.estimateClassFromNearestNeighbors_Exponential(inst, k, theta))
            for inst in normalized.instances]

def InjectNoise(origDataSet, injectIterations):
    """Return the data set with `injectIterations` rounds of mucker noise
    applied; 0 returns the original object untouched."""
    if injectIterations == 0:
        return origDataSet
    converted = TransformToNewFormat(origDataSet)
    noisy = mucker(converted, injectIterations)
    return TransformToOriginalFormat(noisy, origDataSet)

def NormalizeTrainDataSet(normalize, trainDs):
    """Normalize the training set: 0 = no normalization, 1 = with outlier
    removal, 2 = without. Raises Exception for any other mode."""
    if normalize == 0:
        return trainDs
    if normalize in (1, 2):
        return trainDs.NormalizeDataSet(outlierRemoval=(normalize == 1))
    raise Exception("Unknown normalize parameter: " + str(normalize))
        
def NormalizeTestDataSet(normalize, testDs, attributes):
    """Apply the training-set normalization to the test set using the training
    attributes: 0 = none, 1 = with outlier removal, 2 = without."""
    if normalize == 0:
        return testDs
    if normalize in (1, 2):
        return testDs.ApplyNormalization(attributes, outlierRemoval=(normalize == 1))
    raise Exception("Unknown normalize parameter: " + str(normalize))



def FeatureSelectionViaInfoGain(fssPercentage, trainDs):
    """Return trainDs reduced to the features selected by information gain.

    fssPercentage -- threshold forwarded to featureReductionViaInfoGain;
                     None disables feature selection entirely.

    Fixes: `!= None` replaced with the idiomatic `is not None`, and the median
    discretization of a numeric class is no longer computed (and discarded)
    when feature selection is disabled.
    """
    if fssPercentage is None:
        return trainDs
    # info gain needs a discrete class: a numeric class is median-split first,
    # then numeric attributes are discretized with Fayyad-Irani
    dataSet = DiscretizeNumericClassByMedian(trainDs)
    discTrainDs = dataSet.FayyadIraniDiscretizer("class")
    selectedFeatures = discTrainDs.featureReductionViaInfoGain("class", fssPercentage)
    return trainDs.applyFSS(selectedFeatures)


def ApplyFeatureSelectionOnTestData(testDs, attributes):
    """Project testDs onto the given attribute subset; a matching attribute
    count means no selection happened, so the set is returned as-is."""
    if len(attributes) == len(testDs.attributes):
        return testDs
    selected = getColumnIndexForAttributes(attributes)
    return testDs.applyFSS(selected)

def ProjectViaFastMap(trainDs):
    """Project the data set via its own FastMap implementation."""
    projection = trainDs.project()
    return projection

def SplitIntoQuadrants(projectedDs, stopingCriteria):
    """Recursively split the projected 2-D space into a dendrogram of quadrants
    and return its root."""
    # quadrants stop dividing once they would drop below this instance count
    minSize = DefineMinClusterSize(stopingCriteria, projectedDs)
    root = Quadrant(None, "0", projectedDs, 0, None, None, None, None)
    root.divide(minSize)
    return root

def Condense(trainDs, clusters):
    """Replace each cluster by its centroid, returning the condensed training
    set with refreshed class statistics."""
    condensed = trainDs.condenseDataSet(clusters)
    condensed.computeClassStatistics()
    return condensed

def GridClusteringWithNoFilter(quadrant):
    """Turn every leaf of the quadrant dendrogram into a cluster; no filtering
    is applied to the resulting clusters."""
    leaves = []
    quadrant.getAllLeaves(leaves)  # fills `leaves` in place
    return gridClustering(quadrant, leaves, "")

def DefineMinClusterSize(stopingCriteria, trainDs):
    """Minimum allowed cluster size (in instances): sqrt(n) for "C1",
    2*sqrt(n) for "C2"; any other criterion picks sqrt(n) for small training
    sets (< 100 instances) and 2*sqrt(n) otherwise."""
    root = trainDs.NrInstances() ** 0.5
    if stopingCriteria == "C1":
        return root
    if stopingCriteria == "C2":
        return 2 * root
    return root if len(trainDs.instances) < 100 else 2 * root

def DiscretizeNumericClassByMedian(dataSet):
    """Median-split a numeric class into a discrete one; data sets with a
    categorical class pass through unchanged."""
    if not dataSet.isClassNumeric:
        return dataSet
    return dataSet.discretizeNumericClassByMedian()

def NearestNeighbor(trainDs, testDs, k):
    """Classic k-NN: predict each test instance from its k nearest training
    instances (un-projected distance).

    Returns (predictions, trainDs, trainTime, testTime, totalTime) where
    predictions is a list of (actual, predicted) pairs.

    Fix: training time was the int 0, but downstream timing code treats any
    non-None value as a timedelta; it is now timedelta(0) (k-NN has no
    training phase), so timedelta arithmetic and attribute access both work.
    """
    trainingRunTime = timedelta(0)
    t = datetime.now()
    predictions = []
    for instance in testDs.instances:
        predClass = trainDs.estimateClassFromNearestNeighbors(instance, k, projected=False)
        predictions.append((instance.classVal, predClass))
    testingRunTime = datetime.now() - t
    return (predictions, trainDs, trainingRunTime, testingRunTime, trainingRunTime + testingRunTime)

def MyNaiveBayes(trainDs1, testDs1):
    """Hand-rolled Naive Bayes: Fayyad-Irani discretization plus per-class
    likelihood estimation.

    Returns (predictions, discretizedTrainSet, trainTime, testTime, totalTime).
    """
    start = datetime.now()
    trainDs = trainDs1.FayyadIraniDiscretizer("class")
    # the test set is discretized with the cut points learned on the training set
    testDs = testDs1.discretizeDataSet(trainDs.attributes)
    trainDs.computeClassStatistics()
    trainDs.computeAttributeValuesFrequencies()
    trainingRunTime = datetime.now() - start

    start = datetime.now()
    predictions = [(inst.classVal, trainDs.Likelihood(inst)) for inst in testDs.instances]
    testingRunTime = datetime.now() - start
    return (predictions, trainDs, trainingRunTime, testingRunTime, trainingRunTime + testingRunTime)


def NaiveBayes(trainDs1, testDs1, discretize=True):
    """Weka NaiveBayes wrapper; optionally Fayyad-Irani discretizes the train
    set and applies its cut points to the test set first.

    Returns (predictions, trainDs, None, None, overallRunTime) -- Weka runs do
    not time train and test separately.
    """
    start = datetime.now()
    trainDs = trainDs1
    testDs = testDs1
    if discretize == True:
        trainDs = trainDs1.FayyadIraniDiscretizer("class")
        testDs = testDs1.discretizeDataSet(trainDs.attributes)
    predictions = WekaLearner(trainDs, testDs, "weka.classifiers.bayes.NaiveBayes", "")
    return (predictions, trainDs, None, None, datetime.now() - start)

def RandomForest(trainDs1, testDs1, discretize=True):
    """Weka RandomForest wrapper (10 trees, seed 1); optionally discretizes
    train/test with Fayyad-Irani first.

    Returns (predictions, trainDs, None, None, overallRunTime).
    """
    start = datetime.now()
    trainDs = trainDs1
    testDs = testDs1
    if discretize == True:
        trainDs = trainDs1.FayyadIraniDiscretizer("class")
        testDs = testDs1.discretizeDataSet(trainDs.attributes)
    predictions = WekaLearner(trainDs, testDs, "weka.classifiers.trees.RandomForest", "-I 10 -K 0 -S 1")
    return (predictions, trainDs, None, None, datetime.now() - start)

def LinearRegression(trainDs, testDs):
    """Weka LinearRegression wrapper (no attribute selection, ridge 1e-8).

    Returns (predictions, trainDs, None, None, overallRunTime).
    """
    start = datetime.now()
    trainDs.computeClassStatistics()
    predictions = WekaLearner(trainDs, testDs,
                              "weka.classifiers.functions.LinearRegression",
                              " -S 0 -R 1.0E-8 -i")
    return (predictions, trainDs, None, None, datetime.now() - start)

def M5P(trainDs, testDs):
    """Weka M5P model-tree wrapper (minimum 4 instances per leaf).

    Returns (predictions, trainDs, None, None, overallRunTime).
    """
    start = datetime.now()
    trainDs.computeClassStatistics()
    predictions = WekaLearner(trainDs, testDs, "weka.classifiers.trees.M5P", " -M 4.0 -i")
    return (predictions, trainDs, None, None, datetime.now() - start)

def WekaLearner(trainDs, testDs, learner, parameters):
    jar = "/usr/share/java/weka.jar "
    weka = "nice -n 20 java -Xmx2048M -cp " + jar
    trainDs.exportDataSetToArff("train.arff")
    testDs.exportDataSetToArff("test.arff")

    cmdLinux = weka + learner + " -p 0 " + parameters + " -t train.arff -T test.arff > results.txt"
    cmd = "java " + learner + " -p 0 " + parameters + " -t train.arff -T test.arff > results.txt"
    os.system(cmdLinux)    

    i = 0

    predictions = []

    f = open("results.txt", "r")
    for line in f: 
        i += 1
        if i > 5:
            tokens = split(line)
            if len(tokens) < 4: continue
            actual = tokens[1]
            predicted = tokens[2]
            if trainDs.isClassNumeric == True: #regression
                predictions.append((float(actual), float(predicted)))
            else: #classification
                ind1 = string.rfind(actual, ":")
                ind2 = string.rfind(predicted, ":")
                if ind1 >= 0 and ind1 < len(actual)-1:
                    actual = actual[ind1+1:]
                if ind2 >= 0 and ind2 < len(predicted)-1:
                    predicted = predicted[ind1+1:]
                predictions.append((actual, predicted))
    f.close()

    return predictions

def TransformToNewFormat(origDataSet):
    """Convert a library data set into the mucker Dataset/Inst/Feature format
    (used by the noise injector)."""
    converted = Dataset()
    for attr in origDataSet.attributes:
        converted.features.append(attr.attributeName)
    for idx in range(origDataSet.NrInstances()):
        original = origDataSet.instances[idx]
        inst = Inst()
        inst.id = idx + 1  # mucker instance ids are 1-based
        inst.origId = original.Id
        for col in range(origDataSet.NrAttributes()):
            feature = Feature()
            feature.id = col
            feature.header = origDataSet.attributes[col].attributeName
            feature.value = original.attributeValues[col]
            inst.features.append(feature)
        converted.instances.append(inst)
    return converted

def TransformToOriginalFormat(newDataSet, origDataSet):
        """Convert a mucker-format data set back into the library format,
        copying the original data set's structure and rebuilding each instance.

        newDataSet -- mucker Dataset whose instances carry the (noisy) values
        origDataSet -- the original library data set, used as the template
        Returns a fresh library data set with the rebuilt instances added.
        """
        dataSet = origDataSet.copyDataSet()

        for inst in newDataSet.instances:
                instance = Instance()
                instance.Id = inst.origId
                for feature in inst.features:
                        instance.attributeValues.append(feature.value)
                instance.classVal = instance.attributeValues[origDataSet.NrAttributes()-1]
                # restore the class value's type: numeric classes become float,
                # categorical labels are presumably stored as floats by the mucker
                # and converted back to their integer-string form
                if origDataSet.isClassNumeric == True:
                        instance.attributeValues[origDataSet.NrAttributes()-1] = float(instance.attributeValues[origDataSet.NrAttributes()-1])
                else:
                        instance.attributeValues[origDataSet.NrAttributes()-1] = str(int(instance.attributeValues[origDataSet.NrAttributes()-1]))
                # NOTE(review): classVal is assigned twice; only this second,
                # type-corrected assignment matters
                instance.classVal = instance.attributeValues[origDataSet.NrAttributes()-1]
                dataSet.addInstance(instance)

        return dataSet

def ExportPeeker(fileName, outputDir, fssPercentage, normalize, stopingCriteria):
    """Run the PEEKER training pipeline on a whole data set and export every
    intermediate artefact (attribute ranking, quadrant tree, 2-D projection,
    cluster centroids, clustered instances) as files under outputDir.

    fileName -- path of the source .arff file
    fssPercentage -- info-gain feature-selection threshold (None keeps all)
    normalize -- 0 none / 1 with outlier removal / 2 without
    stopingCriteria -- minimum-cluster-size rule (see DefineMinClusterSize)
    """
    procFileName = processFileName(fileName)
    dataSet1 = readArff(fileName)
    
    # same pipeline as PEEKER training, kept inline because each intermediate
    # result (projection, dendrogram, clusters) is exported below
    fssTrainDs = FeatureSelectionViaInfoGain(fssPercentage, dataSet1)
    normTrainDs = NormalizeTrainDataSet(normalize, fssTrainDs)        
    projected = ProjectViaFastMap(normTrainDs)
    dendrogram = SplitIntoQuadrants(projected, stopingCriteria)
    clusters = GridClusteringWithNoFilter(dendrogram)
    condensed = GetClusterCentroids(clusters)        

    exportAttributes(outputDir + procFileName + "_attributes.csv", dataSet1)
    dendrogram.printQuadrantToFile(outputDir + procFileName + "_quadrants.txt")
    projected.printProjectedDataSetToFile(outputDir + procFileName + "_projected.txt", " ", False)
    exportClusterCentroids(dataSet1, condensed, clusters, outputDir + procFileName + "_centroids_raw.csv", False)
    exportClusterCentroids(dataSet1, condensed, clusters, outputDir + procFileName + "_centroids_disc.csv", True)
    exportAllDataWithCluster(dataSet1, projected, outputDir + procFileName + "_all.csv")
                
    #quadrant.printStatistics(classAttribute)
    #printClusterStatistics(clusters, classAttribute)

def exportAttributes(fileName, dataSet1):
    """Write per-attribute entropy and information gain to a CSV file.

    First line holds the class entropy (1.0 for numeric classes), followed by
    a header and one row per attribute, ordered as returned by
    estimateColumnsEntropy.
    """
    dataSet = DiscretizeNumericClassByMedian(dataSet1)
    discretized = dataSet.FayyadIraniDiscretizer("class")
    columnEntropy = discretized.estimateColumnsEntropy("class")
    if discretized.isClassNumeric == False:
        discretized.computeClassStatistics()
        entropy = discretized.entropy
    else:
        entropy = float(1)
    with open(fileName, 'w') as f:
        f.write(str(entropy) + "\n")
        f.write("Attributes, Entropy, Information Gain\n")
        for columnEnt in columnEntropy:
            f.write(dataSet.attributes[columnEnt[0]].attributeName + ", " + str(columnEnt[1]) + ", " + str(entropy - columnEnt[1]) + "\n")

def exportClusters(fileName, clusters):
        """Merge all non-empty clusters into one data set and dump it as CSV."""
        nonEmpty = [c.mergedDataSet for c in clusters if c.mergedDataSet.NrInstances() != 0]
        merged = mergeDataSets(nonEmpty)
        with open(fileName, 'w') as out:
                merged.exportToCSV(out)

def exportDataSetToCsv(fileName, dataSet):
        """Write the data set to `fileName` in CSV format."""
        with open(fileName, 'w') as out:
                dataSet.exportToCSV(out)

def exportClusterCentroids(allDataSet, condensed, clusters, exportFileName, equalWidthDisc):
    """Export one CSV row per cluster centroid.

    Columns: clusterId, x, y, clusterSize, the attributes ordered by
    information gain, class, and (for categorical classes) classRate.
    Centroid attribute values are denormalized when equalWidthDisc is False,
    or equal-width discretized into 10 bins when it is True.

    Fix: the info-gain attribute ranking (discDataSet) was only assigned
    inside the `equalWidthDisc` branch but used unconditionally, so calling
    with equalWidthDisc=False raised NameError; it is now always computed.
    """
    equalWidthDs = None
    if equalWidthDisc == True:
        equalWidthDs = condensed.EqualWidthDiscretization("class", nrBins=10)

    # rank attributes by information gain on the (median-discretized) class
    discDataSet = DiscretizeNumericClassByMedian(allDataSet)
    discDataSet = discDataSet.FayyadIraniDiscretizer("class")
    columnEntropy = discDataSet.estimateColumnsEntropy("class")

    with open(exportFileName, 'w') as f:
        line = "clusterId, x, y, clusterSize, "
        for i in range(condensed.NrAttributes() - 1):
            attributeId = allDataSet.attributes[columnEntropy[i][0]].Id
            attribute = condensed.getAttributeById(attributeId)
            line += attribute.attributeName + ", "

        line += "class"
        if allDataSet.isClassNumeric == False:
            line += ", classRate"
        f.write(line + "\n")

        for inst in condensed.instances:
            cluster = GetClusterById(clusters, inst.clusterId)

            line = str(inst.clusterId) + ", " + str(inst.x) + ", " + str(inst.y) + ", " + str(cluster.mergedDataSet.NrInstances()) + ", "

            # raw export denormalizes the centroid; discretized export bins it
            if equalWidthDisc == False:
                instance = condensed.denormalizeInstance(inst)
            else:
                instance = equalWidthDs.discretizeInstance2(inst)

            for i in range(condensed.NrAttributes() - 1):
                attributeId = allDataSet.attributes[columnEntropy[i][0]].Id
                condensedAttrIndex = condensed.getAttributeIndexById(attributeId)
                line += str(instance.attributeValues[condensedAttrIndex]) + ", "
            line += str(inst.classVal)
            if allDataSet.isClassNumeric == False:
                line += ", " + str(inst.classRate)

            f.write(line + "\n")

def exportAllDataWithCluster(allDataSet, clusteredDs, exportFileName):
    """Export every instance with its cluster id and 2-D (x, y) projection as CSV.

    allDataSet -- original instances (attribute values come from here)
    clusteredDs -- projected/clustered instances, matched to the originals by Id
    Attribute columns are ordered by information gain of the
    (median-discretized) class; rows are sorted by instance id.
    """
    f = open(exportFileName, 'w')

    # align both data sets so instances can be matched by Id
    allDataSet.instances.sort(key = lambda instance: instance.Id)
    clusteredDs.instances.sort(key = lambda instance: instance.Id)

    # rank attributes by information gain on a discrete class
    discDataSet = DiscretizeNumericClassByMedian(allDataSet)
    discDataSet = discDataSet.FayyadIraniDiscretizer("class")
    columnEntropy =  discDataSet.estimateColumnsEntropy("class")

    line = "instanceId, clusterId, x, y, "
    for i in range(allDataSet.NrAttributes()-1):
        attributeId = allDataSet.attributes[columnEntropy[i][0]].Id
        attribute = allDataSet.getAttributeById(attributeId)
        line += attribute.attributeName + ", "
        
    line += "class"
    f.write(line + "\n")

    for inst in allDataSet.instances:
                clusteredInst = clusteredDs.findInstanceById(inst.Id)
                line = str(inst.Id) + ", " + str(clusteredInst.clusterId) + ", " + str(clusteredInst.x) + ", " + str(clusteredInst.y) + ", " 
                
                # emit attribute values in the same info-gain order as the header
                for i in range(allDataSet.NrAttributes()-1):
                        attributeId = allDataSet.attributes[columnEntropy[i][0]].Id
                        attrIndex = allDataSet.getAttributeIndexById(attributeId)
                        line += str(inst.attributeValues[attrIndex]) + ", "
                line += str(inst.classVal)
                f.write(line + "\n")

    f.close()
       
def DefineFileNames(dataSetName, strD):
    """Derive (baseName, logFileName) for a data set and experiment timestamp."""
    procFileName = processFileName(dataSetName)  # path and extension stripped
    return (procFileName, "log_" + strD + "_" + procFileName + ".txt")

def GetLearnerName(learner, level, noiseLevels):
        """Key used in the performances dict: the plain learner spec for a
        noise-free experiment, otherwise suffixed with the noise level."""
        noiseFree = (len(noiseLevels) == 1 and level == 0)
        return learner if noiseFree else learner + "_noisy_" + str(level)

def InitializePerformances(learners, noiseLevels):
    """Create the nested performances dict, keyed by (possibly noise-suffixed)
    learner name, each entry carrying running 'all_errors' / 'all_MRE' lists."""
    performances = {}
    for level in noiseLevels:
        for learner in learners:
            key = GetLearnerName(learner, level, noiseLevels)
            performances[key] = {"all_errors": [], "all_MRE": []}
    return performances

def InitializePerformanceForRepeatStep(performances, repeatStep):
    """Add an empty per-fold dict for this repeat to every learner's record."""
    for record in performances.values():
        record[repeatStep] = {}

def WriteExperimentalAndClassStatistics(logFile, trainDs, testDs, run, fold):
    """Log the run/fold header; for classification sets also log the train/test
    sizes and the count of instances labelled with the global target_class."""
    logFile.write("\n")
    logFile.write("Run: " + str(run) + ", Fold: " + str(fold) + "\n")
    if trainDs.isClassNumeric == False:
        for label, ds in (("Train", trainDs), ("Test ", testDs)):
            logFile.write(label + " - Size: " + str(ds.NrInstances()) + ", Bugs: " + str(ds.getClassFreq(target_class)) + "\n")#BP1

def EstimatePerformanceForExperimentalRun(performances, predictions, dataSet, trainTime, testTime, overallRunTime, repeat, fold, learner):
    """Compute and store the metrics of one (repeat, fold) run for `learner`.

    Regression sets get mean absolute error and MMRE; classification sets get
    the per-class confusion-matrix measures. Runtimes arrive as timedeltas
    (or None for Weka wrappers, which do not time train/test separately) and
    are stored in milliseconds.

    Fix: runtimes were converted with `.microseconds`, which only yields the
    sub-second component of a timedelta (whole seconds and minutes were
    silently dropped); `total_seconds()` is used instead.
    """
    def _ms(delta):
        # None passes through; anything else is a timedelta -> milliseconds
        return None if delta is None else delta.total_seconds() * 1000.0

    trainTime = _ms(trainTime)
    testTime = _ms(testTime)
    overallRunTime = _ms(overallRunTime)

    if dataSet.isClassNumeric:
        meanError = UpdateErrors(performances, learner, predictions)
        meanMRE = UpdateMREs(performances, learner, predictions)
        scoreRange = dataSet.maxScore - dataSet.minScore
        performances[learner][repeat][fold] = {
            "mean_error": meanError, "mmre": meanMRE,
            "instances": len(dataSet.instances), "columns": len(dataSet.attributes),
            "scoreRange": scoreRange,
            "trainRunTime": trainTime, "testRunTime": testTime, "overallRunTime": overallRunTime}
    else:
        confusionMatrix = EstimateConfusionMatrix(dataSet.attributes[len(dataSet.attributes) - 1], predictions)
        performance = EvaluateClassification(confusionMatrix)
        # not every data set carries an expectedEntropy attribute
        expectedEntropy = getattr(dataSet, 'expectedEntropy', None)
        performances[learner][repeat][fold] = {
            "class_measures": performance,
            "instances": len(dataSet.instances), "columns": len(dataSet.attributes),
            "entropy": expectedEntropy,
            "trainRunTime": trainTime, "testRunTime": testTime, "overallRunTime": overallRunTime}


def EstimateConfusionMatrix(classAttribute, predictions):
    """Per-class confusion counts from (actual, predicted) pairs.

    Returns {classValue: [tn, fn, fp, tp]} for every value of the class
    attribute, treating that value as the positive class.
    """
    confusionMatrix = {}
    for classValue in classAttribute.attributeValues:
        tn = fn = fp = tp = 0
        for actual, predicted in predictions:
            actualPos = (actual == classValue)
            predictedPos = (predicted == classValue)
            if predictedPos and actualPos:
                tp += 1
            elif predictedPos:
                fp += 1
            elif actualPos:
                fn += 1
            else:
                tn += 1
        confusionMatrix[classValue] = [tn, fn, fp, tp]
    return confusionMatrix

def EvaluateClassification(confusionMatrix):
    """Per-class classification measures from [tn, fn, fp, tp] counts.

    Returns {classValue: [pd, pf, prec, fMeasure, acc, g]}. Every ratio falls
    back to 0.0 when its denominator is zero.
    """
    def safeRatio(num, den):
        return float(num) / float(den) if den != 0 else float(0)

    performance = {}
    for classVal in confusionMatrix:
        tn, fn, fp, tp = confusionMatrix[classVal]
        pd = safeRatio(tp, fn + tp)      # recall / probability of detection
        pf = safeRatio(fp, tn + fp)      # false-alarm rate
        prec = safeRatio(tp, tp + fp)    # precision
        acc = safeRatio(tn + tp, tn + fn + fp + tp)
        fMeasure = safeRatio(2 * prec * pd, prec + pd)
        # g-measure: harmonic mean of pd and (1 - pf)
        g = safeRatio(2 * pd * (1 - pf), pd + (1 - pf))
        performance[classVal] = [pd, pf, prec, fMeasure, acc, g]
    return performance

def UpdateErrors(performances, learner, predictions):
    """Append each |actual - predicted| to the learner's running 'all_errors'
    list and return the mean absolute error of this batch."""
    errors = [math.fabs(float(actual) - float(predicted)) for actual, predicted in predictions]
    performances[learner]["all_errors"].extend(errors)
    return float(sum(errors)) / float(len(predictions))

def UpdateMREs(performances, learner, predictions):
    """Append each magnitude of relative error to the learner's running
    'all_MRE' list and return this batch's mean (MMRE). Actuals of exactly
    zero are nudged to a tiny value to avoid division by zero."""
    mres = []
    for actual, predicted in predictions:
        actual = float(actual)
        predicted = float(predicted)
        if actual == 0:
            actual = 0.00000000001
        mres.append(math.fabs(actual - predicted) / actual)
    performances[learner]["all_MRE"].extend(mres)
    return float(sum(mres)) / float(len(predictions))

def PrintPerformancesForExperimentalRun(logFile, performances, trainDs, testDs, classVal, run, fold):
    """Write the per-run/per-fold performance table, dispatching on whether the
    training set's class attribute is numeric (regression) or nominal
    (classification, which also logs train/test sizes and target-class counts).
    """
    logFile.write("\n")
    logFile.write("Run: " + str(run) + ", Fold: " + str(fold) + "\n")
    if trainDs.isClassNumeric == True:
        PrintRegressionPerformances(logFile, performances, run, fold)
    else:
        trainLine = "Train - Size: " + str(trainDs.NrInstances()) + ", Bugs: " + str(trainDs.getClassFreq(target_class)) + "\n"
        testLine = "Test  - Size: " + str(testDs.NrInstances()) + ", Bugs: " + str(testDs.getClassFreq(target_class)) + "\n"
        logFile.write(trainLine)
        logFile.write(testLine)
        PrintClassificationPerformances(logFile, performances, classVal, run, fold)

def PrintClassificationPerformances(logFile, performances, classVal, i, j):
    """Write one classification-performance row per learner for run i, fold j."""
    header = ("Learner,".ljust(35) + "Instances, " + "Columns, " + "class, "
              + "  PD, " + "Prec, " + "  PF, " + "   F, " + " Acc, " + "   G, "
              + " Entropy, " + " Traning, " + " Testing, " + " Runtime, " + "\n")
    logFile.write(header)
    for learner in performances:
        PrintClassificationPerformance(logFile, learner, performances[learner][i][j], classVal)
    logFile.flush()

def PrintRegressionPerformances(logFile, performances, i, j):
    """Write one regression-performance row per learner for run i, fold j."""
    header = ("Learner,".ljust(35) + "Instances, " + "Columns, " + "     Error, "
              + "     Range, " + "      MMRE, " + " Traning, " + " Testing, "
              + " Runtime, " + "\n")
    logFile.write(header)
    for learner in performances:
        PrintRegressionPerformance(logFile, learner, performances[learner][i][j])
    logFile.flush()

def PrintAvgClassificationPerformances(logFile, learners, performances, classVal):
    """Write a table of averaged classification performances, one row per
    learner in `learners`."""
    logFile.write("\n")
    logFile.write("Averages\n")
    header = ("Learner,".ljust(35) + "Instances, " + "Columns, " + "class, "
              + "  PD, " + "Prec, " + "  PF, " + "   F, " + " Acc, " + "   G, "
              + " Entropy, " + " RunTime, " + "\n")
    logFile.write(header)
    for learner in learners:
        PrintClassificationPerformance(logFile, learner, performances[learner], classVal)

def PrintClassificationPerformance(logFile, source, performance, classVal):
    """Write a single CSV-style classification row for one learner/source.

    performance is a dict holding "class_measures" (classVal -> [pd, pf, prec,
    fMeasure, acc, g]), "entropy", "instances", "columns" and the three
    runtime entries.  None-valued measures print as blank fields.

    Cleanup vs. original: the ten duplicated if/else formatting branches are
    collapsed into two local helpers, and the stray unary '+' in
    `'%.2f' % + fMeasure` is removed (it was a no-op).
    """
    def fmt(value):
        # short measure field: "x.xx, " or blank when unavailable
        if value != None: return ('%.2f' % value) + ", "
        return ", "

    def fmtWide(value):
        # 8-char right-justified field used for entropy and runtimes
        if value != None: return ('%.2f' % value).rjust(8) + ", "
        return (" ").rjust(8) + ", "

    pd, pf, prec, fMeasure, acc, g = performance["class_measures"][classVal]

    line = (source.replace(",", "") + ", ").ljust(35)
    line += str(int(performance["instances"])).rjust(9) + ", "
    line += str(int(performance["columns"])).rjust(7) + ", "
    line += classVal.rjust(5) + ", "
    # measures are written in header order: PD, Prec, PF, F, Acc, G
    line += fmt(pd) + fmt(prec) + fmt(pf) + fmt(fMeasure) + fmt(acc) + fmt(g)
    line += fmtWide(performance["entropy"])
    line += fmtWide(performance["trainRunTime"])
    line += fmtWide(performance["testRunTime"])
    line += fmtWide(performance["overallRunTime"])
    logFile.write(line + "\n")

def PrintRegressionPerformance(logFile, source, performance):
    """Write a single CSV-style regression row (error, range, MMRE, runtimes)
    for one learner/source; None-valued runtimes print as blank fields."""
    def timeField(value):
        # 8-char right-justified runtime field, blank when unavailable
        if value != None:
            return ('%.2f' % value).rjust(8) + ", "
        return (" ").rjust(8) + ", "

    parts = [(source.replace(",", "") + ", ").ljust(35)]
    parts.append(str(int(performance["instances"])).rjust(9) + ", ")
    parts.append(str(int(performance["columns"])).rjust(7) + ", ")
    parts.append(('%.2f' % performance["mean_error"]).rjust(10) + ", ")
    parts.append(('%.2f' % performance["scoreRange"]).rjust(10) + ", ")
    parts.append(('%.2f' % performance["mmre"]).rjust(10) + ", ")
    parts.append(timeField(performance["trainRunTime"]))
    parts.append(timeField(performance["testRunTime"]))
    parts.append(timeField(performance["overallRunTime"]))
    logFile.write("".join(parts) + "\n")

def ComparePerformances(isClassNumeric, performances):
    """Dispatch to the regression or classification comparison routine based
    on the class attribute type."""
    if isClassNumeric == True:
        return ComparePerformances_Regression(performances)
    return ComparePerformances_Classification(performances)

def ComparePerformances_Regression(performances):
    """Compare all learners on the regression measures.

    Returns a dict of comparison results keyed by measure name; error and MRE
    use "lesser" criteria because lower values are better.

    Fix vs. original: the comparisons were wrapped in a `for learner in
    performances` loop that never used `learner` and recomputed the identical
    whole-population comparisons once per learner (and raised NameError on an
    empty dict); each comparison already covers every learner, so a single
    pass is equivalent and cheaper.
    """
    comparison_Instances = GetMedians(performances, "instances")
    comparison_Columns = GetMedians(performances, "columns")
    comparison_error = ComparePerformanceMeasure(performances, "all_errors", None, None, 0.5, "lesser")
    comparison_mmre = ComparePerformanceMeasure(performances, "all_MRE", None, None, 0.5, "lesser")
    comparison_range = GetMedians(performances, "scoreRange")
    return {"mean_error": comparison_error, "mmre": comparison_mmre,
            "instances": comparison_Instances, "columns": comparison_Columns,
            "score_range": comparison_range}

def ComparePerformances_Classification(performances):
    """Compare all learners on the per-class classification measures for the
    global target_class.

    Indexes 0..5 into "class_measures" correspond to [pd, pf, prec, f, acc, g]
    as produced by EvaluateClassification.  Returns a dict of comparison
    results keyed by measure name.

    Fix vs. original: the comparisons were wrapped in a `for learner in
    performances` loop that never used `learner` and recomputed identical
    whole-population comparisons once per learner; a single pass is
    equivalent and cheaper.
    """
    c = target_class
    comparison_PD = ComparePerformanceMeasure(performances, "class_measures", c, 0, 0.5, "greater")
    comparison_PF = ComparePerformanceMeasure(performances, "class_measures", c, 1, 0.5, "lesser")
    comparison_Prec = ComparePerformanceMeasure(performances, "class_measures", c, 2, 0.5, "greater")
    comparison_F = ComparePerformanceMeasure(performances, "class_measures", c, 3, 0.5, "greater")
    comparison_Acc = ComparePerformanceMeasure(performances, "class_measures", c, 4, 0.5, "greater")
    comparison_G = ComparePerformanceMeasure(performances, "class_measures", c, 5, 0.5, "greater")
    comparison_Instances = GetMedians(performances, "instances")
    comparison_Columns = GetMedians(performances, "columns")
    comparison_entropy = ComparePerformanceMeasure(performances, "entropy", None, None, 0.5, "greater")
    return {"pd": comparison_PD, "pf": comparison_PF, "prec": comparison_Prec,
            "f": comparison_F, "acc": comparison_Acc, "g": comparison_G,
            "instances": comparison_Instances, "columns": comparison_Columns,
            "entropy": comparison_entropy}

def PrintComparisons(isClassNumeric, logFile, comparisons, classValue):
    """Write win/tie/loss tables, choosing the regression or classification
    measure set based on the class attribute type."""
    if isClassNumeric == True:
        PrintComparisons_Regression(logFile, comparisons)
    else:
        PrintComparisons_Classification(logFile, comparisons, classValue)

def PrintComparisons_Regression(logFile, comparisons):
    """Write win/tie/loss tables for the regression measures (lower error and
    lower MMRE are both better, hence "lesser")."""
    logFile.write("\n")
    for measure, order in [("mean_error", "lesser"), ("mmre", "lesser")]:
        PrintWinTieLoss(logFile, comparisons[measure], measure, None, order)

def PrintComparisons_Classification(logFile, comparisons, classValue):
    """Write win/tie/loss tables for every classification measure, tagged with
    the class value being reported."""
    logFile.write("\n")
    orderedMeasures = [("acc", "greater"), ("pd", "greater"), ("pf", "lesser"),
                       ("prec", "greater"), ("f", "greater"), ("g", "greater"),
                       ("entropy", "lesser")]
    for measure, order in orderedMeasures:
        PrintWinTieLoss(logFile, comparisons[measure], measure, classValue, order)
    
def ComparePerformanceMeasure(performances, key, classVal, measureIndex, alpha, criteria):
    """Pairwise statistical comparison of all learners on one measure.

    performances -- dict: learner -> per-run/per-fold performance records
    key          -- measure name ("class_measures", "entropy", "all_errors", ...)
    classVal     -- class value to drill into (None for scalar measures)
    measureIndex -- index inside the per-class measure list (None for scalars)
    alpha        -- significance threshold for the Wilcoxon signed-rank test
    criteria     -- "greater" if higher medians are better, "lesser" otherwise

    Returns [medians, win, tie, loss, mins, q1s, q3s, maxs, spreads]; every
    element is a dict keyed by learner name.
    """
    learnerIndexes = []
    populations = []
    medians = []
    mins = []
    maxs = []
    q1s = []
    q3s = []
    spreads = []
    # gather each learner's population of measure values and its
    # five-number summary (min, q1, median, q3, max)
    for learner in performances:
        population = GetMeasuresPopulation(performances[learner], classVal, key, measureIndex)
        #population = allPopulations[learner]
        # learners whose first measurement is None are excluded entirely
        if population[0] == None: continue
        learnerIndexes.append(learner)
        populations.append(population)
        quantiles = mquantiles(population, [0, 0.25, 0.5, 0.75, 1])
        mins.append(quantiles[0])        
        q1s.append(quantiles[1])
        medians.append(quantiles[2])
        q3s.append(quantiles[3])
        maxs.append(quantiles[4])
        spreads.append(quantiles[3] - quantiles[1])  # inter-quartile range


    #initialize wintTieLoss matrix
    winTieLoss = []
    for i in range(len(populations)):
        winTieLoss1 = []
        for j in range(len(populations)):
            winTieLoss1.append(None)
        winTieLoss.append(winTieLoss1)
    
    # fill the matrix symmetrically: 1 = row learner wins over column learner,
    # -1 = loses, 0 = tie (statistically indistinguishable or equal medians)
    for i in range(len(populations)):
        for j in range(i+1, len(populations)):
            # NOTE(review): wilcoxon pairs samples element-wise, so the
            # populations are presumably equal-length and aligned by
            # run/fold -- confirm against the caller
            z_statistic, p_value = wilcoxon(populations[i], populations[j])
            if p_value > alpha: #the population are statistically the same
                winTieLoss[i][j] = 0
                winTieLoss[j][i] = 0
            else:
                # significant difference: decide winner by comparing medians
                # in the direction given by `criteria`
                median_i = medians[i]
                median_j = medians[j]
                if criteria == "greater":
                    if median_i > median_j:
                        winTieLoss[i][j] = 1
                        winTieLoss[j][i] = -1
                    elif median_i < median_j:
                        winTieLoss[i][j] = -1
                        winTieLoss[j][i] = 1
                    else:
                        winTieLoss[i][j] = 0
                        winTieLoss[j][i] = 0
                elif criteria == "lesser":
                    if median_i > median_j:
                        winTieLoss[i][j] = -1
                        winTieLoss[j][i] = 1
                    elif median_i < median_j:
                        winTieLoss[i][j] = 1
                        winTieLoss[j][i] = -1
                    else:
                        winTieLoss[i][j] = 0
                        winTieLoss[j][i] = 0
                else:
                    raise Exception("ComparePerformanceMeasure - Unknown criteria: " + criteria)

    tie = {}
    win = {}
    loss = {}
    medians1 = {}
    mins1 = {}
    maxs1 = {}
    q1s1 = {}
    q3s1 = {}
    spreads1 = {}

    # re-key the parallel lists by learner name and tally win/tie/loss counts
    # from the matrix (diagonal excluded)
    for i in range(len(learnerIndexes)):
        learner = learnerIndexes[i]
        medians1[learner] = medians[i]
        mins1[learner] = mins[i]
        maxs1[learner] = maxs[i]
        q1s1[learner] = q1s[i]
        q3s1[learner] = q3s[i]
        spreads1[learner] = spreads[i]
        tie[learner] = 0
        win[learner] = 0
        loss[learner] = 0
        for j in range(len(learnerIndexes)):
            if i != j:
                ret = winTieLoss[i][j]
                if ret == 1: win[learner] += 1
                elif ret == -1: loss[learner] += 1
                else: tie[learner] += 1

    return [medians1, win, tie, loss, mins1, q1s1, q3s1, maxs1, spreads1]

def GetMeasuresPopulation(performance, classVal, key, measureIndex):
    """Collect the population of `key` values from one learner's performance
    record.

    Keys beginning with "all" are already flat lists and are returned as-is.
    Otherwise the record is scanned per run (integer keys only) and per fold,
    optionally drilling into classVal/measureIndex inside the stored entry.
    """
    if key.startswith("all"):
        return performance[key]
    values = []
    runs = [r for r in performance if isinstance(r, int)]
    for run in runs:
        for fold in performance[run]:
            entry = performance[run][fold][key]
            if measureIndex != None:
                values.append(entry[classVal][measureIndex])
            else:
                values.append(entry)
    return values

def GetMedians(performances, key):
    """Return a one-element list wrapping a dict of learner -> median of the
    `key` population (the list wrapper keeps the result indexable like the
    ComparePerformanceMeasure output)."""
    medians = dict(
        (learner, np.median(GetMeasuresPopulation(performances[learner], None, key, None)))
        for learner in performances)
    return [medians]

def PrintWinTieLoss(logFile, comparison_results, measureName, classVal, criteria):
    """Write a win/tie/loss table for one measure, with learners ordered by
    median: descending for "greater" criteria, ascending for "lesser".

    comparison_results is the list produced by ComparePerformanceMeasure;
    only indexes 0-3 (medians, win, tie, loss) are used here.
    """
    medians = comparison_results[0]
    win = comparison_results[1]
    tie = comparison_results[2]
    loss = comparison_results[3]

    # negate medians so a plain ascending sort yields descending order
    if criteria == "greater":
        ordered = sorted([(0 - m, name) for (name, m) in medians.items()])
    elif criteria == "lesser":
        ordered = sorted([(m, name) for (name, m) in medians.items()])
    else:
        raise Exception("PrintWinTieLoss - Unknown criteria: " + criteria)

    logFile.write("\n")
    logFile.write(measureName + " results" + "\n")
    if classVal == None:
        logFile.write("Learner,".ljust(30) + "Median, " + " Win, " + " Tie, " + "Loss" + "\n")
    else:
        logFile.write("Learner,".ljust(30) + "class, " + "Median, " + " Win, " + " Tie, " + "Loss" + "\n")
    for _, learner in ordered:
        fields = [(learner + ", ").ljust(30)]
        if classVal != None:
            fields.append(classVal.rjust(5) + ", ")
        fields.append(('%.2f' % medians[learner]).rjust(6) + ", ")
        fields.append(str(win[learner]).rjust(4) + ", ")
        fields.append(str(tie[learner]).rjust(4) + ", ")
        fields.append(str(loss[learner]).rjust(4))
        logFile.write("".join(fields) + "\n")

def PrintSummary(isClassNumeric, generalLogFile, generalLogFile2, procFileName, comparisons):
    """Append the end-of-experiment summary rows to the general log files.

    Regression writes both the mean-error and the MMRE summary; classification
    currently writes only the first summary (the second is disabled).
    """
    if isClassNumeric == True:
        ProcessResults_Regression(generalLogFile, procFileName, comparisons)
        ProcessResults_Regression2(generalLogFile2, procFileName, comparisons)
    else:
        ProcessResults_Classification(generalLogFile, procFileName, comparisons)
        #ProcessResults_Classification2(generalLogFile2, procFileName, comparisons)

def ProcessResults_Classification(logFile, procFileName, comparisons):
    """Write summary rows for the top-ranked learners (those with F-measure
    rank 0, i.e. zero losses), ordered by descending median F-measure.

    comparisons is the dict produced by ComparePerformances_Classification;
    each comparison's element [0] maps learner -> median and element [3]
    maps learner -> loss count, which serves as the rank here.

    Cleanup vs. original: removed the unused `comparison_Acc` local and the
    stray no-op unary '+' in `'%.2f' % + fMeasure`.
    """
    logFile.write("Data,".ljust(25) + "Learner,".ljust(35) + "Instances, " + "Columns, " + "Prec,  " + "  PD,  " + "  PF,  " + "   F,  " + "   G,  " +  " En.,  " + "Rank,  " + "\n")

    comparison_PD = comparisons["pd"]
    comparison_PF = comparisons["pf"]
    comparison_Prec = comparisons["prec"]
    comparison_F = comparisons["f"]
    comparison_G = comparisons["g"]
    comparison_Instances = comparisons["instances"]
    comparison_Columns = comparisons["columns"]
    comparison_entropy = comparisons["entropy"]

    fMeasures = comparison_F[0]
    # sort learners by descending median F-measure
    learners = sorted([(-value, key) for (key, value) in fMeasures.items()])
    for pair in learners:
        learner = pair[1]
        prec = comparison_Prec[0][learner]
        pd = comparison_PD[0][learner]
        pf = comparison_PF[0][learner]
        g = comparison_G[0][learner]
        fMeasure = comparison_F[0][learner]
        fRank = comparison_F[3][learner]  # loss count doubles as rank
        instances = comparison_Instances[0][learner]
        columns = comparison_Columns[0][learner]
        entropy = comparison_entropy[0].get(learner)

        line = (procFileName + ",").ljust(25) + (learner.replace(",", "") + ",").ljust(35)
        line += str(int(instances)).rjust(9) + ", "
        line += str(int(columns)).rjust(7) + ", "
        if prec != None: line += ('%.2f' % prec) + ",  "
        else: line += ", "
        if pd != None: line += ('%.2f' % pd) + ",  "
        else: line += ", "
        if pf != None: line += ('%.2f' % pf) + ",  "
        else: line += ", "
        if fMeasure != None: line += ('%.2f' % fMeasure) + ",  "
        else: line += ", "
        if g != None: line += ('%.2f' % g) + ",  "
        else: line += ", "
        if entropy != None: line += ('%.2f' % entropy) + ",  "
        else: line += "     , "
        if fRank != None: line += str(fRank).rjust(4) + ",  "
        else: line += ", "
        # only the top-ranked (rank 0) learners make it into the summary
        if fRank == 0: logFile.write(line + "\n")

    logFile.write('\n')

    logFile.flush()

def ProcessResults_Regression(logFile, procFileName, comparisons):
    """Write one mean-error summary row per learner, sorted by ascending
    median error, with the error distribution quantiles and the learner's
    statistical rank (loss count)."""
    logFile.write("Data,".ljust(12) + "Learner,".ljust(35) + "Instances, " + "Columns, " + "       MIN,  " + 
    "        q1,  " + "    median,  " + "        q3,  " + "       MAX,  " + "    spread, " + "Rank  " + "\n")

    errorComparison = comparisons["mean_error"]
    instancesByLearner = comparisons["instances"][0]
    columnsByLearner = comparisons["columns"][0]
    # ascending sort on median error -- best learner first
    for _, learner in sorted([(m, name) for (name, m) in errorComparison[0].items()]):
        fields = [(procFileName + ",").ljust(12), (learner.replace(",", "") + ",").ljust(35)]
        fields.append(str(int(instancesByLearner[learner])).rjust(9) + ", ")
        fields.append(str(int(columnsByLearner[learner])).rjust(7) + ", ")
        # distribution summary: min, q1, median, q3, max, spread
        fields.append(('%.2f' % errorComparison[4][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % errorComparison[5][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % errorComparison[0][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % errorComparison[6][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % errorComparison[7][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % errorComparison[8][learner]).rjust(10) + ",  ")
        fields.append(str(errorComparison[3][learner]).rjust(3))
        logFile.write("".join(fields) + "\n")

    logFile.write('\n')

    logFile.flush()

def ProcessResults_Regression2(logFile, procFileName, comparisons):
    """Write the MMRE-based summary table, one row per learner sorted by
    ascending median MMRE (same layout as ProcessResults_Regression)."""
    logFile.write("Data,".ljust(12) + "Learner,".ljust(35) + "Instances, " + "Columns, " + "       MIN,  " + 
    "        q1,  " + "    median,  " + "        q3,  " + "       MAX,  " + "    spread, " + "Rank  " + "\n")

    mmreComparison = comparisons["mmre"]
    instancesByLearner = comparisons["instances"][0]
    columnsByLearner = comparisons["columns"][0]
    # ascending sort on median MMRE -- best learner first
    for _, learner in sorted([(m, name) for (name, m) in mmreComparison[0].items()]):
        fields = [(procFileName + ",").ljust(12), (learner.replace(",", "") + ",").ljust(35)]
        fields.append(str(int(instancesByLearner[learner])).rjust(9) + ", ")
        fields.append(str(int(columnsByLearner[learner])).rjust(7) + ", ")
        # distribution summary: min, q1, median, q3, max, spread
        fields.append(('%.2f' % mmreComparison[4][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % mmreComparison[5][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % mmreComparison[0][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % mmreComparison[6][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % mmreComparison[7][learner]).rjust(10) + ",  ")
        fields.append(('%.2f' % mmreComparison[8][learner]).rjust(10) + ",  ")
        fields.append(str(mmreComparison[3][learner]).rjust(3))
        logFile.write("".join(fields) + "\n")

    logFile.write('\n')

    logFile.flush()

def gridClustering(quadrant, listOfLeaves1, filterName = ""):
    """Dispatch grid clustering by filter name: "variance" grows clusters while
    class variance drops, "onecluster" lumps every leaf into one cluster, and
    anything else yields one cluster per leaf."""
    if filterName == "variance":
        return gridClusteringWithVarianceFilter(quadrant, listOfLeaves1)
    if filterName == "onecluster":
        return gridClusteringOneCluster(quadrant, listOfLeaves1)
    return gridClusteringNoFilter(quadrant, listOfLeaves1)

def gridClusteringNoFilter(quadrant, listOfLeaves1):
    """Create one single-quadrant cluster per leaf, densest leaves first.

    Cluster ids are assigned in density order.  Returns the cluster list."""
    # work on a sorted copy; the caller's list is left untouched
    orderedLeaves = sorted(listOfLeaves1, key = lambda leaf: 0 - leaf.density)

    maxSurface = (quadrant.maxX - quadrant.minX) * (quadrant.maxY - quadrant.minY)

    clusters = []
    for clusterId, quad in enumerate(orderedLeaves):
        cluster = Cluster(clusterId)
        quad.cluster = cluster
        cluster.quadrants.append(quad)

        cluster.mergeQuadrants()              #merge the datasets of the cluster's quadrants
        cluster.computeStatistics(maxSurface) #compute the cluster's statistics

        clusters.append(cluster)

    return clusters

def gridClusteringOneCluster(quadrant, listOfLeaves1):
    """Collapse every leaf quadrant into a single cluster (id 0) and return it
    in a one-element list."""
    # work on a sorted copy (densest first); the caller's list is untouched
    orderedLeaves = sorted(listOfLeaves1, key = lambda leaf: 0 - leaf.density)

    maxSurface = (quadrant.maxX - quadrant.minX) * (quadrant.maxY - quadrant.minY)

    cluster = Cluster(0)
    for quad in orderedLeaves:
        quad.cluster = cluster
        cluster.quadrants.append(quad)

    cluster.mergeQuadrants()              #merge the datasets of the cluster's quadrants
    cluster.computeStatistics(maxSurface) #compute the cluster's statistics

    return [cluster]


def gridClusteringWithVarianceFilter(quadrant, listOfLeaves1):
    """Greedy grid clustering that only grows a cluster while the class-score
    variance of the pooled instances decreases.

    Repeatedly seeds a new cluster with the densest unassigned leaf quadrant,
    then absorbs neighboring quadrants as long as the pooled variance drops.
    Returns the clusters sorted by decreasing instance count.

    Fixes vs. original: (1) `clusters.append(cluster)` and `mergeQuadrants()`
    sat inside the neighbor loop, appending the same cluster once per scanned
    neighbor and re-merging redundantly; each cluster is now finalized exactly
    once after it stops growing. (2) `listOfLeaves` was mutated while being
    iterated; the scan now iterates a snapshot. (3) `k` was only incremented
    when a neighbor was absorbed, so a quadrant that absorbed nothing looped
    forever; `k` now advances once per processed quadrant.
    """
    #sort leaf quadrants in decreasing order by density
    listOfLeaves = listOfLeaves1[:]
    listOfLeaves.sort(key = lambda leaf: 0-leaf.density)

    maxSurface = (quadrant.maxX - quadrant.minX) * (quadrant.maxY - quadrant.minY)

    clusters = []
    clusterId = 0
    while len(listOfLeaves) > 0:
        #pick the densest remaining quadrant as the seed of a new cluster
        centroid = listOfLeaves.pop(0)
        cluster = Cluster(clusterId)
        cluster.quadrants.append(centroid)
        centroid.cluster = cluster

        #running statistics of the growing cluster
        clusterSumScore = centroid.projectedDataSet.sumScore
        clusterSumSquaresScore = centroid.projectedDataSet.sumSquareScore
        clusterNumInstances = len(centroid.projectedDataSet.instances)
        clusterVariance = 0
        if clusterNumInstances > 1:
            clusterVariance = (clusterSumSquaresScore - ((clusterSumScore ** 2) / clusterNumInstances))
            clusterVariance = clusterVariance / (clusterNumInstances - 1)

        #for each quadrant already in the cluster, evaluate its neighbors
        k = 0
        while k < len(cluster.quadrants):
            currentQuad = cluster.quadrants[k]

            #iterate over a snapshot, since accepted neighbors are removed
            for neigh in listOfLeaves[:]:
                if not currentQuad.isNeighbor(neigh):
                    continue
                #estimate the cluster statistics if this neighbor were added
                newSumScore = clusterSumScore + neigh.projectedDataSet.sumScore
                newSumSquares = clusterSumSquaresScore + neigh.projectedDataSet.sumSquareScore
                newNumInstances = clusterNumInstances + len(neigh.projectedDataSet.instances)
                newVariance = 0
                if newNumInstances > 1:
                    newVariance = (newSumSquares - ((newSumScore ** 2) / newNumInstances)) / (newNumInstances - 1)

                #absorb the neighbor only if it lowers the class variance
                if newVariance < clusterVariance:
                    cluster.quadrants.append(neigh)
                    neigh.cluster = cluster
                    listOfLeaves.remove(neigh)

                    #update current cluster statistics
                    clusterSumScore = newSumScore
                    clusterSumSquaresScore = newSumSquares
                    clusterNumInstances = newNumInstances
                    clusterVariance = newVariance
            k += 1

        cluster.mergeQuadrants() #merge the data sets of all quadrants in the cluster
        cluster.nrInstances = len(cluster.mergedDataSet.instances)
        clusters.append(cluster)
        clusterId += 1

    clusters.sort(key = lambda cluster: 0-cluster.nrInstances)

    return clusters



def estimateEnviedAndFeared(clusters):
    """For every non-empty cluster, locate its two nearest "envied" clusters
    (lower projected class value) and two nearest "feared" clusters (higher
    projected class value), then print one CSV row per cluster:

        clusterId, env1Id, env2Id, fear1Id, fear2Id, avgScoreChange,
        env1Score, env2Score, fear1Score, fear2Score

    Distances are Euclidean between centroids in the projected (FastMap)
    space; scores are the class-value differences to those centroids.
    NOTE(review): each neighbor's centroid is taken from its FIRST quadrant's
    projectedDataSet rather than the merged set used for the current cluster
    -- confirm that asymmetry is intended.
    """
    # bare DataSet used only for its squareDistanceOnProjected helper
    dataSet = DataSet()
    for cluster in clusters:
        if cluster.nrInstances == 0: continue
        instance = cluster.centroidInstance2
        cluster.mergedDataSet.computeCentroid()        
        projectedInst = cluster.mergedDataSet.centroidInstance
        # skip clusters with no usable centroid
        if instance == None: continue
        #if projectedInst.classVal == 0 or projectedInst.classVal == 1: continue


        #find envied: the two nearest clusters with a LOWER class value,
        #tracked as (id, distance, score-delta) triples
        env1Id = -1
        env2Id = -1
        env1Dist = 1000000000000
        env2Dist = 1000000000000
        env1Score = 0
        env2Score = 0
        for cluster1 in clusters:
            if cluster1.clusterId != cluster.clusterId:
                projectedInst1 = cluster1.quadrants[0].projectedDataSet.centroidInstance
                if projectedInst1 == None: continue
                if projectedInst1.classVal < projectedInst.classVal:
                    dist = (dataSet.squareDistanceOnProjected(projectedInst, projectedInst1))**0.5
                    if dist < env1Dist:
                        # new closest: demote the previous best to second place
                        env2Id = env1Id
                        env2Dist = env1Dist
                        env2Score = env1Score
                        env1Id = cluster1.clusterId
                        env1Dist = dist
                        env1Score = projectedInst.classVal - projectedInst1.classVal
                    elif dist < env2Dist:
                        env2Id = cluster1.clusterId
                        env2Dist = dist
                        env2Score = projectedInst.classVal - projectedInst1.classVal

        #find feared: the two nearest clusters with a HIGHER class value
        fear1Id = -1
        fear2Id = -1
        fear1Dist = 1000000000000
        fear2Dist = 1000000000000
        fear1Score = 0
        fear2Score = 0
        for cluster1 in clusters:
            if cluster1.clusterId != cluster.clusterId:
                projectedInst1 = cluster1.quadrants[0].projectedDataSet.centroidInstance
                if projectedInst1 == None: continue
                if projectedInst1.classVal > projectedInst.classVal:
                    dist = (dataSet.squareDistanceOnProjected(projectedInst, projectedInst1))**0.5
                    if dist < fear1Dist:
                        # new closest: demote the previous best to second place
                        fear2Id = fear1Id
                        fear2Dist = fear1Dist
                        fear2Score = fear1Score
                        fear1Id = cluster1.clusterId
                        fear1Dist = dist
                        fear1Score = projectedInst1.classVal - projectedInst.classVal
                    elif dist < fear2Dist:
                        fear2Id = cluster1.clusterId
                        fear2Dist = dist
                        fear2Score = projectedInst1.classVal - projectedInst.classVal

        # average score delta over the four neighbors (zeros when fewer found)
        scoreChange = float(env1Score + env2Score + fear1Score + fear2Score)/float(4)

        print str(cluster.clusterId) + ", " + str(env1Id) + ", " + str(env2Id) + ", " + str(fear1Id) + ", " + str(fear2Id) + ", " + str(scoreChange) + ", " + str(env1Score) + ", " + str(env2Score) + ", " + str(fear1Score) + ", " + str(fear2Score)


def ProcessDataSet_via_LR_Sample(fileName, fssPercentage, stopingCriteria, exportData, printStatistics, projectedDir, clusterDir, samplePercentage):
    """Read an ARFF data set, optionally select features, project it with
    FastMap, grid-cluster the projection into one cluster, and draw a
    condensed training sample from it.

    fileName         -- ARFF file to process
    fssPercentage    -- info-gain threshold for feature selection (None = skip)
    stopingCriteria  -- "C1" -> sqrt(n), "C2" -> 2*sqrt(n) minimal quadrant
                        size; anything else chooses per data-set size
    exportData       -- when True, dump projection/cluster files to the dirs
    printStatistics  -- when True, print quadrant/cluster statistics
    projectedDir, clusterDir -- output directories for the exports
    samplePercentage -- percentage sampled from each cluster (halved below)

    Fixes vs. original: the misspelled dataSet.nrInstances() call (an
    AttributeError on the small-data path; every other call site uses
    NrInstances) and print statements rewritten in a form valid in both
    Python 2 and 3.
    """
    print(fileName)
    procFileName = processFileName(fileName)
    dataSet1 = readArff(fileName)
    if fssPercentage != None:
        procFileName += "_fss"
        #discretize, rank features by information gain, then project the
        #original (undiscretized) data onto the selected features
        discretizedDataSet = dataSet1.FayyadIraniDiscretizer("class")
        selectedFeatures = discretizedDataSet.featureReductionViaInfoGain("class", fssPercentage)
        dataSet = dataSet1.applyFSS(selectedFeatures)
    else:
        dataSet = dataSet1
    if stopingCriteria == "C1":
        minSize = dataSet.NrInstances()**0.5
    elif stopingCriteria == "C2":
        minSize = 2*(dataSet.NrInstances())**0.5
    else:
        #in this case the minimal cluster size is defined by the size of the training data
        #(fixed: this branch originally called the non-existent dataSet.nrInstances())
        if len(dataSet.instances) < 100: minSize = dataSet.NrInstances()**0.5
        else: minSize = 2*(dataSet.NrInstances())**0.5

    classAttribute = dataSet.attributes[len(dataSet.attributes) - 1] #set class attribute (the last attribute)

    dataSet.computeClassStatistics()

    #project data set via FastMap
    point = None
    projectedDs = dataSet.project(selectedIndex = None, east = None, west = None, nrInstances = None, logFilter = True)

    #split the projected instance space into a disjoint set of quadrants
    quadrant = Quadrant(None, "0", projectedDs, 0, None, None, None, None, -4.2, -3.1, -1.6, -1.8)
    quadrant.divide(minSize) #create a dendogram quadrants

    #get and process the list of leaves from the dendogram
    listOfLeaves = []
    quadrant.getAllLeaves(listOfLeaves)

    #group the leaf quadrants into clusters
    clusters = gridClustering(quadrant, listOfLeaves, "onecluster")
    setInstanceClusterId(clusters)

    #sample half of samplePercentage from every cluster, tracking the
    #accumulated sample distance and count
    trainSamples = {}
    sumDist = 0
    nr = 0
    for cluster in clusters:
        trainSamples[cluster.clusterId] = cluster.mergedDataSet.sample(float(samplePercentage)/2, dataSet)
        sumDist += trainSamples[cluster.clusterId].sumDist
        nr += trainSamples[cluster.clusterId].nr1

    #rebuild a condensed training set from the per-cluster samples
    condensedTrainDs = dataSet.condenseFromSamples(trainSamples)
    condensedTrainDs.computeClassStatistics()

    if exportData == True:
        projectedDs.printProjectedDataSetToFile(projectedDir + procFileName + ".txt", " ", False)
        exportClusterCentroids(clusters, dataSet, True, clusterDir + procFileName + "_after_disc.csv")
        quadrant.printQuadrantToFile(clusterDir + procFileName + ".txt", clusterDir + procFileName + "_v2.txt")
        estimateEnviedAndFeared(clusters)

    if printStatistics == True:
        quadrant.printStatistics(classAttribute)
        printClusterStatistics(clusters, classAttribute)

    print("")

def ProcessDataSet_GridCluster_Entropy(dataSet, procFileName, exportata, printStatistics, filterType):
    """Project dataSet via FastMap, grid-cluster the projection (optionally with
    the entropy filter), then discretize and feature-select on 'class'.

    Parameters:
        dataSet         -- the data set to process
        procFileName    -- base name used for all exported files
        exportata       -- when truthy, export projection/cluster files
                           (original -- misspelled -- name kept so keyword
                           callers still work)
        printStatistics -- when truthy, print quadrant/cluster statistics
        filterType      -- "entropy" selects the entropy-filtered clustering
    """
    alpha = float(0)
    beta = float(1)

    classAttribute = dataSet.attributes[len(dataSet.attributes) - 1] #set class attribute (the last attribute)

    #project data set via FastMap
    point = None
    projectedDs = dataSet.project()

    #split the projected instance space into a disjoint set of quadrants
    quadrant = Quadrant(None, "0", projectedDs, 0, None, None, None, None, -5, -5, 5, 5)
    quadrant.divide(2*(len(projectedDs.instances)**0.5)) #create a dendogram quadrants

    #get and process the list of leaves from the dendogram
    listOfLeaves = []
    quadrant.getAllLeaves(listOfLeaves)
    for leaf in listOfLeaves:
        leaf.computeAllNeighbors(listOfLeaves)

    #group the leaf quadrants into clusters
    if filterType == "entropy":
        clusters = gridClusteringWithEntropyFilter(quadrant, listOfLeaves, alpha, beta)
    else:
        clusters = gridClustering(quadrant, listOfLeaves)
    setInstanceClusterId(clusters)

    #apply Fayyad-Irani discretizer based on the cluster label
    discretizedDs = dataSet.FayyadIraniDiscretizer2("class")

    #apply feature selection based on InformationGain on cluster id
    selectedFeatures = discretizedDs.featureReductionViaInfoGain("class", 0.5)
    fssDs = dataSet.applyFSS(selectedFeatures)
    fssDiscretizedDs = discretizedDs.applyFSS(selectedFeatures)

    #BUGFIX: the original tested the undefined name 'exportData' (NameError at
    #runtime); use the actual parameter instead
    if exportata:
        projectedDs.printProjectedDataSetToFile("output/projected/" + procFileName + ".txt", " ", False)
        quadrant.printQuadrantToFile("output/clusters/" + procFileName + ".txt", "output/clusters/" + procFileName + "_v2.txt")
        printClustersToFile(clusters, "output/clusters/" + procFileName + "_v3.txt")

    if printStatistics:
        quadrant.printStatistics(classAttribute)
        printClusterStatistics(clusters, classAttribute)

    #NOTE(review): the original had ~25 unreachable lines after this return
    #(centroid printing, commented-out condensing); removed as dead code
    return

def EstimateMRE(actual, predicted):
    """Return the mean absolute error between actual and predicted values."""
    total = sum(math.fabs(p - a) for a, p in zip(actual, predicted))
    return float(total) / float(len(actual))

def EstimateMeanError2(actual, predicted):
    """Return the mean absolute error (same computation as EstimateMRE,
    kept as a separate name for existing callers)."""
    absErrors = [math.fabs(predicted[i] - actual[i]) for i in range(len(actual))]
    return float(sum(absErrors)) / float(len(actual))

def EstimateMedianError(actual, predicted):
    """Return the upper-median absolute error: the element at index
    len(actual)//2 of the sorted absolute errors (no averaging for even
    lengths, matching the original behavior)."""
    errors = sorted(math.fabs(p - a) for a, p in zip(actual, predicted))
    return errors[len(actual) // 2]

def EstimateSpreadError(actual, predicted):
    """Return the interquartile spread (Q3 - Q1) of the absolute errors,
    using simple index-based quartiles at n//4 and 3n//4."""
    errors = sorted(math.fabs(p - a) for a, p in zip(actual, predicted))
    n = len(actual)
    lowerQuartile = errors[n // 4]
    upperQuartile = errors[3 * n // 4]
    return upperQuartile - lowerQuartile


def EstimateVarianceError(actual, predicted):
    """Return the sample variance of the absolute errors, using the
    computational formula (sum(x^2) - (sum x)^2 / n) / (n - 1)."""
    total = 0
    totalSqr = 0
    for i in range(len(actual)):
        err = math.fabs(predicted[i] - actual[i])
        total += err
        totalSqr += err ** 2

    n = float(len(actual))
    total = float(total)
    totalSqr = float(totalSqr)

    return (totalSqr - ((total * total) / n)) / (n - 1)


def EstimateMMRE(actual, predicted):
    """Return the mean magnitude of relative error (MMRE), in percent.

    Pairs with actual == 0 contribute nothing to the sum but ARE counted in
    the denominator (the original's n incremented on every pair, so it always
    equaled len(actual))."""
    total = 0
    for a, p in zip(actual, predicted):
        if a != 0:
            total += math.fabs(p - a) / math.fabs(a)
    return 100 * (float(total) / float(len(actual)))

def EstimatePred(actual, predicted, predVal):
    """Return PRED(predVal): the percentage of predictions whose magnitude of
    relative error is at most predVal percent.

    Parameters:
        actual    -- list of actual values
        predicted -- list of predicted values (same length)
        predVal   -- threshold in percent (e.g. 25 for PRED(25))

    Pairs with actual == 0 are skipped by the MRE test but still counted in
    the denominator, consistent with EstimateMMRE.
    """
    #BUGFIX: the original computed 'predVal/100', which under Python 2 integer
    #division truncates to 0 for any int predVal < 100 (so only exact
    #predictions were ever counted); force float division
    threshold = float(predVal) / 100.0
    pred = 0
    for i in range(len(actual)):
        if actual[i] != 0:
            mre = math.fabs(predicted[i] - actual[i]) / math.fabs(actual[i])
            if mre <= threshold:
                pred += 1
    return 100 * (float(pred) / float(len(actual)))


def test():

    for i in range(20):
        lst = []
        for j in range(20):
            if i != j: lst.append(j)

        print "i: " + str(i) + ", Index: " + str(getInsertIndex_2(lst, i))

        #for j in lst: print j
    #lst = []
    #print str(getInsertIndex(lst, 2))
    #print 'test'


def gridClusteringWithEntropyFilter(quadrant, listOfLeaves1, alpha, beta = None):
    """Group leaf quadrants into clusters, growing each cluster only while the
    merged class entropy does not increase.

    Parameters:
        quadrant      -- root quadrant (provides the bounds and full data set)
        listOfLeaves1 -- list of leaf quadrants (not modified; a copy is sorted)
        alpha         -- density factor: a neighbor may join only if its density
                         is >= alpha * density of the cluster's seed quadrant
        beta          -- unused; kept for interface compatibility

    Returns the list of Cluster objects, largest first.
    """
    #order quadrants by density (decreasing order)
    listOfLeaves = listOfLeaves1[:]
    listOfLeaves.sort(key = lambda leaf: 0-leaf.density)

    #estimate parameters needed for the cluster evaluation procedure
    initialEntropy = quadrant.projectedDataSet.entropy
    nrBlocks = len(listOfLeaves)
    maxSurface = (quadrant.maxX - quadrant.minX) * (quadrant.maxY - quadrant.minY)
    totalNrInstances = len(quadrant.projectedDataSet.instances)

    #reset cluster for all quadrants
    for quad in listOfLeaves: quad.cluster = None

    clusterId = 0
    clusters = []
    while len(listOfLeaves) > 0:
        centroid = listOfLeaves.pop(0) #pick the quadrant with the highest density

        if centroid.cluster != None: continue

        #initiate a new cluster and add the quadrant to it
        cluster = Cluster(clusterId)
        cluster.quadrants.append(centroid)

        minDensity = alpha * centroid.density # minimum density required to join the cluster

        #current entropy and class frequencies of the (growing) cluster
        clusterEntropy = centroid.projectedDataSet.entropy
        clusterClassFrequencies = centroid.projectedDataSet.classFreq
        centroid.cluster = cluster

        k = 0
        #breadth-first growth over the cluster's quadrants and their neighbors
        while k < len(cluster.quadrants):
            currentQuad = cluster.quadrants[k]

            #process the neighbors of the current quadrant
            for neigh in currentQuad.neighbors:
                if neigh.cluster == None and neigh.density >= minDensity:
                    #predict class frequencies and entropy if this neighbor is added
                    clusterClassFrequencies1 = combineClassFrequecies(clusterClassFrequencies,
                                              neigh.projectedDataSet.classFreq)
                    clusterEntropy1 = estimateEntropy(clusterClassFrequencies1)

                    #add the neighbor only if entropy does not increase
                    if clusterEntropy1 <= clusterEntropy:
                        cluster.quadrants.append(neigh)
                        neigh.cluster = cluster

                        #update current entropy and class frequencies
                        clusterEntropy = clusterEntropy1
                        clusterClassFrequencies = clusterClassFrequencies1

            #BUGFIX: advance to the next cluster quadrant unconditionally; the
            #original incremented k only when a neighbor was admitted, so a
            #quadrant with no qualifying neighbor looped forever
            k += 1

        #BUGFIX: finalize the cluster ONCE, after it has stopped growing; the
        #original ran these three lines inside the neighbor loop, repeatedly
        #merging and appending duplicate references to the same cluster
        cluster.mergeQuadrants() #merge the datasets of the cluster's quadrants
        cluster.computeStatistics(maxSurface) #compute the cluster's statistics
        clusters.append(cluster)

        clusterId += 1

    clusters.sort(key = lambda cluster: 0-len(cluster.mergedDataSet.instances))

    return clusters

def printClusterStatistics(clusters, classAttribute):
    """Print the statistics of every cluster for the given class attribute."""
    for currentCluster in clusters:
        currentCluster.printClusterStatistics(classAttribute)
def exportDataSetToArff(ds, fileName):
    """Export ds to an ARFF file.

    Opens fileName for writing and delegates to ds.exportDataSetToArff.
    Uses a context manager so the handle is closed even if the export raises
    (the original leaked the file object on error).
    """
    with open(fileName, "w") as f:
        ds.exportDataSetToArff(f, fileName)
def processFileName(fileName):
    """Return the base name of fileName: the text after the last '/' and
    before the last '.' (whole string when neither is present)."""
    slash = fileName.rfind("/")  # -1 when absent already acts as "start of string"
    dot = fileName.rfind(".")
    if dot < 0:
        dot = len(fileName)
    return fileName[slash + 1:dot]
def getExtension(fileName):
    """Return fileName's extension including the dot (e.g. ".csv"), or ""
    when there is no dot.

    The original also computed an unused base-name string (dead code copied
    from processFileName); it has been removed without changing the result.
    """
    dot = fileName.rfind(".")
    if dot < 0:
        return ""
    return fileName[dot:]



def getCurrentDate():
    """Return the current local timestamp as a 14-character digit string,
    formatted YYYYMMDDHHMMSS."""
    return datetime.now().strftime("%Y%m%d%H%M%S")

def preprocessCSV():
        fileNames = os.listdir('data_csv/') #List of accessable datasets
        for fileName in fileNames:
                if getExtension(fileName) == ".csv":
                        name = processFileName(fileName)
                        print name
                        discretizeClassToBinary_2('data_csv/' + name + '.csv', 'data_arff_2/' + name + '.arff')


#run the experiment only when executed as a script, not on import
if __name__ == '__main__':
    main()
#versions, clusters, centroids, features=experimentTemporal()