from minings.stat import fisher
from minings.utils import utility
from minings.utils import mysqlrecord
import string,MySQLdb
import orange,numpy,operator,cPickle,xlwt,xlrd,interval
from copy import deepcopy


class MSTAT(object):
    
    def __init__(self,**kwargs):
        """Configure the MSTAT feature-selection stage.

        Recognised keyword options (any extra keyword is stored as an
        attribute as well):
          n           -- keep the n best features in __call__ (default None)
          threshold   -- p-value cutoff used by __call__ (default None)
          getwhat     -- selection mode tag (default "NUMB")
          fromsvm     -- score the SVM training split instead of the raw data
          fisherTest  -- use the Fisher exact test instead of the t-test
          dbaselist   -- optional database list
          doOrgndata  -- operate on the original dataset instead of children
          pthreshold  -- p-value cutoff used when writing the Excel report
          maindbname  -- MySQL database name for runData()
          recordover  -- reuse a previously recorded orgnresult
                         (only meaningful together with doOrgndata = True)
        """
        options = {
            "n":           None,   #For __call__
            "threshold":   None,   #For __call__
            "getwhat":     "NUMB",
            "fromsvm":     False,
            "fisherTest":  False,
            "dbaselist":   None,
            "doOrgndata":  False,
            "pthreshold":  0.5,
            "maindbname":  "",
            "recordover":  False,
        }
        options.update(kwargs)
        self.__dict__.update(options)
        #Arbitrary properties -- deliberately assigned after the kwargs merge,
        #so they cannot be overridden by callers (matches original behaviour).
        self.cutoff  = 8 #Criteria for big or small sample
        self.mainres = utility.RES()
        
    def __call__(self,data,scaled):
        """Return a new orange table restricted to the most significant features.

        data   -- two-class orange table with continuous attributes only
        scaled -- truthy when 'data' has been scaled (affects the Fisher path)

        Features are ranked by p-value; selection uses self.threshold (p-value
        cutoff) and/or self.n (top-n). Returns None when the data is
        unsupported or no selection criterion is configured.
        """
        if len(data.domain.classVar.values) != 2:
            return None #Only number of classes = 2 is allowed
        for attr in data.domain.attributes:
            if attr.varType == orange.VarTypes.Discrete: #Mstat only handles continuous values right now
                return None
        if self.fisherTest == False:
            pvalue,tstat = self.assessSigf(data) # {feature1:p1,feature2:p2 .. }
        else:
            # BUGFIX: the original called self.runfisher(data,scaled), but
            # runfisher() takes a single result object and returns nothing;
            # getpFisher() is the routine that actually returns (pvalue,cp).
            # NOTE(review): 'scaled' is assumed to flag scaled data -- confirm
            # against callers that "SCAL"/non-"SCAL" is the intended mapping.
            pvalue,cp = self.getpFisher(data, "SCAL" if scaled else "NORM")
        pvalue_sort = sorted(pvalue.items(), key=operator.itemgetter(1)) #low -> high
        # BUGFIX: the original left 'ftbest' unbound (UnboundLocalError) when
        # n and threshold were both None or both set; handle every combination.
        if self.n is None and self.threshold is None:
            return None #no selection criterion configured
        selected = pvalue_sort
        if self.threshold is not None:
            #keep valid p-values only (-1 is the failure sentinel) under the cutoff
            selected = [(ft,p) for ft,p in selected if (p >= 0 and p <= self.threshold)]
        if self.n is not None:
            selected = selected[:self.n]
        ftbest = [ft for ft,p in selected]
        #Craft a new dataset from the list of features
        select_features = [u for u in list(data.domain.attributes) if u.name in ftbest]
        classVar        = data.domain.classVar
        newdomain       = orange.Domain(select_features + [classVar])
        newdata         = data.select(newdomain)
        return newdata
    
    def runData(self):
        """Iterate every row of the results table and (re)run MSTAT on it.

        For each dataset row the pickled data factory and the prior SVM/MSTAT
        results are loaded; mainRun() is invoked either when no MSTAT result
        exists yet or when self.flow.REmstat requests a re-run. Finally the
        Excel report is written and the connection is closed.

        NOTE(review): self.flow is never set in __init__ -- it must be
        supplied via kwargs, otherwise rows that already have an MSTAT result
        raise AttributeError. Confirm against callers.
        """
        #MySQL properties
        self.R              = mysqlrecord.datasetMySQLrecord(databasename = self.maindbname)
        #Initial running
        self.R.cursor.execute("""select * from %s""" % self.R.tab_res.tname)
        row                 = self.R.cursor.fetchall()
        for i in row:
            datasetname     = i[self.R.tab_res.datasetname] #datasetname examples: gut_1_Healthy Control_Ulcerative Colitis (NOTE: unused below)
            datafact        = cPickle.loads(i[self.R.tab_res.datafact])
            results_svm     = cPickle.loads(i[self.R.tab_res.results_svm])
            results_mstat   = cPickle.loads(i[self.R.tab_res.results_mstat])
            if results_mstat != None:
                if self.flow.REmstat == True: #re-run only when explicitly requested
                    self.F              = datafact
                    self.results_svm    = results_svm
                    self.results_mstat  = results_mstat
                    self.mainRun()        
                else:
                    continue
            else:
                self.F              = datafact
                self.results_svm    = results_svm
                self.mainRun()
                #This means each DataFactory instance can only produce one result if self.doOrgndata is True
        self.xcelRecord()
        self.R.Close()
        
    def mainRun(self):
        """Run the configured significance test over the selected datasets.

        Depending on configuration this analyses either every SVM result
        (fromsvm), the original dataset (doOrgndata True/None), or every
        child dataset, then persists everything via recordall().
        """
        if self.fromsvm == True:
            fst = "DSVM" #data came from the SVM stage
            if self.results_svm == None:
                print("There is no data from svm results")
                return #nothing to analyse; skip recordall() as before
            for res in self.results_svm.results: #Do every result of svm
                self._analyze(deepcopy(res), fst)
        else:
            fst = "DNORM"
            if self.doOrgndata == True: #Each sheet of the xcel file is named mdata.datasetname
                #Reuse the previously recorded result only when recordover asks
                #for it and one actually exists; otherwise start fresh.
                if self.recordover == True and self.results_mstat.orgnresult is not None:
                    mdata = self.results_mstat.orgnresult
                else:
                    mdata = deepcopy(self.F.orgnRes)
                self._analyze(mdata, fst)
            elif self.doOrgndata is None:
                #Distinct mode: analyse orgnRes in place (no deepcopy).
                self._analyze(self.F.orgnRes, fst)
            else:
                for child in self.F.childdatasets:
                    self._analyze(deepcopy(child), fst)
        self.recordall()

    def _analyze(self, mdata, fst):
        """Brush mdata.data, tag its data source, and dispatch to the configured test."""
        mdata.data         = utility.DatasetTools.finalBrush(mdata.data)
        mdata.MSTATdsource = fst
        if self.fisherTest == True:
            self.runfisher(mdata)
        else:
            self.runnormal(mdata)
        
    def recordall(self):
        """Pickle self.mainres into this dataset's row of the results table,
        then reset self.mainres for the next dataset.
        """
        payload = cPickle.dumps(self.mainres,2)
        # Use driver-side parameter binding for the VALUES (MySQLdb performs
        # the escaping/quoting), instead of escape_string + string formatting.
        # Identifiers (table/column names) cannot be bound, so they are still
        # interpolated from the trusted schema description.
        self.R.cursor.execute(
            """update %s set %s=%%s where %s=%%s""" % (self.R.tab_res.tname,
                                                       self.R.tab_res.results_mstat,
                                                       self.R.tab_res.datasetname),
            (payload, self.F.datasetname))
        #Deinitial
        self.mainres = utility.RES()
        
    def runnormal(self,mdata): #This means run t-test
        """Score every feature of mdata with the permutation t-test and store
        the results on mdata, then register mdata on self.mainres.

        Sets: MSTATsmethod, MSTATp_tstat, MSTATinfo_tstat, MSTATp_sort_tstat,
        MSTAThead. Aborts (returns None) for non-binary or discrete data.
        """
        if len(mdata.data.domain.classVar.values) != 2:
            print("Only number of classes = 2 is allowed, operation aborted")
            return None
        for attr in mdata.data.domain.attributes:
            if attr.varType == orange.VarTypes.Discrete:
                print("MSTAT does not support discrete type dataset")
                return None
        mdata.MSTATsmethod = "SNORMAL"
        # Score the SVM training split when the data came from the SVM stage,
        # otherwise the full dataset. (The original also contained a no-op
        # self-assignment of SVMtraining_data, removed here.)
        source = mdata.SVMtraining_data if self.fromsvm == True else mdata.data
        mdata.MSTATp_tstat,mdata.MSTATinfo_tstat = self.assessSigf(source)
        #mdata.MSTATp_tstat    = {feature1:p1,feature2:p2 .. }
        #mdata.MSTATinfo_tstat = {feature1:( t1,{class1:(xbar,var),class2:(xbar,var)} ) , .. }
        #Sort p-values from low to high for reporting.
        mdata.MSTATp_sort_tstat = sorted(mdata.MSTATp_tstat.items(), key=operator.itemgetter(1))
        mdata.MSTAThead = list(mdata.data.domain.classVar.values)
        if self.doOrgndata == True:
            self.mainres.orgnresult = mdata
        else:
            self.mainres.results.append(mdata)
            
    def runfisher(self,mdata): #Get p-value by Fisher exact test
        """Score every feature of mdata with the Fisher exact test and store
        the sorted p-values on mdata, then register mdata on self.mainres.

        Aborts (returns None) for non-binary or discrete data.
        """
        if len(mdata.data.domain.classVar.values) != 2:
            print("Only number of classes = 2 is allowed, operation aborted")
            return None
        for feature in mdata.data.domain.attributes:
            if feature.varType == orange.VarTypes.Discrete:
                print("MSTAT does not support discrete type dataset")
                return None
        mdata.MSTATsmethod = "SFISHER"
        maindata = mdata.SVMtraining_data if self.fromsvm == True else mdata.data
        #Main Fisher calculation
        mdata.MSTATp_fisher,mdata.MSTATinfo_fisher = self.getpFisher(maindata,mdata.main)
        #mdata.MSTATp_fisher    = {feature1:p1,feature2:p2 .. }
        #mdata.MSTATinfo_fisher = {ft:{cls0:p0,cls1:p1}, .. } with p0 = f11/f21, p1 = f12/f22
        pairs = [(ft, mdata.MSTATp_fisher[ft]) for ft in mdata.MSTATp_fisher]
        pairs.sort(key = operator.itemgetter(1)) #Sort from low - high
        mdata.MSTATp_sort_fisher = pairs
        mdata.MSTAThead = list(mdata.data.domain.classVar.values)
        if self.doOrgndata == True:
            self.mainres.orgnresult = mdata
        else:
            self.mainres.results.append(mdata)
                  
    def getpFisher(self,maindata,main):
        """Build a 2x2 contingency table per feature and return Fisher p-values.

        maindata -- two-class orange table
        main     -- 'SCAL' when the data was scaled (presence threshold is then
                    derived from self.F.scalinfo / scale_range), otherwise the
                    threshold is 0

        Returns (pvalue, cp) where
          pvalue = {ft: p, ...}   (-1 is the sentinel for failed tests)
          cp     = {ft: {cls0: p0, cls1: p1}, ...} with pX = presence fraction
                   of class X for this feature vs all other features

        The table for feature u is [[cntP], [cntN]]: counts above the
        threshold for u itself (row 1) and pooled over every other feature
        (row 2), split by class (columns).
        """
        mapft = self.mapFeature(maindata) #{ft0:{cls0:[v1,v2,v3 ..],cls1:[v1,v2,v3 ..] .. },ft1: }
        #Map class value <-> column index of the contingency rows.
        mapcls   = {} #{cls0:0,cls1:1}
        mapcls_r = {} #{0:cls0,1:cls1}
        for idx, cls in enumerate(maindata.domain.classVar.values):
            mapcls[cls]   = idx
            mapcls_r[idx] = cls
        cont   = {} #{ ft0:[[a,b],[c,d]],ft1: .. }
        pvalue = {} #{feature1:p1,feature2:p2 .. }
        cp     = {} #{ft:{cls0:p0,cls1:p1}, .. }
        for u in mapft:
            #Presence threshold; for scaled data it depends on this feature's
            #scale info, so it must be recomputed per feature.
            if main == 'SCAL':
                low = self.F.scalinfo[u][0] #Get low in (low,high) of one feature scale
                if low > 0:
                    base = self.F.scale_range[0] - 1
                else:
                    base = self.F.scale_range[0]
            else:
                base = 0
            cntP = [0,0] #per-class counts above base for feature u
            cntN = [0,0] #per-class counts above base pooled over all other features
            for ft in mapft:
                target = cntP if ft == u else cntN
                for cls in mapft[ft]:
                    col = mapcls[cls]
                    for v in mapft[ft][cls]:
                        if v > base:
                            target[col] += 1
            cont[u] = [list(cntP),list(cntN)]
            #Per-class presence fraction of this feature vs the rest.
            csub = {}
            for w in mapcls_r:
                try:
                    prob = cntP[w]/float(cntP[w] + cntN[w])
                except ZeroDivisionError:
                    prob = -1 #sentinel: no observations above base in this class
                csub[mapcls_r[w]] = prob
            cp[u] = csub
        for ft in cont:
            try:
                #HACK: this particular table is known to crash fisher.fisherExact;
                #force the failure sentinel instead.
                if cont[ft] == [[0, 1], [15, 14]]:
                    raise ValueError
                pvalue[ft] = fisher.fisherExact(cont[ft])[1]
                if numpy.isnan(pvalue[ft]):
                    pvalue[ft] = -1
            except (ValueError,ZeroDivisionError):
                pvalue[ft] = -1
        return pvalue,cp
    
    def mapFeature(self,inputdata):
        #This method could be done by select method of orange
        mapft  = {} #{ft0:{cls0:[v1,v2,v3 ..],cls1:[v1,v2,v3 ..] .. },ft1: }
        for i in range(len(inputdata.domain.attributes)): #Create hash for each feature
            mapft[inputdata.domain.attributes[i].name] = {}
            for j in range(len(inputdata.domain.classVar.values)):
                mapft[inputdata.domain.attributes[i].name][inputdata.domain.classVar.values[j]] = []
        for i in range(len(inputdata.domain.attributes)): #Fill the hashes
            for j in range(len(inputdata)):
                mapft[inputdata.domain.attributes[i].name][inputdata[j].getclass().value].append(round(inputdata[j][i].value,2))
        return mapft
    
    def calculateT(self,inputdata):
        tstat = {} #{feature1:(t1,(xbar,var,nt)),feature2:(t2,(xbar,var,nt)) .. }
        mapft = self.mapFeature(inputdata) #{ft0:{cls0:[v1,v2,v3 ..],cls1:[v1,v2,v3 ..] .. },ft1: .. }
        #Calculate t
        for i in mapft:
            statclass = {} #{class1:(xbar,var),class2:(xbar,var)}
            t_L = []
            for j in mapft[i]: #Only 2 iterations because there r 2 classes
                varray  = numpy.array(mapft[i][j])
                nt      = len(varray)
                xbar    = numpy.mean(varray)
                var     = numpy.var(varray)
                statclass[j] = (xbar,var)
                t_L.append((xbar,var,nt))
            tv = (t_L[0][0] - t_L[1][0])/numpy.sqrt( (t_L[0][1]/float(t_L[0][2])) - (t_L[1][1]/float(t_L[1][2])) )
            tstat[i] = (tv,deepcopy(statclass))
        return tstat
    
    def assessSigf(self,maindata): #Get p-value by statistical test
        """Estimate a p-value per feature by comparing the observed t statistic
        against t statistics computed on B class-noised copies of the data.

        Returns (pvalue, tmain) with
          pvalue = {feature: p, ...}
          tmain  = {feature: (t, {cls: (xbar, var), ...}), ...}

        NOTE(review): the class "permutation" is approximated with
        orange.Preprocessor_addClassNoise(proportion=0.9) -- confirm this is
        the intended null model.
        """
        clsnum = {} #{class value: sample count}
        clsnum_L = [ (cls,int(number)) for cls,number in zip(maindata.domain.classVar.values,orange.Distribution(maindata.domain.classVar,maindata)) ]
        for i in clsnum_L:
            clsnum[i[0]] = i[1]
        pvalue      = {} #{feature1:p1,feature2:p2 .. }
        seedStart   = 1000
        repeat      = 100 #Number of class permutations
        B           = repeat #From the paper
        M           = len(maindata.domain.attributes) #From the paper
        cnt         = 0
        tmain       = self.calculateT(maindata) #observed t statistics
        tall        = [] #t statistics of each noised replicate
        while True:
            iseed       = seedStart * cnt #NOTE: the first replicate uses seed 0
            randomdata  = orange.Preprocessor_addClassNoise(maindata, proportion = 0.9,randomGenerator=iseed)
            tvalue      = self.calculateT(randomdata) #{feature1:t1,feature2:t2 .. }
            tall.append(tvalue)
            cnt         += 1
            if cnt >= repeat:
                break
        small = False #If False, only t of feature is used
        #Small-sample mode: if any class has fewer than self.cutoff examples,
        #pool the null t values of ALL features instead of only this feature's.
        for i in clsnum:
            if clsnum[i] < self.cutoff:
                small = True
                break
        for i in tmain:
            if small == False:
                #p = fraction of replicates with |t_replicate| >= |t_observed|.
                #nan handling: a nan replicate t always counts as extreme; when
                #the observed t is nan, only nan replicates count.
                upcnt = 0
                for j in range(len(tall)):
                    if numpy.isnan(tall[j][i][0]) == False and numpy.isnan(tmain[i][0]) == True:
                        continue
                    elif numpy.isnan(tall[j][i][0]) == True and numpy.isnan(tmain[i][0]) == True:
                        upcnt += 1
                        continue
                    elif numpy.isnan(tall[j][i][0]) == True and numpy.isnan(tmain[i][0]) == False:
                        upcnt += 1
                        continue
                    elif abs(tall[j][i][0]) >= abs(tmain[i][0]):
                        upcnt += 1
                pv = upcnt/float(B)
                pvalue[i] = pv
            else:
                #Small-sample path: compare against every feature's null t
                #(same nan rules as above), normalising by B * M.
                upcnt = 0
                for j in range(len(tall)):
                    for k in tall[j]:
                        if numpy.isnan(tall[j][k][0]) == False and numpy.isnan(tmain[i][0]) == True:
                            continue
                        elif numpy.isnan(tall[j][k][0]) == True and numpy.isnan(tmain[i][0]) == True:
                            upcnt += 1
                            continue
                        elif numpy.isnan(tall[j][k][0]) == True and numpy.isnan(tmain[i][0]) == False:
                            upcnt += 1
                            continue
                        elif abs(tall[j][k][0]) >= abs(tmain[i][0]):
                            upcnt += 1
                pv = upcnt/(float(B) * M)
                pvalue[i] = pv
        return pvalue,tmain
    
    def xcelRecord(self):
        """Dump every recorded Fisher result to a multi-sheet Excel workbook.

        One sheet per dataset group (ordered by the name table); within a
        sheet, one (Features, P-value) column pair per class-vs-class
        comparison. Only p-values in [0, self.pthreshold] are written.

        NOTE(review): the output path is hard-coded to a user desktop --
        TODO make it configurable.
        """
        #Excel properties
        self.book       = xlwt.Workbook()
        self.xcelfile   = "C:\\Users\\aonlazio\\Desktop\\BILLION\\PHD\\dissertation\\documents\\PAPER_1\\MSTAT_Result_fresh.xls"
        self.R.cursor.execute("""select * from %s""" % (self.R.tab_L.tname) )
        row             = self.R.cursor.fetchall() #row = [ {'name':'gut_Entire_Colon2'},{'name':'gut_Endo_Ileum'}, .. ]
        self.namesort   = [u[self.R.tab_L.name] for u in row] #desired sheet order
        self.name       = {} #{gut_Entire_Colon2: {Crohn__Healthy Control: best:,Polyp__Healthy Control:best}, .. }
        #Initial running
        writeXcelActivated  = False #Indicate whether the xcel file is ready to be written or not
        self.R.cursor.execute("""select * from %s""" % self.R.tab_res.tname)
        row             = self.R.cursor.fetchall()
        if row == None:
            return
        else:
            #Gather information: group results by dataset group / comparison name
            for i in row:
                datasetname     = i[self.R.tab_res.datasetname] #gut_Entire_Colon2__Crohn__Healthy Control or gut_Endo_Colon2_Lumen__Crohn_Colon__Crohn_Lumen
                results_mstat   = cPickle.loads(i[self.R.tab_res.results_mstat])
                if results_mstat == None: continue
                groupname       = datasetname.split("__")[0] #gut_Entire_Colon2
                subname         = "__".join(datasetname.split("__")[1:]) #Crohn__Healthy Control
                mdata           = results_mstat.orgnresult
                try:
                    self.name[groupname][subname]   = mdata
                except KeyError:
                    self.name[groupname]            = {}
                    self.name[groupname][subname]   = mdata
            row_list            = ["Features","P-Value" ]                        
            col_width           = 5000
            self.sheet          = {}
            for i in self.namesort: #["gut_Entire_Colon2","gut_Endo_Ileum"]
                try:
                    self.name[i]
                except KeyError: continue #no results for this group; no sheet
                writeXcelActivated  = True 
                self.sheet[i]       = self.book.add_sheet(i)
                self.sheet[i].col(0).width  = col_width
                self.sheet[i].row(0).write(0,i)
                self.sheet[i].row(1).write(0,"Positive")
                self.sheet[i].row(2).write(0,"Negative")
                cur_col         = 1
                cur_row         = 0
                for j in self.name[i]: #{gut_Entire_Colon2: {Crohn__Healthy Control: best:,Ulcerative Colitis__Healthy Control:best}, .. }
                    self.sheet[i].col(cur_col).width        = col_width
                    self.sheet[i].col(cur_col+1).width      = col_width
                    mdata       = self.name[i][j]
                    xname       = j.split("__")
                    #"Healthy Control" is always reported as the Negative class.
                    if "Healthy Control" in xname:
                        Positive    = xname[0] if xname[0] != "Healthy Control" else xname[1]
                        Negative    = xname[1] if xname[1] == "Healthy Control" else xname[0]
                    else:
                        Positive    = xname[0]
                        Negative    = xname[1]
                    self.sheet[i].row(cur_row+1).write(cur_col,Positive)
                    self.sheet[i].row(cur_row+2).write(cur_col,Negative)
                    self.sheet[i].row(cur_row+3).write(cur_col,"Features")
                    self.sheet[i].row(cur_row+3).write(cur_col+1,"P-value")
                    temprow     = cur_row + 4
                    #keep only p-values inside the closed interval [0, pthreshold]
                    pselect     = [u for u in mdata.MSTATp_sort_fisher if u[1] in interval.Interval(0,self.pthreshold,lower_closed = True,upper_closed = True)] \
                                  if mdata.MSTATp_sort_fisher != None else [] #[(feature1,p1),(feature2,p2), ..] sorted from low-high
                    for k in pselect:
                        self.sheet[i].row(temprow).write(cur_col,k[0])
                        self.sheet[i].row(temprow).write(cur_col+1,"%.4f" % k[1])
                        temprow += 1
                    cur_col     += 2
        if writeXcelActivated:
            self.book.save(self.xcelfile)

