from dict_to_phyloxml import write_xml
from count_hmms import countseqs
from ssummolib import Options, seqDB, load_index
#import ArbIO
import os
import re
import sys
import time
import cPickle as pickle
import subprocess
import multiprocessing
import CONFIG
from threading import Thread
#from Queue import Queue
# Global tally of query sequences whose best bit-score was tied between
# several taxonomic nodes; incremented in score(), reported in main().
CONFIG.ambiguousCount = 0

def getTaxIndex():
    """Load the taxonomy index and prune it down to the three domain roots
    (Bacteria, Eukaryota, Archaea). Returns the pruned index dictionary."""
    tdict = load_index()
    for key in list(tdict):
        if key not in ('Bacteria', 'Eukaryota', 'Archaea'):
            del tdict[key]
    return tdict

def parseHMMSearchPipeVerbose(subProcess,lock=None):
        """Parse hmmsearch --tblout output from subProcess.stdout, echoing every
        line to stdout, and collect per-accession scores.

        subProcess -- Popen-like object with .stdout, .stderr, .wait().
        lock       -- optional lock guarding stdout writes; created locally if
                      omitted (the call in HMMsearchThread.run passes no lock).

        Returns { accession : [ [Evalues...], [bit-scores...] ] }.
        Raises IOError when the subprocess exits non-zero (its stderr is
        echoed to stdout first).

        Bug fixed: the original fell through after echoing header/blank lines
        (its 'continue' was commented out), so float() was attempted on
        non-data lines and the parser crashed on the first '#' header.
        """
        if lock is None:
                lock = multiprocessing.Lock()
        temp_scores = {}
        for line in subProcess.stdout:
                # Echo every line (verbose mode), then skip non-data lines.
                lock.acquire()
                sys.stdout.write(line.rstrip() + '\n')
                lock.release()
                if line.startswith('#') or line.strip() == '':
                        continue   # headers/blank lines carry no scores
                results = re.split('\s+',line)
                accession = results[0]
                Evalue = float(results[4])   # tblout column 5: full-seq E-value
                score = float(results[5])    # tblout column 6: full-seq bit-score
                if accession in temp_scores:
                        temp_scores[accession][0].append(Evalue)
                        temp_scores[accession][1].append(score)
                else:
                        temp_scores[accession] = [ [Evalue] , [score] ]
        retCode = subProcess.wait()
        if retCode != 0:
                lock.acquire()
                sys.stdout.write(subProcess.stderr.read() + '\n')
                sys.stdout.flush()
                lock.release()
                raise IOError
        subProcess.stdout.close()
        subProcess.stderr.close()
        return temp_scores

def parseHMMSearchPipe(subProcess):
        """Silently parse hmmsearch --tblout output from subProcess.stdout.

        Returns { accession : [ [Evalues...], [bit-scores...] ] }.
        Raises IOError when the subprocess exits non-zero (stderr is
        forwarded to this process's stderr first).
        """
        scores_by_acc = {}
        for line in subProcess.stdout:
            # Skip comment headers and blank lines; only data rows are scored.
            if line.startswith('#') or line.strip() == '':
                continue
            fields = re.split('\s+', line)
            acc = fields[0]
            evalue = float(fields[4])
            bitscore = float(fields[5])
            try:
                entry = scores_by_acc[acc]
                entry[0].append(evalue)
                entry[1].append(bitscore)
            except KeyError:
                scores_by_acc[acc] = [ [evalue], [bitscore] ]
        if subProcess.wait() != 0:
            sys.stderr.write( subProcess.stderr.read() )
            sys.stderr.flush()
            raise IOError
        subProcess.stdout.close()
        subProcess.stderr.close()
        return scores_by_acc


def findRelPath( node, OTUname, RelPath=None ):
        """Given a dictionary node, and an OTU name, will search for
        OTUname from the top of node and return the relative path as a
        list of node names (top-most first), or None when not found.

        Bug fixed: the default RelPath=[] was a mutable default argument —
        the shared list was mutated via append() and polluted every later
        call made without an explicit RelPath. The default is now None and
        a fresh list is built per call; recursion passes extended copies
        instead of mutating.
        """
        if RelPath is None:
                RelPath = []
        for key in node.keys():
                if key == OTUname:
                        return RelPath + [key]
                # Depth-first descent; the first hit anywhere wins.
                x = findRelPath( node[key], OTUname, RelPath=RelPath + [key] )
                if type(x) == list:
                        return x
        return None

def score(score_dict, tax_node,seq_db,path,diverge_dict={}):
        """Tallies and records all the scores from a tabular output of hmmsearch.
        score_dict is given and accessions_best is returned.
        score_dict must have accessions as keys, and the values are a list of
        lists; the 1st list being Evalues, and the 2nd list comprising bit-scores.
        In order to index these Evalues and scores properly, tax_node must be given
        and represent the order which HMMs were processed, and therefore is used
        to index what the node the best score represents.

        score_dict must also carry an 'order' key (popped here) giving the
        child-node names in column order.

        Returns (accessions_best, diverge_dict):
          accessions_best -- { accession : winning node name } for sequences
                             with a single best bit-score
          diverge_dict    -- bookkeeping for sequences whose best bit-score
                             is tied between several nodes

        NOTE(review): diverge_dict={} is a mutable default argument; the call
        site in SSUMMO always passes it explicitly, but a bare call would
        reuse (and mutate) the shared default dict.
        """
        best_key = None   # (unused)
        tax_node_keys = score_dict.pop('order')   # column order of child nodes
        accessions_best = {}
        num_nodes = len(tax_node_keys)
        for accession in score_dict:
#               Evalues,scores = score_dict[accession]
#               Evalues = score_dict[accession][0]  # replace these for line above.
                # Only the bit-score row is used for the decision; Evalues ignored.
                scores = score_dict[accession][1]
#               keyLen = len(Evalues)  ## This and the below assertion kind of unnecessary now it all works...
#               try:
#                       assert keyLen == len(scores) and keyLen == len(tax_node_keys)
#               except AssertionError:
#                       print "Problem with accession {0}".format(accession)
#                       print "Evalues: {0}".format(Evalues)
#                       print "Numbers (lengths):-\n\tkey_len: {0}\tkey_scores: {1}\tnum_nodes: {2}".format(keyLen,len(scores),num_nodes)
#                       print "Data:\n\tkey_scores: {0}\n\tNodes: {1}".format(scores,tax_node_keys)
#                       print "Score_dict items: {0}".format(str([x for x in score_dict.items()]))
#                       raise
                #best_Evalue = min(Evalues)   # Don't actually use Evalues.
                best_score = max(scores)
                n_best = scores.count(best_score)
                if n_best > 1:   ### Ambiguous nodes!!!!
                        # Tie: record every joint-winning node in diverge_dict.
                        CONFIG.ambiguousCount += 1
                        prev_index = -1
                        TopScorers = []
                        for count in xrange(n_best):
                                prev_index = scores.index(best_score, prev_index+1) ## Index all the top scores in scores.
                                TopScorers.append( tax_node_keys[prev_index] )  ## Joint-winning OTU name
                                if accession in diverge_dict.keys():    ## 
                                        startKeys = diverge_dict[accession]['startNode'].keys()
                                        if TopScorers[-1] in startKeys:
                                                diverge_dict[accession].update( { TopScorers[-1] : best_score } ) ## N.B. best_score remains unchanged each iteration of this loop.
                                        else:
                                                OTU_order = findRelPath( diverge_dict[accession]['startNode'], TopScorers[-1] )
                                                # NOTE(review): OTU_order is a list (findRelPath's return value)
                                                # and lists are unhashable, so this update raises TypeError if
                                                # this branch is ever reached — tuple(OTU_order) or the final
                                                # path element was probably intended. Confirm before relying
                                                # on this path.
                                                diverge_dict[accession].update( { OTU_order : best_score } )
                                else:
                                        # First time this accession ties: remember where it happened.
                                        diverge_dict.update( { accession : {TopScorers[-1] : best_score, 'start' : path ,'startNode' : tax_node }} )
#                       sys.stdout.write( ', '.join( TopScorers ) +'.\n')
                elif n_best == 1:
                        # Unique winner: map accession to its best node.
                        accessions_best.update( { accession : tax_node_keys[scores.index(best_score)] } )
                elif n_best == 0:
                        # Defensive: unreachable for non-empty scores, since the
                        # count of max(scores) is always >= 1.
                        seqDescription = seq_db.info[accession]['desc']
                        print '##\tno matches for accession {0}'.format(accession)
                        print '##\tDescription: {0}'.format(seqDescription)
                else:
                        print "##\tn_best = {0}".format(n_best)
                        raise ValueError("Can't figure out the winning nodes")
        return accessions_best, diverge_dict

class HMMsearchThread( multiprocessing.Process ):
    """Worker process that runs one hmmsearch per request.

    Protocol:
      * inQ receives [nseqs, HMMLocation]; nseqs == 'STOP' terminates.
      * nseqs FASTA-formatted sequences are received over the private Pipe
        end and streamed to hmmsearch's stdin.
      * the parsed per-accession score dict is put on outQ, keyed by the
        OTU name (HMM file basename); `semaphore` is released per search.

    Bug fixed: __init__ mixed literal tabs (old lines binding
    self.hmmsearchCMD) with space indentation — a TabError under Python 3
    and `python -tt`; indentation is now uniform.
    """

    def __init__(self, semaphore, inPipe, outPipe):
        multiprocessing.Process.__init__(self)
        # Resolve the hmmsearch executable: either a direct path in
        # CONFIG.hmmsearchCMD, or a name relative to CONFIG.hmmerdir.
        self.hmmsearchCMD = CONFIG.hmmsearchCMD
        if not os.path.exists( self.hmmsearchCMD ):
            self.hmmsearchCMD = os.path.join( CONFIG.hmmerdir, CONFIG.hmmsearchCMD )
            assert os.path.exists( self.hmmsearchCMD )
        self.inPipe, self.__outPipe = (inPipe, outPipe)
        self.inQ = multiprocessing.Queue()
        self.outQ = multiprocessing.Queue()
        self.sem = semaphore
        # 'REPLACED' is a placeholder swapped for the HMM path per search.
        if CONFIG.options['--max']:
            self.command = [ self.hmmsearchCMD, '-o', os.devnull, '--max', '--tblout', '/dev/stdout', '--noali', 'REPLACED', '/dev/stdin' ]
        else:
            self.command = [ self.hmmsearchCMD, '-o', os.devnull, '--tblout', '/dev/stdout', '--noali', 'REPLACED', '/dev/stdin' ]
        self.printHeaders = False

    def run(self):
        self.Eval = CONFIG.options['-Eval']
        self.score = CONFIG.options['-score']
        # NOTE(review): the verbose parser historically required a lock
        # argument not supplied here; parseHMMSearchPipeVerbose should
        # accept a single-argument call — confirm its signature.
        if CONFIG.options['--verbose']:
            parser = parseHMMSearchPipeVerbose
        else:
            parser = parseHMMSearchPipe
        nseqs, HMMLocation = self.inQ.get()
        while nseqs != 'STOP':
            self.command[-2] = HMMLocation
            process = subprocess.Popen( self.command, shell=False, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1 )
            try:
                # Stream each prefetched sequence to hmmsearch as FASTA.
                for n in xrange(nseqs):
                    process.stdin.write( self.__outPipe.recv().format( 'fasta' ) )
            except IOError:
                sys.stderr.write( 'hmmsearch error:-\n{0}\n\n'.format( process.stderr.read() ) )
                raise
            process.stdin.close()
            # OTU name = HMM file basename without its extension.
            OTUName = HMMLocation[HMMLocation.rfind( os.path.sep ) + 1 : HMMLocation.rfind('.')]
            nodeScores = parser( process )
            self.sem.release()
            self.outQ.put( { OTUName : nodeScores } )
            nseqs, HMMLocation = self.inQ.get()

def MergeDiverge( resultsDict, DivDict ):
        """Append each ambiguous accession in DivDict to every node of
        resultsDict it tied at. DivDict is returned unchanged; only the
        bookkeeping keys 'start'/'startNode' are skipped."""
        bookkeeping = ('startNode', 'start')
        for accession, nodeScores in DivDict.items():
            for nodeName in nodeScores.keys():
                if nodeName in bookkeeping:
                    continue
                target = resultsDict.get(nodeName)
                if target is not None:
                    target['accessions'].append(accession)
        return resultsDict, DivDict

def finalMerge( resultsDict, DivDict ):
        """Assign every remaining ambiguous accession to the node where its
        ambiguity was first recorded (the 'start' path in DivDict).

        The first path component matching a top-level resultsDict key anchors
        a walk down the nested result dicts; the accession is appended to
        that node's 'accessions' list (created on demand)."""
        pattern = '|'.join( '({0})'.format(key) for key in resultsDict.keys() )
        for accession in DivDict.keys():
            startPath = DivDict[accession]['start']
            anchor = re.search(pattern, startPath)
            if anchor:
                steps = startPath[anchor.start():].split( os.path.sep )
            else:
                steps = []   # no anchor: attach at the top of resultsDict
            target = resultsDict
            for step in steps:
                target = target[step]
            if 'accessions' in target:
                target['accessions'].append( accession )
            else:
                target['accessions'] = [accession]
        return resultsDict

def SepDiverge(resultsDict, DivDict,seq_db):
        """Given the results dictionary and the diverge dictionary,
        check the scores present in DivDict, looking for a single
        highest score.
        If there is a single highest score, place that accession in the
        appropriate node of resultsDict and delete it from DivDict.
        Otherwise leave the entry untouched.

        seq_db is accepted for interface compatibility but not used here.
        Returns (resultsDict, DivDict).

        Bug fixed: the original deleted DivDict entries while iterating
        DivDict.keys(), which only works because Python 2's keys() returns
        a list; iteration is now over an explicit snapshot, which is safe
        under both Python 2 and 3.
        """
        for accession in list(DivDict.keys()):
            entry = DivDict[accession]
            # Score keys are everything except the bookkeeping entries.
            nodeKeys = [k for k in entry.keys() if k not in ('startNode', 'start')]
            scores = [entry[node] for node in nodeKeys]
            bestScore = max(scores)
            if scores.count(bestScore) != 1:
                continue   # still ambiguous; leave for a later pass
            bestNode = nodeKeys[scores.index(bestScore)]
            if bestNode in resultsDict:
                resultsDict[bestNode]['accessions'].append(accession)
            else:
                # Resolved node is not in this result sub-tree; report and drop.
                sys.stdout.write("Can't find {0} in {1}\n".format(bestNode, resultsDict.keys()))
            del DivDict[accession]
        return resultsDict, DivDict

def SSUMMO(node, path,results_node,seqDB,threads,semaphore,diverge_dict={}):
        """Recursively classify query sequences down the taxonomy tree.

        At the current node, every assigned sequence is scored against the HMM
        of each child node (one hmmsearch per child, farmed out round-robin to
        the HMMsearchThread workers). Each sequence then descends into the
        child with the single best bit-score; ties are tracked in diverge_dict
        until resolved (SepDiverge) or finally merged (finalMerge, by caller).

        node         -- taxonomy sub-tree (nested dicts) for the current path
        path         -- filesystem directory of the current node (holds HMMs)
        results_node -- result sub-tree being built (mirrors node's shape)
        seqDB        -- sequence-DB process; prefetchQ feeds workers sequences
        threads      -- list of running HMMsearchThread workers
        semaphore    -- throttles concurrent hmmsearch jobs
        diverge_dict -- ambiguity bookkeeping. NOTE(review): mutable default
                        argument; recursion passes it explicitly, but the
                        top-level call in prepare() relies on the default, so
                        state would persist across repeated prepare() calls
                        in one interpreter.

        Returns (results_node, diverge_dict).
        """
        ## At each path, choose which dir to go into, by performing
        ## hmmsearch on the fasta sequence against each next HMM.
        # Depth below the database root; used only for tree-print indentation.
        depth = path.count( os.path.sep ) - CONFIG.arbDBdir.count( os.path.sep )
        node_keys = sorted(node.keys())
        # 'accessions' is result payload, not a child node name.
        if 'accessions' in node_keys:
                del(node_keys[node_keys.index('accessions')])
        num_nodes = len(node_keys)
        ## Call the MergeDiverge function here. Merge ambiguous results with results_node
        results_node,diverge_dict = MergeDiverge(results_node,diverge_dict)
        if num_nodes == 0:
                return results_node, diverge_dict
        # At the start node every indexed sequence is in play; below it, only
        # the accessions already assigned to this node (prefetched for workers).
        if os.path.realpath(path) == os.path.realpath( CONFIG.options['-start']):
                seqs = 'all'
                accessions = seqDB.indexes
                sequence_count = len( accessions )
        else:
                accessions = results_node['accessions']
                sequence_count = len(accessions)
                seqDB.prefetchQ.put(accessions)
        if num_nodes == 1:# or (num_nodes == 2 and 'accessions' in node_keys): ## If only one node, no need for hmmsearch'ing.
            # Single child: everything descends without searching.
            singleNode = node_keys[0]
            accessions_best = {}
            for accession in results_node.pop('accessions'):
                accessions_best.update( { accession: singleNode } )
            #results_node[singleNode]['accessions'].update( results_node.pop('accessions') )
        else:   ## Here is the loop to be parallelised.
            tempScoreDict = {}  # { accession : [[Evalues, ... ] , [Scores, ... ]] , ... }
            # NOTE(review): `[ [...] ] * 2` stores TWO REFERENCES TO THE SAME
            # inner list, so the Evalue row and score row alias each other and
            # the [0][i] / [1][i] writes below clobber one another. Harmless
            # only because score() reads just the score row — confirm before
            # reusing the Evalue row.
            for accession in accessions:
                tempScoreDict.update( { accession : [ [float() for i in xrange(num_nodes) ]] * 2 } )
            got = set()
            threadIndexes = {}
            threadInd = -1
            nThreads = len(threads) -1
            looped = 0
            for index in xrange(num_nodes):  ## Loop sends sequences to hmmsearch processes
                # Round-robin over the worker pool; once lapped (looped > 0),
                # a worker's previous result must be drained before reuse.
                if threadInd == nThreads:
                    threadInd -= nThreads
                    looped += 1
                else:
                    threadInd += 1
                if looped > 0:           ## Clause to retrieve finished results.
                    nodeScores = threads[threadInd].outQ.get()
                    OTU = nodeScores.keys()[0]
                    got.add( OTU )
                    nodeInd = node_keys.index(OTU)
                    # NOTE(review): `results` is [[Evalue], [bitScore]], so the
                    # unpacked values below are one-element lists, not floats;
                    # max()/count()/index() in score() still behave because
                    # such lists compare element-wise — confirm before changing.
                    for accession, results in nodeScores[OTU].items():
                        assert len(results[0]) == 1 and len(results[1]) == 1
                        Evalue,bitScore = results
                        tempScoreDict[accession][0][nodeInd] = Evalue
                        tempScoreDict[accession][1][nodeInd] = bitScore
                temp_path = os.path.join(path, node_keys[index])
                HMM = os.path.join(temp_path,node_keys[index] +'.hmm')
                semaphore.acquire()
                try:
                    threads[threadInd].inQ.put( [sequence_count, HMM] )  ## Tell HMMsearchthread to wait for sequences
                except IndexError:
                    print threadInd
                    raise
                threadIndexes.update( { node_keys[index] : threadInd } )
                seqDB.prefetchQ.put( threadInd )   ## Tell sequence DB process which HMMSearchThread to send sequences to.
            num_got = len(got)
            while num_nodes > num_got:   ## Loop flushes out all results from hmmsearch processes.
                for nodeName in node_keys:
                    if nodeName in got:
                        continue
                    else:
                        threadIndex = threadIndexes[nodeName]
                        nodeIndex = node_keys.index( nodeName )
                        nodeScores = threads[ threadIndexes[nodeName] ].outQ.get()
                        OTU = nodeScores.keys()[0]
                        got.add(OTU)
                        num_got += 1
                        for accession, results in nodeScores[OTU].items():
                            Evalue, bitScore = results
                            tempScoreDict[accession][0][ nodeIndex ] = Evalue
                            tempScoreDict[accession][1][ nodeIndex ] = bitScore
            # 'order' records which score column belongs to which child node.
            tempScoreDict['order'] = node_keys
            accessions_best,diverge_dict = score(tempScoreDict, node,seqDB,path,diverge_dict=diverge_dict)
        if accessions_best == {} and diverge_dict=={}:
            print "##\tNo results"
            print "##\t", node.keys()
            print "##\t", path
            return results_node, diverge_dict
        else: ## STORE RESULTS / UPDATE RESULT DICT #####
            choices = set()
            for accession, choice in accessions_best.items():
                if choice not in results_node.keys():
                    results_node.update( { choice : {'accessions' :[accession] } } )
                else:
                    results_node[choice]['accessions'].append(accession)
                choices.add(choice)
            choices = list(choices)
        if 'accessions' in results_node.keys():
                del(results_node['accessions'])
        div_keys = sorted(diverge_dict.keys())
        nDiverge = len(diverge_dict)
        results_node, diverge_dict = SepDiverge(results_node,diverge_dict,seqDB)        ## Separate the results after doing SSUMMO on all nodes.
        if len(diverge_dict) != nDiverge:
            sys.stdout.write( '{0}{1} ambiguous accessions: '.format(depth*5*' ',len(diverge_dict)-nDiverge) )
            # NOTE(review): the write below sits inside the loop, so the
            # (progressively pruned) div_keys listing is printed once per
            # remaining ambiguous entry — verify whether a single trailing
            # write was intended.
            for divNode in diverge_dict.keys():
                if divNode in div_keys:
                    del( div_keys[ div_keys.index(divNode)] )
                    #div_keys.pop(divNode)
                sys.stdout.write( ', '.join( list( div_keys ) )+ '.\n' )
        for choice in choices: ## Recursively do SSUMMO.
#               print "##\tnumber of seqs assigned to {0} is {1}".format(repr(choice),len(results_node[choice]['accessions']))
            ## If node has no results, delete it.
            if len(results_node[choice]['accessions']) == 0:
                del(node[choice])
            ## otherwise recursively do SSuMMO on winning node.
            else:
                ### Tree printing ###
                if choice == choices[0] and len(choices) == 1:
                    sys.stdout.write( '{0}|- {1} ({2})\n'.format(' '*depth*5, choice, len(results_node[choice]['accessions'] ) ) )
                elif choice == choices[0]:
                    sys.stdout.write( '{0}+- {1} ({2})\n'.format(' '*depth*5, choice, len(results_node[choice]['accessions']) ) )
                elif choice == choices[-1]:
                    sys.stdout.write( '{0}|_ {1} ({2})\n'.format( depth * 5 * ' ', choice,len(results_node[choice]['accessions']) ) )
                else:
                    sys.stdout.write( '{0}|- {1} ({2})\n'.format( depth * 5 * ' ', choice,len(results_node[choice]['accessions']) ) )
                sys.stdout.flush()
                ######################
                results_node[choice], diverge_dict = SSUMMO(node.pop(choice), os.path.join(path, choice ), results_node.pop(choice),seqDB,threads,semaphore,diverge_dict=diverge_dict)
        return results_node, diverge_dict

def findStart( tdict ):
        """Locates the directory where to enter the SSUMMO loop.
        Default is to start in arbDBdir, which is configured in
        CONFIG.py.
        To change, give the command option '-start /some/path/to/dir'

        Returns (node, startDir): the taxonomy sub-tree to descend from and
        its directory path. Falls back to (tdict, CONFIG.arbDBdir) when the
        requested start path shares no component with tdict's top level.
        Side effect: rewrites CONFIG.options['-start'] to the equivalent
        path under CONFIG.arbDBdir when a deeper start node is found.
        """
        if os.path.realpath(CONFIG.options['-start']) == os.path.realpath(CONFIG.arbDBdir):
                return tdict, CONFIG.arbDBdir
        else:
                found = False
                startKeys = tdict.keys()
                startDir = CONFIG.options['-start']
                startDir = startDir.rstrip(os.path.sep)
                pathList = CONFIG.options['-start'].split( os.path.sep)
                # Find the first path component that is a top-level taxonomy key;
                # that component anchors the walk down the taxonomy dict.
                for key in startKeys:
                        if key in pathList:
                                firstNode = pathList.index(key)
                                found = True
                                break
                        else:
                                continue
                node = tdict
                if not found:
                        return tdict, CONFIG.arbDBdir
                # Walk down the remaining components, skipping empty ones
                # (e.g. produced by a trailing path separator).
                for nodeName in pathList[firstNode:]:
                        if nodeName.strip() == '':
                                continue
                        else:
                                node = node[nodeName]
                                parentNode = nodeName
                startDir = os.path.join( CONFIG.arbDBdir,os.path.sep.join(pathList[firstNode:]) )
                CONFIG.options['-start'] = startDir
        print "\n##\tStarting SSUMMO from node '{0}' at path '{1}'".format(parentNode, startDir)
        return node, startDir


def prepare( ):
        """Given the name of a sequence file, makes a blast database
        and provides the entry point for the SSUMMO algorithm. Returns
        a dictionary of results containing taxonomic nodes with matches
        and at each node the accessions that have been assigned there.

        NOTE(review): despite the first sentence, no BLAST database is
        visibly built here — the code below only spawns hmmsearch workers
        and the seqDB process; confirm the description against history.
        """
        ## Inititiate threads
        # One HMMsearchThread worker per requested CPU; the semaphore caps
        # how many hmmsearch jobs may run concurrently.
        workerSem = multiprocessing.Semaphore(int( CONFIG.options['-ncpus']))
#        workerLock = multiprocessing.RLock()
        threads = []
        pipes = [multiprocessing.Pipe() for worker in xrange( int(CONFIG.options['-ncpus']))]
        for i in xrange(int(CONFIG.options['-ncpus'])):
                threads.append( HMMsearchThread( workerSem, pipes[i][0], pipes[i][1] ) )
                threads[-1].start()
        threads[0].printHeaders = True
        seq_file_name = CONFIG.options['-in']
        # The seqDB process indexes the query file and streams sequences to
        # the workers through `pipes`; outQ hands back the accession index.
        prefetchQ = multiprocessing.Queue() ; outQ = multiprocessing.Queue()
        seqDBprocess = seqDB(seq_file_name,prefetchQ,outQ,pipes)
        seqDBprocess.start()
        t0 = time.time()
        tdict = getTaxIndex()
#        seqDB = ArbIO.ArbIO(inHandle = seq_file_name,index=False)
#        nQuerySeqs = seqDB.indexAndInfo()
        t = time.time()
        #print "##\tIndexed {0} sequences from the sequence file in {1} seconds".format(len(seqDBprocess.indexes),t - t0)
        results = {}
        startNode, startDir = findStart(tdict )
        print "starting at '{0}'".format(startDir)
        seqDBprocess.indexes = outQ.get()
        # Recursive descent through the taxonomy; blocks until classification
        # of every query sequence has finished.
        results_dict, diverge_dict = SSUMMO(startNode, startDir, results,seqDBprocess,threads,workerSem)
        seqDBprocess.prefetchQ.put('END')
        if len(results_dict.keys()) == 0:
                sys.stderr.write( "Not a single sequence assigned unambiguously!")
        else:
                # Fold any still-ambiguous accessions back into the tree.
                results_dict = finalMerge( results_dict, diverge_dict )
        t = time.time() - t0
        # Shut the worker processes down cleanly.
        for thread in threads:
                thread.inQ.put(['STOP',None])
                thread.join()
        print "##\tprocessed {0} sequences in {1} seconds".format(countseqs(CONFIG.arbDBdir, file_name = seq_file_name), t)
        return results_dict


def parseArgs( args ):
        """Parse the command-line argument list into an Options object.

        Defaults are set below; '-out' defaults to the '-in' file name
        minus its extension when not supplied.
        """
        options = Options()
        options.options = {'-start' : CONFIG.arbDBdir,
                '-in' : None ,
                '-out': None , 
                '-ncpus': multiprocessing.cpu_count()-1 ,
                '-Eval': str(10),
                '-score': str(1),
                # NOTE(review): ('localhost') is a plain string, not a 1-tuple —
                # a trailing comma is missing if a tuple was intended.
                '-servers': ('localhost'),
                '--createXML' : False,
                '--verbose' : False,
                '--max':False
                }
        options.helpTxt = {'-start' : 'Start node for SSUMMO. Can be a domain, or deeper, but must start at least with the domain',
                '-in' : 'Query sequence file.',
                '-out': 'Output results file names. Suffix will change. [Optional - prefix inferred from -in]',
                '-ncpus':'Number of worker processes to initiate. [CPU count - 1]',
                '-servers': 'Server names. [Not implemented yet].',
                '-Eval': 'hmmsearch Evalue threshhold [10]',
                '-score':'hmmsearch score threshold [1]'
                }
        options.switches = {
                '--createXML':'Create phyloxml output?? [No]',
                '--verbose' : 'Print HMMer results [No!]',
                '--max' : 'Use hmmsearch --max flag (bypass filters that remove sequences from full scoring set). [no]'
                }
        # NOTE(review): '-createXML' here does not match the '--createXML'
        # switch spelling above — confirm which form Options.parseArgs expects.
        options.singleargs = [ '-in','-out','-ncpus','-servers','-createXML','-start']
        options.parseArgs( args )
        # Derive the output prefix from the input file name when -out is absent.
        # NOTE(review): raises AttributeError if '-in' was not supplied either.
        if options['-out'] == None:
            prefix = options['-in'][ : options['-in'].rfind('.') ]
            options['-out'] = prefix
        return options


def main( options ):
        t0 = time.time()
        CONFIG.options = options
        results_dict = prepare( )

        print "##\t{0} ambiguous results".format(CONFIG.ambiguousCount)
        ### Save results files
        print "##\tSaving pickled results file to '{0}.pkl'".format( options['-out'] )
        with file('{0}.pkl'.format( CONFIG.options['-out']),'wb') as pickle_file:
            pickle.dump( results_dict, pickle_file)
        if options['--createXML']:
            startNode,startDir = findStart( getTaxIndex())
            print "##\tWriting phyloxml file to '{0}.xml'".format(options['-out'])
            with file('{0}.xml'.format(options['-out']),'w') as write_handle:
                write_xml(startDir,write_handle,results_dict)
        t = time.time()
        mins = int(t-t0) / 60
        print "##\tSuccessfully finished in {0} mins{1} secs".format( mins, (t - t0) - (mins*60))


if __name__ == '__main__':
        # Parse CLI options, publish them via CONFIG, and run the pipeline.
        CONFIG.options = parseArgs( sys.argv[1:] )
        main( CONFIG.options )
