#!/usr/bin/env python
from dict_to_phyloxml import write_xml
from count_hmms import countseqs
from ssummolib import Options, load_index
import os
import re
import sys
import time
import cPickle as pickle
#import cProfile
import subprocess
import multiprocessing
import CONFIG
import Bio.SeqRecord
from Bio import SeqIO
#import ArbIO
from threading import Thread
CONFIG.ambiguousCount = 0

def getTaxIndex():
    """Load the taxonomy index and prune it down to the three
    top-level domains (Bacteria, Eukaryota, Archaea)."""
    tdict = load_index()
    unwanted = [name for name in tdict.keys()
                if name not in ('Bacteria', 'Eukaryota', 'Archaea')]
    for name in unwanted:
        del tdict[name]
    return tdict

def parseHMMSearchPipeVerbose(subProcess, lock=None):
    """Parse hmmsearch --tblout output read from subProcess.stdout,
    echoing every line to stdout (serialised through lock), and yield
    one (accession, Evalue, score) tuple per result row.

    subProcess - Popen-like object with .stdout, .stderr and .wait().
    lock       - optional Lock serialising the echoed output; a private
                 one is created when omitted.

    Raises IOError (after dumping hmmsearch's stderr) when the
    subprocess exits non-zero.
    """
    if lock is None:
        lock = multiprocessing.Lock()
    try:
        for line in subProcess.stdout:
            if line.startswith('#') or line.strip() == '':
                # Header / blank line: only echo when configured to.
                if CONFIG.printHeaders:
                    lock.acquire()
                    print(line.rstrip())
                    lock.release()
                continue
            lock.acquire()
            print(line.rstrip())
            lock.release()
            fields = re.split('\s+', line)
            # tblout fields used here: [0] target name, [4] full-sequence
            # E-value, [5] full-sequence bit score.
            yield (fields[0], float(fields[4]), float(fields[5]))
        retCode = subProcess.wait()
        if retCode != 0:
            sys.stderr.write(subProcess.stderr.read())
            sys.stderr.flush()
            raise IOError('hmmsearch exited with status {0}'.format(retCode))
    finally:
        # Close the pipes on success *and* failure -- the original leaked
        # both when hmmsearch exited non-zero (raise happened first).
        subProcess.stdout.close()
        subProcess.stderr.close()

def parseHMMSearchPipe(subProcess):
    """Parse hmmsearch --tblout output read from subProcess.stdout and
    yield one (accession, Evalue, score) tuple per result row.

    subProcess - Popen-like object with .stdout, .stderr and .wait().

    Raises IOError (after dumping hmmsearch's stderr) when the
    subprocess exits non-zero.
    """
    try:
        for line in subProcess.stdout:
            # Skip headers AND blank lines: the original only skipped '#'
            # lines, so a blank line crashed with IndexError below (the
            # verbose variant already guarded against this).
            if line.startswith('#') or line.strip() == '':
                continue
            fields = re.split('\s+', line)
            # tblout fields used here: [0] target name, [4] full-sequence
            # E-value, [5] full-sequence bit score.
            yield (fields[0], float(fields[4]), float(fields[5]))
        retCode = subProcess.wait()
        if retCode != 0:
            sys.stderr.write(subProcess.stderr.read())
            sys.stderr.flush()
            raise IOError('hmmsearch exited with status {0}'.format(retCode))
    finally:
        # Close the pipes on success *and* failure (the original leaked
        # them on the error path).
        subProcess.stdout.close()
        subProcess.stderr.close()

def MergeDiverge(resultsDict, DivDict):
    """Given the results_node dictionary (resultsDict), add accessions present
    in DivDict to the appropriate nodes in resultsDict.

    Every node name recorded against an ambiguous accession (i.e. every
    key except the 'start'/'start_node' bookkeeping entries) that also
    exists in resultsDict gets the accession appended to its
    'accessions' list.  Returns (mutated resultsDict, untouched DivDict).
    """
    for accession in DivDict.keys():
        # The original also bound DivDict[accession]['start'] here but
        # never used it -- dropped.
        for node in DivDict[accession].keys():
            if node in ('start_node', 'start'):
                continue
            if node in resultsDict:
                resultsDict[node]['accessions'].append(accession)
    return resultsDict, DivDict

def finalMerge(resultsDict, DivDict):
    """Place every still-ambiguous accession in DivDict at the node where
    its ambiguity was first recorded (its 'start' path).

    The start path is matched against the top-level keys of resultsDict
    to locate where the relative path into resultsDict begins; the
    accession is then appended to that node's 'accessions' list
    (created on demand).  Returns the mutated resultsDict.
    """
    # re.escape each node name: OTU names may contain regex
    # metacharacters, which the original fed into the pattern raw.
    reg = '|'.join(['({0})'.format(re.escape(key)) for key in resultsDict.keys()])
    for accession in DivDict.keys():
        start = DivDict[accession]['start']
        node = resultsDict
        firstNode = re.search(reg, start)
        if firstNode:
            pathList = start[firstNode.start():].split(os.path.sep)
        else:
            pathList = []
        for OTU in pathList:
            node = node[OTU]
        if 'accessions' in node:
            node['accessions'].append(accession)
        else:
            node['accessions'] = [accession]
    return resultsDict

def SepDiverge(resultsDict, DivDict, seq_db):
    """Given the results dictionary and the diverge dictionary,
    check the scores present in DivDict, looking for a single
    highest score.
    If there is a single highest score, place that in the
    appropriate location and delete it from DivDict.
    Otherwise, don't change anything.

    Returns the (mutated) resultsDict and DivDict.
    """
    # Snapshot the keys: entries are deleted from DivDict as they resolve.
    for accession in list(DivDict.keys()):
        # Everything except the bookkeeping keys is a node -> score pair.
        nodeKeys = [k for k in DivDict[accession].keys()
                    if k not in ('start_node', 'start')]
        scores = [DivDict[accession][node] for node in nodeKeys]
        bestScore = max(scores)
        if scores.count(bestScore) != 1:
            continue  # Still ambiguous; keep the entry for a later round.
        bestNode = nodeKeys[scores.index(bestScore)]
        if bestNode in resultsDict:
            resultsDict[bestNode]['accessions'].append(accession)
        else:
            print("Can't find {0} in {1}".format(bestNode, resultsDict.keys()))
        del DivDict[accession]
    return resultsDict, DivDict

def find_rel_path(node, OTUname, RelPath=None):
    """Given a dictionary node, and an OTU name, will search for
    OTUname from the top of node and return the relative path as a
    list of keys leading to (and including) OTUname.

    Returns None when OTUname is nowhere beneath node.

    The original used a mutable default (RelPath=[]) and appended to it
    on a match, so repeated top-level hits accumulated across calls.
    """
    if RelPath is None:
        RelPath = []
    for key in node.keys():
        if key == OTUname:
            return RelPath + [key]
        child = node[key]
        # Only recurse into sub-dicts; e.g. an 'accessions' list has no
        # .keys() and crashed the original.
        if isinstance(child, dict):
            found = find_rel_path(child, OTUname, RelPath=RelPath + [key])
            if isinstance(found, list):
                return found
    return None

def score(score_dict, tax_node, seq_db, path, diverge_dict=None):
    """Tallies and records all the scores from a tabular output of hmmsearch.

    score_dict must have accessions as keys mapping to [Evalues, scores]
    list pairs, plus an 'order' key (popped here) giving the node names
    in the order the HMMs were processed; that order is used to map the
    index of the best score back to a node name.

    Returns (accessions_best, diverge_dict): accessions_best maps each
    unambiguously-won accession to its winning node name; diverge_dict
    records accessions whose top score is shared by several nodes, for
    later resolution.
    """
    if diverge_dict is None:
        # The original used a mutable default argument, leaking ambiguity
        # state between calls.
        diverge_dict = {}
    tax_node_keys = score_dict.pop('order')
    accessions_best = {}
    for accession in score_dict:
        scores = score_dict[accession][1]  # [0] holds E-values (unused).
        best_score = max(scores)
        n_best = scores.count(best_score)
        if n_best == 1:
            accessions_best[accession] = tax_node_keys[scores.index(best_score)]
        elif n_best > 1:  # Ambiguous: several nodes share the top score.
            CONFIG.ambiguousCount += 1
            prev_index = -1
            TopScorers = []
            for count in range(n_best):
                # Locate each joint-top score in turn.
                prev_index = scores.index(best_score, prev_index + 1)
                TopScorers.append(tax_node_keys[prev_index])
                if accession in diverge_dict:
                    startKeys = diverge_dict[accession]['start_node'].keys()
                    if TopScorers[-1] in startKeys:
                        # N.B. best_score remains unchanged each iteration.
                        diverge_dict[accession][TopScorers[-1]] = best_score
                    else:
                        OTU_order = find_rel_path(diverge_dict[accession]['start_node'], TopScorers[-1])
                        # find_rel_path returns a list; lists are unhashable,
                        # so the original crashed using it as a dict key.
                        diverge_dict[accession][tuple(OTU_order)] = best_score
                else:
                    diverge_dict[accession] = {TopScorers[-1]: best_score,
                                               'start': path,
                                               'start_node': tax_node}
        elif n_best == 0:
            # Pathological: max() of a non-empty list always occurs >= once.
            seqDescription = seq_db.info[accession]['desc']
            print('##\tno matches for accession {0}'.format(accession))
            print('##\tDescription: {0}'.format(seqDescription))
        else:
            print("##\tn_best = {0}".format(n_best))
            raise ValueError("Can't figure out the winning nodes")
    return accessions_best, diverge_dict

def score2(scorer_pipe, tax_node, seq_db, path, diverge_dict=None):
    """Receive per-accession hmmsearch results from scorer_pipe and bin
    each accession under the node with the single highest bit score.

    Protocol on scorer_pipe: the first recv() is the ordered list of
    node names; then one (accession, (Evalues, scores)) tuple per
    sequence; finally the string 'end'.

    Returns (results_dict, diverge_dict): results_dict maps winning node
    name -> {'accessions': [...]}, diverge_dict accumulates accessions
    whose top score is shared between several nodes.
    """
    if diverge_dict is None:
        diverge_dict = {}  # Avoid the original's shared mutable default.
    nodes = scorer_pipe.recv()  # Ordered node names.
    results_dict = {}
    ambiguous_nodes = set()
    inval = scorer_pipe.recv()
    while inval != 'end':
        accession, (Evals, scores) = inval
        best_score = max(scores)
        n_best = scores.count(best_score)
        if n_best == 1:
            choice = nodes[scores.index(best_score)]
            if choice in results_dict:
                results_dict[choice]['accessions'].append(accession)
            else:
                results_dict[choice] = {'accessions': [accession]}
        elif n_best > 1:  # Ambiguous node!
            CONFIG.ambiguousCount += 1
            prev_index = -1
            top_scorers = []
            for count in range(n_best):
                # Locate each joint-top score in turn.
                prev_index = scores.index(best_score, prev_index + 1)
                top_scorers.append(nodes[prev_index])
                if accession in diverge_dict:
                    start_keys = diverge_dict[accession]['start_node'].keys()
                    if top_scorers[-1] in start_keys:
                        # N.B. best_score is the same on every iteration.
                        diverge_dict[accession][top_scorers[-1]] = best_score
                    else:
                        OTU_order = find_rel_path(diverge_dict[accession]['start_node'], top_scorers[-1])
                        # Lists are unhashable: the original crashed here when
                        # it used OTU_order directly as a dict key.
                        diverge_dict[accession][tuple(OTU_order)] = best_score
                else:
                    diverge_dict[accession] = {top_scorers[-1]: best_score,
                                               'start': path,
                                               'start_node': tax_node}
            ambiguous_nodes.add(tuple(top_scorers))
        elif n_best == 0:
            print('##\tno matches for accession {0}'.format(accession))
        else:
            print("##\tn_best = {0}".format(n_best))
            raise ValueError("Can't figure out the winning nodes")
        inval = scorer_pipe.recv()
    if ambiguous_nodes:
        print('Ambiguous nodes:-{0}'.format('\n\t'.join(
            [' & '.join(group) for group in ambiguous_nodes])))
    return results_dict, diverge_dict

class Scorer( Thread ):
    """
    Let's put (through inQ) results here as they're produced by
    parseHMMThread. This will continue to collate and order them
    as they're made ready. As soon as a sequence has all results
    made ready for it, then we'll pass over to scorer.

    Queue/pipe wiring:
      inQ      <- HMMsearchThread's: ( OTU, accession, Eval, score )
                  tuples plus an ['end', OTU] marker per finished HMM.
      out_pipe -> score2(): the node-name list, then (accession, results)
                  tuples, then the string 'end'.
      d_Q      -> sequence_distributor: name of each finished OTU.
    """
    def __init__( self, hmmsearch_outQ, result_pipe, distributor_queue ):
        Thread.__init__(self)
        self.inQ = hmmsearch_outQ      # shared queue fed by all HMMsearchThread's
        self.out_pipe = result_pipe    # results forwarded to score2()
        self.d_Q = distributor_queue   # completion notices back to the distributor
    def _reset( self ):
        # Per-batch state: results maps accession -> [Evalues, scores]
        # (both indexed by HMM position), n_accessions_got counts how many
        # HMMs have reported each accession, results_val is the zero-filled
        # template list, hmm_indices maps OTU name -> list index.
        self.results = {}
        self.n_accessions_got = {}
        self.results_val = []
        self.hmm_indices = {}
    def run(self):
        inval = self.inQ.get()
        self._reset()
        while inval != 'END':
            if type( inval ) is tuple: # ( OTU, accession, Eval, score ) . From HMMSearchThread's
                OTU, accession, Eval, score = inval 
                HMM_index = self.hmm_indices[ OTU ]
                if accession in self.results:
                    self.results[accession][0][ HMM_index ] = Eval
                    self.results[accession][1][ HMM_index ] = score
                    self.n_accessions_got[accession] += 1
                    # Once every HMM has reported this accession, ship it to
                    # score2 and drop it from local state.
                    if self.n_accessions_got[accession] == self.n_to_do:
                        self.out_pipe.send( (accession, self.results.pop( accession ),) )
                else:
                    # First sighting: allocate Evalue/score slots for every HMM.
                    self.results.update( { accession : [ self.results_val[:], self.results_val[:] ] } )
                    self.n_accessions_got.update( { accession : 1 } )
                    self.results[accession][0][ HMM_index ] = Eval
                    self.results[accession][1][ HMM_index ] = score
            elif type( inval ) is list:  # Initiate variables. # This comes first. 
                if inval[0] != 'end':   # List comes from distributor.
                    self._reset()
                    nodes = inval  ## Sorted list of node names.
                    for node in nodes:
                        self.results_val.append( float() )
                    self.out_pipe.send( nodes )
                    self.n_to_do = len( nodes )
                    for i in xrange( self.n_to_do ):
                        self.hmm_indices.update( { nodes[i] : i } )
                    n_done = 0
                else:  # Then the first invalue is 'end'  # From HMMsearchThread.
                    # NOTE(review): n_done/n_to_do are only bound after a node
                    # list has been received; an early ['end', ...] would raise
                    # NameError -- presumably the protocol guarantees ordering.
                    n_done += 1
                    self.d_Q.put( inval[1] )  # --> Tell distributor what Thread just finished.
                    if n_done == self.n_to_do:
                        # Flush accessions that some HMM never reported at all.
                        for accession in self.results.keys():
                            self.out_pipe.send( ( accession, self.results.pop(accession), ) )
                        self.out_pipe.send( 'end' ) # --> score2
                        del( self.results )
            inval = self.inQ.get()
#        self.out_pipe.close()
        return

class HMMsearchThread( multiprocessing.Process ):
    """Worker process running one hmmsearch subprocess per HMM job it is
    handed, streaming sequences to its stdin and forwarding parsed hits
    to the Scorer thread via outQ."""
    def __init__(self,inPipe,outPipe, outQ, lock):
        multiprocessing.Process.__init__(self)
        self.hmmsearch = os.path.join( CONFIG.hmmerdir, CONFIG.hmmsearchCMD )
        self.inPipe, self.__outPipe = (inPipe, outPipe, )
        self.outQ = outQ  ## Goes to Scorer.
        #self.sem = semaphore
        # Tabular results go to /dev/stdout so the pipe can be parsed;
        # 'REPLACED' is swapped for the actual HMM path per job in run().
        if CONFIG.options['--max']:
            self.command = [ self.hmmsearch,'-o',os.devnull,'--max','--tblout','/dev/stdout','--noali','REPLACED','/dev/stdin' ]
        else:
            self.command = [ self.hmmsearch,'-o',os.devnull,'--tblout','/dev/stdout','--noali','REPLACED','/dev/stdin' ]
        CONFIG.printHeaders = False
        self.lock = lock
    def run(self):
        """Job loop: recv (nseqs, HMM path) pairs on the private pipe, run
        hmmsearch for each, stream the sequences in, and put per-hit
        ( OTU, accession, Eval, score ) tuples followed by an ['end', OTU]
        marker onto outQ.  An ('END', ...) pair terminates the loop."""
        self.Eval = CONFIG.options['-Eval']
        self.score = CONFIG.options['-score']
        if CONFIG.options['--verbose']:
            parser = parseHMMSearchPipeVerbose
        else:
            parser = parseHMMSearchPipe
        nseqs,HMMLocation = self.__outPipe.recv()
        while nseqs != 'END':
            self.command[-2] = HMMLocation  ### This replaces 'REPLACED' in self.command
            process = subprocess.Popen( self.command ,shell=False,stdin=subprocess.PIPE,stderr=subprocess.PIPE,stdout=subprocess.PIPE,bufsize=-1)
            try:
                # Stream every FASTA-formatted sequence to hmmsearch's stdin.
                for seq in self.__outPipe.recv():
                    process.stdin.write( seq )
            except IOError:
                # hmmsearch died while we were writing; report its stderr.
                self.lock.acquire()
                sys.stderr.write( 'hmmsearch error:-\n{0}\n\n'.format(process.stderr.read() ))
                sys.stderr.flush()
                self.lock.release()
                raise
            process.stdin.close()
            # OTU name = HMM file name minus its directory and extension.
            OTUName = HMMLocation[HMMLocation.rfind( os.path.sep ) + 1: HMMLocation.rfind('.') ]
            for accession, Eval, score in parser( process ):
                self.outQ.put( ( OTUName, accession, Eval, score, ) )
            self.outQ.put( ['end',OTUName] ) ## --> Scorer
            nseqs,HMMLocation = self.__outPipe.recv()
        self.__outPipe.close()
        return

def SSUMMO(node, path, results_node, seqDB, result_pipe, diverge_dict=None):
    """Recursively walk the taxonomy tree rooted at *node*, running
    hmmsearch at each level (via the seqDB / distributor / Scorer
    pipeline) to assign every sequence to a taxonomic branch.

    node         - (sub)tree of the taxonomy index; pruned as we descend.
    path         - filesystem path of the current node's directory.
    results_node - results subtree being filled in for this node.
    seqDB        - running seqDB process (provides outQ / prefetchQ).
    result_pipe  - pipe end read by score2() for scored results.
    diverge_dict - carries ambiguous accessions between levels.

    Returns the updated (results_node, diverge_dict).
    """
    ## At each path, choose which dir to go into, by performing
    ## hmmsearch on the fasta sequence against each next HMM.
    if diverge_dict is None:
        diverge_dict = {}  # The original's mutable default leaked state between runs.
    depth = path.count(os.path.sep) - CONFIG.arbDBdir.count(os.path.sep)
    node_keys = sorted(node.keys())
    if 'accessions' in node_keys:
        node_keys.remove('accessions')
    num_nodes = len(node_keys)
    ## Merge ambiguous results with results_node.
    results_node, diverge_dict = MergeDiverge(results_node, diverge_dict)
    if num_nodes == 0:
        return results_node, diverge_dict
    seqDB.outQ.put((path, node_keys,))  ## --> sequence_distributor.
    if os.path.realpath(path) == os.path.realpath(CONFIG.options['-start']):
        ## First time we enter SSUMMO(), seqDB already has all sequences loaded.
        seqDB.prefetchQ.put(('get_all', None))
    else:
        accessions = results_node['accessions']
        seqDB.prefetchQ.put(('get', accessions))  ## --> seqDB.
    if num_nodes == 1:  ## If only one node, no need for hmmsearch'ing.
        singleNode = node_keys[0]
        results_node.update({singleNode: {'accessions': results_node.pop('accessions')}})
        seqDB.prefetchQ.put('Skip')
        choices = node_keys
    else:
        seqDB.prefetchQ.put('Proceed')
        results_node, diverge_dict = score2(result_pipe, node, seqDB, path, diverge_dict=diverge_dict)
        if 'accessions' in results_node:
            print('still needed at line 378')
            ## Delete so results_node doesn't keep growing; pushes each
            ## result out to the branch ends.
            del results_node['accessions']
    if len(results_node) == 0 and diverge_dict == {}:
        print("##\tNo results")
        print("##\t {0}".format(node.keys()))
        print("##\t {0}".format(path))
        return results_node, diverge_dict
    else:  ## STORE RESULTS / UPDATE RESULT DICT #####
        choices = sorted(results_node.keys())
    div_keys = sorted(diverge_dict.keys())
    nDiverge = len(diverge_dict)
    ## Separate (resolve) the ambiguous results after scoring all nodes.
    results_node, diverge_dict = SepDiverge(results_node, diverge_dict, seqDB)
    if len(diverge_dict) != nDiverge:
        # NOTE(review): len(diverge_dict) - nDiverge is zero-or-negative here
        # (SepDiverge only removes entries); the intended count was possibly
        # nDiverge - len(diverge_dict).  Left unchanged -- confirm.
        sys.stdout.write('{0}{1} ambiguous accessions: '.format(depth * 5 * ' ', len(diverge_dict) - nDiverge))
        for divNode in diverge_dict.keys():
            if divNode in div_keys:
                div_keys.remove(divNode)
            # NOTE(review): this write sits inside the loop, so the list is
            # printed once per remaining ambiguous accession -- confirm intent.
            sys.stdout.write(', '.join(list(div_keys)) + '.\n')
    for choice in choices:  ## Recursively do SSUMMO.
        if len(results_node[choice]['accessions']) == 0:
            del node[choice]  ## If a node has no results, prune it.
        else:  ## otherwise recursively do SSuMMO on the winning node.
            print_ASCII_tree(choice, results_node, depth)
            results_node[choice], diverge_dict = SSUMMO(node.pop(choice), os.path.join(path, choice), results_node[choice], seqDB, result_pipe, diverge_dict=diverge_dict)
    return results_node, diverge_dict

def print_ASCII_tree(choice, results_node, depth):
    """Write one line of the indented ASCII taxonomy tree for *choice*,
    showing its name and how many accessions were assigned to it."""
    siblings = sorted(results_node.keys())
    n_accessions = len(results_node[choice]['accessions'])
    if choice == siblings[0] and len(siblings) > 1:
        marker = '+-'   # first of several siblings
    elif choice == siblings[-1] and len(siblings) > 1:
        marker = '|_'   # last sibling
    else:
        marker = '|-'   # only child, or a middle sibling
    sys.stdout.write('{0}{1} {2} ({3})\n'.format(' ' * depth * 5, marker, choice, n_accessions))
    sys.stdout.flush()

def findStart(tdict):
    """Locates the directory where to enter the SSUMMO loop.
    Default is to start in arbDBdir, which is configured in
    CONFIG.py.
    To change, give the command option '-start /some/path/to/dir'

    Returns (node, startDir): the taxonomy sub-tree to descend from and
    its on-disk directory.  Falls back to (tdict, CONFIG.arbDBdir) when
    the requested path contains no known top-level taxon.
    """
    if os.path.realpath(CONFIG.options['-start']) == os.path.realpath(CONFIG.arbDBdir):
        return tdict, CONFIG.arbDBdir
    pathList = CONFIG.options['-start'].split(os.path.sep)
    # Find the first path component that is a known top-level taxon.
    # (The original also built a rstrip'ed startDir here that was never
    # read before being overwritten below -- dead code, removed.)
    firstNode = None
    for key in tdict.keys():
        if key in pathList:
            firstNode = pathList.index(key)
            break
    if firstNode is None:
        return tdict, CONFIG.arbDBdir
    # Walk down the index to the requested node, skipping empty path
    # components (e.g. from doubled or trailing separators).
    node = tdict
    parentNode = None
    for nodeName in pathList[firstNode:]:
        if nodeName.strip() == '':
            continue
        node = node[nodeName]
        parentNode = nodeName
    startDir = os.path.join(CONFIG.arbDBdir, os.path.sep.join(pathList[firstNode:]))
    CONFIG.options['-start'] = startDir
    print("\n##\tStarting SSUMMO from node '{0}' at path '{1}'".format(parentNode, startDir))
    return node, startDir

class seqDB( multiprocessing.Process ):
    """Process owning all input sequences; serves FASTA text to the
    sequence_distributor on request."""
    def __init__(self,seqfile,prefetchQ,distribute_Q,threads,lock,reverse_pipe=None,format='fasta'):
        """This is the only process that reads the sequence input file (seqfile).

        prefetchQ must be a multiprocessing.Queue() object. In this, you must put the
           following keywords:-
              1a)  ('get_all', None), or:-
               b)  ('get', [list of accessions]).
              2a)  'Proceed', or:-
               b)  'Skip'.
              3)   'END'.
        These will:-
              1a)  Queue every loaded sequence for distribution.
               b)  Queue the sequences named in the accession list (SeqRecord.id).
              2a)  Tell the distributor to send those accessions to HMMSearchThread's.
               b)  Discard the queued batch.
              3)   Close prefetchQ and return. seqDB_instance.join() should then be
                      called.

        distribute_Q - Don't touch. Scorer uses this to synchronise finished nodes.
        threads   - list of threads which sequences shall be distributed to.
        lock      - multiprocessing.Lock() instance
        reverse_pipe - Pipe used by seqDB.reverse()
                       Send accessions down the other pipe end, and
                       this will reverse them.
        format    - format of sequences in the sequence file.
        """
        multiprocessing.Process.__init__(self)
        self.prefetchQ = prefetchQ
        # open() instead of the Python-2-only file() builtin.
        self.seqfile = open(seqfile, 'r')
        self.format = format
        self.rpipe = reverse_pipe
        self.lock = lock
        self.distributor = sequence_distributor( distribute_Q , threads )
        self.outQ = distribute_Q
        self.distributor.start()
        # Dispatch table for (keyword, args...) tuples arriving on prefetchQ.
        # Fixed in review: the original literal was missing commas (a
        # SyntaxError), referenced the undefined self.forget, and lacked the
        # 'get_all' keyword that SSUMMO() actually sends.
        self.functions = {
                'reverse'   : self.reverse,
                'forget'    : self.forget,
                'slice'     : self.slice,
                'return_all': self.return_all,
                'get_all'   : self.return_all,  # SSUMMO() sends ('get_all', None).
                'get'       : self.get,
                }
    def run(self):
        """Load every sequence, handle the slicing/reversing preamble, then
        serve prefetchQ keywords until 'END'."""
        self.seqs = {}
        for seq in SeqIO.parse( self.seqfile , self.format ):
            self.seqs.update( { seq.id : seq } )
        self.lock.acquire()
        sys.stdout.write( "Read {0} sequences from {1}\n".format( len( self.seqs ), self.seqfile.name ) )
        sys.stdout.flush()
        self.lock.release()
        self.seqfile.close()
        self.slice_all()  ## Consumes (id, start, end) triples from prefetchQ until 'END'.
        if self.rpipe != None:
            self.reverse()   # Enter reverse loop. This shall slice the sequences too.
        # Pre-render each record as FASTA text once, then drop the records
        # (hmmsearch only ever needs the text).
        self.seq_strings = {}
        for seq_id in self.seqs:
            self.seq_strings.update( { seq_id : self.seqs[seq_id].format('fasta') } )
        del( self.seqs )
        inval = self.prefetchQ.get()
        while inval != 'END':
            fn = self.functions[ inval[0] ]
            args = inval[1:]
            fn( *args )  # The original's "fn( *args=args )" was a SyntaxError.
            inval = self.prefetchQ.get()
            while inval in ['Proceed', 'Skip' ]: #Made a loop in case of parents in a row with 1 child.
                self.outQ.put( inval )
                inval = self.prefetchQ.get()
        self.shutdown()
    def return_all(self, *_unused):
        """Queue every loaded sequence for the distributor.  Accepts (and
        ignores) the placeholder argument sent with ('get_all', None)."""
        for seq in self.seq_strings.values():
            self.outQ.put( seq )
        return
    def get( self, accessions ):
        """Queue the FASTA text of each named accession for the distributor."""
        for acc in accessions:
            self.outQ.put( self.seq_strings[acc] )
        return
    def forget(self, *_unused):
        """No-op placeholder for the 'forget' keyword.

        NOTE(review): the dispatch table referenced self.forget but no such
        method existed (AttributeError in __init__).  The intended behaviour
        -- presumably dropping cached sequences -- needs confirming."""
        return
    def shutdown(self):
        """Tell the distributor to finish, close our queue, reap the thread."""
        self.outQ.put( 'END' )  ## --> sequence_distributor
        self.prefetchQ.close()  ## End SeqDBQ
        self.distributor.join()
        return
    def slice_all(self):
        """Consume (accession, start, end) triples from prefetchQ, slicing
        each named sequence, until 'END' arrives.  (The original def omitted
        'self', so every call raised TypeError.)"""
        inval = self.prefetchQ.get()
        while inval != 'END':
            accession, start, end = inval
            self.slice( accession, start, end )
            inval = self.prefetchQ.get()
        return
    def slice( self , accession, start, end ):
        """This will slice sequence with seq.id accession, from
        start to end, INCLUSIVE."""
        self.seqs[ accession ] = self.seqs[ accession ][ start : end + 1 ]
    def reverse( self ):
        """Send every sequence out for a reverse-complement check, then
        reverse-complement each accession named on the reverse pipe until
        'END' arrives."""
        if self.rpipe is None:
            raise ValueError( "No reverse pipe given to seqDB!!" )
        for fasta_sequence in self.seqs.values():
            self.outQ.put( fasta_sequence.format('fasta') )  # To distributor
        self.outQ.put( 'Proceed' )
        to_reverse = self.rpipe.recv()
        reversing = 0
        while to_reverse != 'END':
            self.seqs[to_reverse].seq = self.seqs[to_reverse].seq.reverse_complement()
            reversing += 1
            to_reverse = self.rpipe.recv()
        self.lock.acquire()
        print('Reversed {0} sequences'.format( reversing ))
        self.lock.release()
        self.rpipe.close()

class sequence_distributor( Thread ):
    """Thread fanning sequences and HMM jobs out to the HMMsearchThread's.

    Consumes from seqDBQ (self.inQ): FASTA strings ('>'-prefixed), a
    (directory, node-list) tuple announcing the next batch of HMMs,
    'Proceed'/'Skip' control words, per-node completion notices from
    Scorer, and finally 'END'."""
    def __init__( self, seqDBQ, hmmsearch_threads ):
        #multiprocessing.Process.__init__(self)
        Thread.__init__( self )
        self.inQ = seqDBQ
        self.threads = hmmsearch_threads                   # HMMsearchThread workers
        self.n_threads = len( self.threads )
        self.thread_nums = set( range( self.n_threads ) )  # every valid worker index
#        self.sem = semaphore
    def run( self ):
        """Main loop: buffer sequences until 'Proceed', then distribute()
        them across the workers; 'Skip' discards the buffered batch;
        'END' shuts every worker down and joins them."""
        self.threadInd = 0
        inval = self.inQ.get()
        seqs = set()
        while inval != 'END':
            if inval[:1] == '>':
                # A FASTA-formatted sequence string from seqDB.
                seqs.add( inval.format('fasta') )  # <-- seqDB
            elif type(inval) == tuple:
                directory, nodes = inval  # <-- SSUMMO / Preparer.reverser
                self.nodes = nodes
                # One HMM file per child node: <dir>/<node>/<node>.hmm
                paths = [ os.path.join( directory, node, node + '.hmm' ) for node in nodes ]
                del( directory ) 
            elif inval == 'Proceed':  # <-- seqDB
                self.threads[0].outQ.put( nodes ) # --> Scorer. Tells order of nodes to search.
                self.distribute( seqs, paths )
                seqs = set()
            elif inval == 'Skip':
                seqs = set()
            else:
                raise TypeError( 'unknown type: {0} with val {1}'.format( type(inval),inval ) )
            inval = self.inQ.get()
        for thread in self.threads:
            thread.inPipe.send( ('END', None, ) )
        for thread in self.threads:  # May as well do it in two loops.
            thread.join()
        return

    def distribute( self, seqs, paths ):
        """Send (num_seqs, hmm_path) then the sequence batch to one worker
        per HMM, reusing workers (via get_free_thread) when HMMs outnumber
        workers, then drain the per-node completion notices that Scorer
        posts back on inQ."""
        self.threadInd = 0
        num_nodes = len(paths)
        self.node_nums = range( num_nodes )
        num_seqs = len( seqs )
        self.thread_indices = {}  # node name -> index of the worker running it
        min_val = min( [num_nodes, self.n_threads] )
        for i in xrange( min_val ):
            self.threads[i].inPipe.send( [ num_seqs , paths[i] ] )
            self.thread_indices.update( { self.nodes[i] : i } )
            self.threads[i].inPipe.send( seqs )
            self.threadInd += 1
            #self.sem.acquire()
        if min_val != num_nodes:  # If there's less HMMSearchThread's than number of nodes.
            for index in xrange( min_val , num_nodes ):
                self.get_free_thread()
                #self.sem.acquire()
                self.threads[self.threadInd].inPipe.send( [ num_seqs , paths[index] ] )
                self.thread_indices.update( { self.nodes[index] :self.threadInd } )
                self.threads[self.threadInd].inPipe.send( seqs )
                #for seq in seqs:
                #    self.threads[self.threadInd].inPipe.send( seq )
            for index in xrange( min_val ):
                self.thread_indices.pop( self.inQ.get() )  #  Must come from Scorer.
        else:
            for i in self.node_nums:
                self.thread_indices.pop( self.inQ.get() )  #
        return

    def get_free_thread( self ):
        """Advance self.threadInd to a worker with no outstanding job,
        blocking on inQ for a finished-node notice when all are busy."""
        self.threadInd += 1
        if self.threadInd == self.n_threads:
            self.threadInd = 0
        entered = self.threadInd  # NOTE(review): never read afterwards.
        vals = set( self.thread_indices.values() )
        difference = self.thread_nums.difference( vals )
        if len( difference ) > 0:
            # At least one worker index currently has no node assigned.
            self.threadInd = difference.pop()
        else:
            node_name = self.inQ.get()
            self.threadInd = self.thread_indices.pop( node_name ) # Removes the thread index from thread_indices.values()
        return

class Preparer( ):
    def __init__( self ):
        self.seqDB = seqDB
    def prepare( self ):
        """Given the name of a sequence file, makes a blast database
        and provides the entry point for the SSUMMO algorithm. Returns
        a dictionary of results containing taxonomic nodes with matches
        and at each node the accessions that have been assigned there.
        """
        self.threads = []
        thread_pipes = [multiprocessing.Pipe() for i in xrange( int(CONFIG.options['-ncpus']))]
        score_pipe_parent,score_pipe_child = multiprocessing.Pipe()
        self.hmmsearch_outQ = multiprocessing.Queue()
        self.lock = multiprocessing.RLock()
        for i in xrange(int(CONFIG.options['-ncpus'])):
            self.threads.append( HMMsearchThread( thread_pipes[i][0], thread_pipes[i][1], self.hmmsearch_outQ ,self.lock) )
        CONFIG.printHeaders = True  ## Doesn't actually work. In case we wanted to print hmmsearch results.
        for thread in self.threads:  
            thread.start()
        seq_file_name = CONFIG.options['-in']
        prefetchQ = multiprocessing.Queue() # Send accessions for retrieval.
        self.distribute_Q = multiprocessing.Queue()
        self.reverse_pipe_in , self.reverse_pipe_out = multiprocessing.Pipe()
        #### Create processes / threads (calls __init__ on each one).
        self.seqDB = seqDB(seq_file_name, prefetchQ, self.distribute_Q, self.threads, self.lock,reverse_pipe = self.reverse_pipe_out , format=CONFIG.options['-format'])
        self.scorer = Scorer( self.hmmsearch_outQ, score_pipe_parent, self.distribute_Q )
        #### Start them.
        self.scorer.start()
        self.seqDB.start()
        ############# Processes started. ##############
        t0 = time.time()
        results, diverge_dict = self.reverser( score_pipe_child )
        tdict = getTaxIndex()
        t = time.time()
        start_node, start_dir = findStart(tdict )
        if start_dir != CONFIG.arbDBdir:
            print 'shuffling along'
            results, start_node = self.shuffle_along( results,start_dir, start_node )
        print "starting at '{0}'".format(start_dir)
        delete = []
        for node in results.keys():
            if len(results[node]['accessions']) == 0:
                continue
            print_ASCII_tree( node, results, 0 )
            results[node], diverge_dict = SSUMMO(start_node[node], os.path.join( start_dir, node ), results[ node ], self.seqDB, score_pipe_child,diverge_dict=diverge_dict)
        for node in delete:
            del( results[node] )
        if len(results.keys()) == 0:
            sys.stderr.write( "Not a single sequence assigned unambiguously!")
        else:
            results = finalMerge( results, diverge_dict )
        t = time.time() - t0
        print "##\tprocessed {0} sequences in {1} seconds".format(countseqs(CONFIG.arbDBdir, file_name = seq_file_name), t)
        return results

    def __del__( self ):
        self.seqDB.prefetchQ.put('END')  ## SeqDB will end Distributor. Distributor will end hmmsearch threads.
        self.hmmsearch_outQ.put('END')  ## Scorer inQ
        self.seqDB.join()
        self.scorer.join()

    def close( self ):
        self.__del__()
    def shuffle_along( self , results_dict , start_dir ,start_node):
        last_dir = os.path.realpath(CONFIG.arbDBdir).rstrip( os.path.sep ).rsplit( os.path.sep,1 )
        start_dir_list = os.path.realpath(start_dir).rstrip( os.path.sep ).split( os.path.sep )
        if last_dir == start_dir_list[-1]:
            folders = []
        else:
            try:
                folders = start_dir_list[ start_dir_list.index( last_dir ) + 1: ]
            except ValueError:
                raise ValueError( 'arbDBdir in CONFIG.py ({0}) must start with same file path as -start ({1})'.format( CONFIG.arbDBdir , start_dir  ))
        node = results_dict
        start_nodes = set()
        for folder in folders:
            ## Delete irrelevant nodes from start_node.
            start_nodes.add( folder )
            node_names = set( node.keys() )
            other_nodes = start_nodes.difference( node_names )
            for other_node in other_nodes:
                del( node[other_node] )
            ## Move into next node.
            node.update( { folder : { 'accessions': node.pop('accessions') } } )
            node = results_dict[folder]
            start_node = start_node[folder]
        return results_dict, start_node

    def reverser( self, result_pipe ):
        nodes = { 'Archaea' : {},
                'Bacteria' : {},
                'Eukaryota' : {},
                'rArchaea' : {},
                'rBacteria' : {},
                'rEukaryota' : {} }
        self.distribute_Q.put( (CONFIG.arbDBdir, sorted(nodes.keys()) , ) )  ## --> sequence_distributor
        results, diverge_dict = score2( result_pipe, nodes, self.seqDB, CONFIG.arbDBdir )
        results = self._reverse( results )
        return results, diverge_dict

    def _reverse( self, results):
        Ireversed = 0
        total = 0
        delete = []
        for node in results.keys():
            self.Write( 'In {0}, '.format( node ) )
            if node[:1] == 'r':  # top match is a reverse node.
                choice = node[1:]
                if 'accessions' in results[node].keys():
                    accessions = results[node].pop( 'accessions' )
                    self.Print( "there's {0} sequences:-".format( len( accessions ) ) )
                    for accession in accessions:
                        Ireversed += 1
                        self.reverse_pipe_in.send( accession )
                    if choice not in results:
                        results.update( { choice : { 'accessions' : [] } } )
                    if 'accessions' in results[choice].keys():
                        results[choice]['accessions'] +=  accessions 
                    else:
                        results[choice].update( { 'accessions' : accessions } )
                else:
                    pass
                delete.append( node )
                continue
            else:
                if 'accessions' in results[node].keys():
                    self.Print( "there's {0} sequences:-".format( len( results[node]['accessions'] ) ) )
                else:
                    delete.append( node )
                continue
        self.reverse_pipe_in.send( 'END' )
        for node_to_go in delete:
            del( results[node_to_go] )
        delete = []
        for node in results:
            if 'accessions' not in results[node].keys():
                delete.append( node )
            else:
                total += len( results[node]['accessions'] )
        for node_to_go in delete:
            del( results[node_to_go] )
        if Ireversed > 0:
            self.Print( 'Reversed {0} sequences'.format( Ireversed ) )
        return results
    def Print( self, string ):
        self.lock.acquire()
        print string
        self.lock.release()
    def Write( self, string ):
        self.lock.acquire()
        sys.stdout.write( string )
        sys.stdout.flush()
        self.lock.release()

def parseArgs( args ):
    """Build the SSUMMO Options object from a command-line argument list.

    Fills in defaults, help text and switch descriptions, then delegates
    parsing to Options.parseArgs.  When -out is omitted, one output prefix
    is derived from each -in file name (extension stripped).

    Raises IOError when the number of -in and -out values differ.
    """
    options = Options()
    options.options = {'-start' : CONFIG.arbDBdir,
        '-in' : [] ,
        '-out': None ,
        '-ncpus': multiprocessing.cpu_count()-1 ,
        '-Eval': str(10),
        '-format': 'fasta',
        '-servers': ('localhost'),  # NOTE(review): parentheses make this a plain string, not a 1-tuple -- confirm intent.
        '--createXML' : False,
        '--verbose' : False,
        '--max':False
        }
    options.helpTxt = {'-start' : 'Start node for SSUMMO. Can be a domain, or deeper, but must start at least with the domain',
        '-in' : 'Query sequence file.',
        '-out': 'Output results file names. Suffix will change. [Optional - prefix inferred from -in]',
        '-ncpus':'Number of worker processes to initiate. [CPU count - 1]',
        '-format':'Input file sequence format [fasta].',
        '-servers': 'Server names. [Not implemented yet].',
        '-Eval': 'hmmsearch Evalue threshhold [10]',
        '-score':'hmmsearch score threshold [1]'
        }
    options.switches = {
        '--createXML':'Create phyloxml output?? [No]',
        '--verbose' : 'Print HMMer results [No!]',
        '--max' : 'Use hmmsearch --max flag (bypass filters that remove sequences from full scoring set). [no]'
        }
    # NOTE(review): '-createXML' (one dash) here does not match the
    # '--createXML' switch above -- confirm which spelling Options expects.
    options.singleargs = [ '-out','-ncpus','-servers','-createXML','-start','-format']
    options.multiargs = [ '-in','-out' ]
    options.parseArgs( args )
    if options['-out'] is None:
        # Derive one output prefix per input file by stripping the extension.
        options.options.update( { '-out' : [] } )
        for option in options['-in']:
            prefix = option[ : option.rfind('.') ]
            options['-out'].append( prefix )
    in_len = len( options['-in'] ) ; out_len = len( options['-out'] )
    if out_len != in_len:
        raise IOError( 'Must supply the same number of options to -in as to -out\n.Got {0} and {1}, respectively.'.format(in_len,out_len ) )
    return options

def save( save_name , object ):
    """Pickle *object* to the file *save_name* using the highest pickle
    protocol, announcing the destination path on stdout.

    NOTE: the parameter name 'object' shadows the builtin; kept for
    backward compatibility with keyword callers.
    """
    # open() instead of the Python-2-only file() builtin (identical in Py2).
    with open( save_name, 'wb' ) as save_file:
        print ("##\tSaving object to '{0}'".format( save_file.name ))
        pickle.dump( object, save_file , -1 )

def main( options ):
    t0 = time.time()
    CONFIG.options = options
    ins, outs = ( CONFIG.options['-in'], CONFIG.options['-out'], )
    n_files = len( ins )
    for file_ind in xrange( n_files ):
        t1 = time.time()
        CONFIG.options['-in'] = ins[file_ind]
        CONFIG.options['-out'] = outs[file_ind]
        try:
            Iprepare = Preparer()
            results_dict = Iprepare.prepare( )
        except Exception:
            raise
        finally:
            Iprepare.close()
            #del( Iprepare )
        print "##\t{0} ambiguous results".format(CONFIG.ambiguousCount)
        ### Save results files
        try:
            save( '{0}.pkl'.format(CONFIG.options['-out']) , results_dict )
        except IOError:
            if sys.stdin.closed:
                suffix = 'results'
                #sys.stdin = os.fdopen( sys.__stdin__.fileno(),'r')
                #sys.stdin.flush()
            else:
                suffix = 'results'
                #suffix = raw_input( "Can't save to {0}. Please enter a filename where to save the results [results].pkl")
                if len(suffix) == 0:
                    suffix = 'results'
            save_name = suffix + '.pkl'
            i = 0
            while os.path.exists( save_name ):
                i += 1
                save_name = '{0}{1}.pkl'.format( suffix,i )
                print '{0} already exists.'.format( save_name )
            CONFIG.options['-out'] = save_name
            save( save_name , results_dict )
        if options['--createXML']:
            startNode,startDir = findStart( getTaxIndex())
            print "##\tWriting phyloxml file to '{0}.xml'".format(options['-out'])
            with file('{0}.xml'.format(options['-out']),'w') as write_handle:
                write_xml(startDir,write_handle,results_dict)
        t = time.time()
        mins = int(t-t0) / 60
        print "##\Finished {0} in {1} mins{2} secs".format( CONFIG.options['-in'], mins, (t - t0) - (mins*60))
    t = time.time()
    mins = int(t-t0) / 60
    print "##\tFinished everything in {0} mins{1} secs".format( mins, (t - t0) - (mins*60))


if __name__ == '__main__':
    # Parse the command line, stash the options globally, then run.
    CONFIG.options = parseArgs( sys.argv[1:] )
    #cProfile.run( 'main( CONFIG.options )' )
    main( CONFIG.options )
