#!/usr/bin/env python
from ssummo.dict_to_phyloxml import write_xml
from ssummo.count_hmms import countseqs
from ssummo.ssummolib import Options, load_index, dict_walk , get_accessions
import os
import re
import sys
import time
import cPickle as pickle
import subprocess
import multiprocessing
import CONFIG
import Bio.SeqRecord
from Bio import SeqIO
#import ArbIO
from threading import Thread
CONFIG.ambiguousCount = 0

def getTaxIndex(silent=False):
    tdict = load_index(silent=silent)
    for key in tdict.keys():
        if key not in ['Bacteria','Eukaryota','Archaea']:
            del(tdict[key])
    return tdict

class Parser( ):
    def __init__( self, lock=None,verbose=False,domtblout=False ):
        self.lock = lock if lock==None else multiprocessing.Lock()
        if lock == None:
            self.lock = multiprocessing.Lock()
        else:
            self.lock = lock
        if verbose:
            self.printHeaders = True
            self.verbose = True 
        if domtblout:
            self.set_scorer( True )
        else:
            self.set_scorer( False )
        self.line_splitter = re.compile( '\s+' )

    ## Line parsing
    def _parse_line( self, *args ):
        """API for attaching line parsing methods. Have verbose & non-verbose, configured with
        set_verbose( True|False )
        """
        pass
    def _parse_verbose( self, line ):
        with self.lock:
            print line,
        return self._get_scores( line )
    def _silent_parse( self, line ):
        return self._get_scores( line )
        
    def set_verbose( self, print_line=False ):
        if print_line:
            self.verbose = True
            self._parse_line = self._parse_verbose
        else:
            self.verbose = False
            self._parse_line = self._silent_parse

    ## Extracting scores
    def _get_scores( self, *args ):
        """API for attaching hmmsearch result parsers. Currently have a table and domain table
        parser, tested against hmmsearch v3.0.
        Can switch between the two with set_scorer( True|False ).
        """
        pass
    def _domtbl_scores( self, line ):
        values = self.line_splitter.split( line.rstrip(),22 )
        #                                                                                     --- full sequence --- -------------- this domain -------------           hmm    coord    ali     coord   env coord
        # target name                 accession   tlen query name           accession   qlen   E-value  score  bias   #      of    c-Evalue  i-Evalue  score    bias  from       to    from       to      from    to     acc     description of target
        # accession,                  null,      seqLen,  HMM,                null2,   HMMLen,   Eval,  score, bias,nDomains,null3,cEval,   iEval,    domscore,dombias,hmmstart,hmmend,alistart,aliend,envstart,envend,accuracy,description
        #  0                          1           2        3                    4       5         6      7     8      9      10    11       12         13        14    15        16    17         18       19     20      21       22
        result_dict = {'accession'  : values[0]        ,  # accession,
                       'seqLen'     : int(values[2])   ,  # seqLen,   
                       'HMM'        : values[3]        ,  # HMM,      
                       'HMMLen'     : values[5]        ,  # HMMLen,   
                       'Eval'       : float(values[6]) ,  # Eval,     
                       'score'      : float(values[7]) ,  # score,    
                       'bias'       : float(values[8]) ,  # bias,     
                       '#'          : int(values[9])   ,  # nDomains, 
                       'cEval'      : float(values[11]),  # cEval,    
                       'iEval'      : float(values[12]),  # iEval,    
                       'domscore'   : float(values[13]),  # domscore, 
                       'dombias'    : float(values[14]),  # dombias,  
                       'hmmstart'   : int(values[15])  ,  # hmmstart, 
                       'hmmend'     : int(values[16])  ,  # hmmend,   
                       'alistart'   : int(values[17])  ,  # alistart, 
                       'aliend'     : int(values[18])  ,  # aliend,   
                       'envstart'   : int(values[19])  ,  # envstart, 
                       'envend'     : int(values[20])  ,  # envend,   
                       'accuracy'   : float(values[21]),  # accuracy, 
                       'description': values[22],  # seqDesc,  
                       }
        return result_dict

    def _tbl_scores( self, line ):
        values = self.line_splitter.split( line )
        result_dict = { 'accession' : values[0],
                        'Eval'      : float(values[4])  ,
                        'score'     : float(values[5])  ,
                }
        return result_dict
    def set_scorer( self, domain_table ):
        """If parsing hmmsearch results from the `--domtblout` option,
        then the tabular output is different. Pass True to parse
        domain table results. This is the default.
        Otherwise, pass False
        """
        if domain_table:
            self._get_scores = self._domtbl_scores
        else:
            self._get_scores = self._tbl_scores

    ## Header parsing
    def _parse_header( self, *args ):
        """API for parsing headers. If you want to change from silent
        to verbose parsing or vice-versa, use `set_parse_header(bool)`"""
        pass
    def _silent_header( self, line ):
        return
    def _parse_header_verbose( self, line ):
        with self.lock:
            print line
        return
    def set_parse_header( self, print_header=True ):
        if print_header:
            self.printHeaders = True
            self._parse_header = self._parse_header_verbose
        else:
            self.printHeaders = False
            self._parse_header = self._silent_header

    def parse( self, hmmsearchProcess ):
        ## *** The parser!! *** ##
        try:
            for line in hmmsearchProcess.stdout:
                if line[:1] == '#':
                    self._parse_header( line )
                else:
                    yield self._parse_line( line )
            retCode = hmmsearchProcess.wait()
            if retCode != 0:
                sys.stderr.write( hmmsearchProcess.stderr.read() )
                sys.stderr.flush()
                raise IOError
        except KeyboardInterrupt:
            pass
        except Exception:
            raise
        finally:
            hmmsearchProcess.stdout.close()
            hmmsearchProcess.stderr.close()

def MergeDiverge( resultsDict, DivDict, tax_node ):
    """ Given the results_node dictionary (resultsDict), add accessions present
    in DivDict to the appropriate nodes in resultsDict."""
    tax_keys = tax_node.keys()
    for accession in DivDict.keys():
        start = DivDict[accession]['start']
        for node in DivDict[accession].keys():
            if node == 'start_node' or node == 'start':
                continue
            else:
                cur_node = os.path.sep.rsplit( node,1 )[-1] if os.path.sep in node else node
                if cur_node in tax_keys:
                    if cur_node in resultsDict:
                        resultsDict[cur_node]['accessions'].append(accession)
                    else:
                        resultsDict.update( { cur_node : { 'accessions' : [accession] } } )
                else:
                    continue
    return resultsDict,DivDict

def finalMerge( resultsDict, DivDict ):
    reg = re.compile( '|'.join([ '(?<={1})({0}){1}'.format(key,os.path.sep) for key in resultsDict.keys() ] ) )
    # reg checks for Bacteria, Archaea or Eukaryota essentially..
    for accession in DivDict.keys():
        start = DivDict[accession]['start']
        firstNode = reg.search(start)
        if firstNode:
            pathList = start[firstNode.start():].split( os.path.sep )
        else:
            pathList = []

        # get to the start node
        node = resultsDict
        for OTU in pathList:
            if OTU not in node:
                node.update( { OTU : {} } )
            node = node[OTU]

        divKeys = DivDict[accession].keys()
        for subnode,path in dict_walk( '' , node ):
            if 'accessions' in subnode:
                if accession in subnode['accessions']:
                    del( node['accessions'][ node['accessions'].index( accession ) ] )
        if 'accessions' in node.keys():
            node['accessions'].append( accession )
        else:
            node.update( { 'accessions' : [accession] } )

        continue
        del(divKeys[divKeys.index('start_node')])
        del(divKeys[divKeys.index('start')])
        for path_to_node in divKeys:
            end_node = node
            try:
                for taxa in path_to_node.split( os.path.sep ):
                    end_node = end_node[taxa]
            except KeyError:
                continue
                print end_node.keys()
                raise
            if 'accessions' in end_node:
                if accession in node['accessions']:
                    del( end_node['accessions'][ end_node['accessions'].index( accession ) ] )

    return resultsDict

def SepDiverge(resultsDict, DivDict,node_keys,seq_db):
    """Given the results dictionary and the diverge dictionary,
    check the scores present in DivDict, looking for a single
    highest score.
    If there is a single highest score, place that in the 
    appropriate location and delete it from DivDict.
    Otherwise, don't change anything."""
#   print "##\t{0} ambiguous sequences: {1}".format(len(DivDict.keys()) ,[seq_db.info[acc]['desc']for acc in DivDict.keys()]  )
    for accession in DivDict:
        divKeys = DivDict[accession].keys()
        del(divKeys[divKeys.index('start_node')])
        del(divKeys[divKeys.index('start')])
        scores = []
        for node in divKeys:
            scores.append( DivDict[accession][node] )
        bestScore = max(scores)
        nBest = scores.count(bestScore)
        if nBest == 1:
            print "We got a winner!"
            bestNode = divKeys[ scores.index(bestScore) ]
            if os.path.sep in bestNode:
                bestNode = bestNode.rsplit(os.path.sep,1)[1]
            if bestNode in resultsDict.keys() and accession not in resultsDict[bestNode]:
                resultsDict[bestNode]['accessions'].append(accession)
            else:
                if accession not in bestNode['accessions']:
                    resultsDict.update( { bestNode : { 'accessions' : [ accession ] } } )
                else:
                    print "Don't think we need this if clause in SepDiverge"
 #               continue
                # I replaced the below line with continue to attempt to fix a bug. This may not have fixed that
                # bug and instead caused other undesired behaviour.
                #DivDict[accession] = { bestNode : [ bestScore ] , 'start' : DivDict[accession]['start'] }
            del(DivDict[accession][bestNode])
            clean_converged( accession , DivDict[accession] )
            del( DivDict[accession] )
        else:
            continue
    return resultsDict, DivDict

def clean_converged( accession , diverge_dict_item ) : 
    print "{0} has converged!"
    div_keys = diverge_dict_item.keys()
    initial_node = diverge_dict_item['start_node']
    del( div_keys[div_keys.index('start_node')] ) ; del( div_keys[div_keys.index('start')] )
    for route in div_keys:
        if os.path.sep in route:
            path = route.split( os.path.sep )
        else:
            path = [route]
        node = initial_node
        for p in path:
            node = node[p]
        if accession in node['accessions']:
            del( node['accessions'][ node['accessions'].index(accession)] )
    return

def find_rel_path( node, OTUname,RelPath=[] ):
    """Given a dictionary node, and an OTU name, will search for 
    OTUname from the top of node and return the relative path, 
    delimited by '/'"""
    for key in node.keys():
        if key == OTUname:
            RelPath.append(key)
            return RelPath
        elif key == 'accessions':
            continue
        else:
            nextPath = RelPath + [key]
            x = find_rel_path( node[key] , OTUname, RelPath = nextPath )
            if type(x) == list:
                return x
            else:
                continue

def score2( scorer_pipe, tax_node, path, results_dict = None, diverge_dict = None ):
    """Called once per iteration. Therefore needs to receive the node names once
    at the start. Then it should start receiving lots of hmmmsearchThread results.
    """
    if results_dict is None:
        results_dict = {}
    elif 'accessions' in results_dict:
        results_dict.pop( 'accessions' )
    if diverge_dict is None:
        diverge_dict = {}
    inval = scorer_pipe.recv()  # This'll be the (sorted!) node names.
    nodes = inval
    inval = scorer_pipe.recv()  # This'll then be (accession, results,) for a single sequence.
    choices = set()
    unique_accessions = []
    ambiguous_nodes = {}
    while inval != 'end':
        try:
            accession, results = inval
            Evals, scores = results
        except ValueError:
            raise IOError(inval)
            #raise IOError( "[score2] Scorer ended with {0}".format( inval ) )
        best_score = max( scores )
        n_best = scores.count( best_score )
        if n_best == 1:
            choice = nodes[ scores.index( best_score ) ]
            if choice in choices:
                results_dict[choice]['accessions'].append( accession )
            else:
                choices.add( choice )
                results_dict.update( { choice : {'accessions': [accession] } } )
            if accession in unique_accessions:
                print "Already got {0} assigned to".format( accession ),
                for ch in choices:
                    if accession in results_dict[ch]['accessions']:
                        print ch,' ',
                    print 'Now we have it for {0}'.format( choice )
            unique_accessions.append( accession )
        elif n_best > 1: ## Ambiguous node!
            CONFIG.ambiguousCount += 1
            prev_index = -1
            top_scorers = []
            for count in xrange( n_best ):
                prev_index = scores.index( best_score, prev_index + 1 )
                top_scorers.append( nodes[prev_index] )
                if accession in diverge_dict.keys():
                    OTU_order = os.path.sep.join(  find_rel_path( diverge_dict[accession]['start_node'] , top_scorers[-1] )  )
                    diverge_dict[accession].update( { OTU_order : best_score } )
                else:
                    diverge_dict.update( { accession : { top_scorers[-1] : best_score, 'start' : path , 'start_node' : tax_node } } )
            top_scorers = tuple( top_scorers )
            if top_scorers in ambiguous_nodes:
                ambiguous_nodes[top_scorers] += 1
            else:
                ambiguous_nodes.update( { top_scorers : 1 } )
            #print 'Ambiguous node between: {0}'.format( ' & '.join( top_scorers ) )
        elif n_best == 0:
            print '##\tno matches for accession {0}'.format(accession)
        else:
            print "##\tn_best = {0}".format(n_best)
            raise ValueError("Can't figure out the winning nodes")
        inval = scorer_pipe.recv()
    if len( ambiguous_nodes ) > 0:
        print_ambiguous_results( ambiguous_nodes )
    return results_dict, diverge_dict

def print_ambiguous_results( amb_nodes ):
    print '# Ambiguous nodes:-\n',
    for amb_node in amb_nodes:
        comp = ' & '.join( amb_node )
        print '# ' + comp.ljust(40) , ' - {0}'.format( amb_nodes[amb_node] )

class Scorer( Thread ):
    """
    Let's put (through inQ) results here as they're produced by
    parseHMMThread. Scorer will continue to collate and order results
    as they're produced. As soon as all hmmsearch results for a sequence 
    are ready, then they're piped to score.
    """
    n_done = 0
    n_to_do = 0
    def __init__( self, hmmsearch_outQ, result_pipe, distrib_end_queue, seqDBQ ):
        Thread.__init__(self)
        self.inQ = hmmsearch_outQ
        self.out_pipe = result_pipe
        self.d_Q = distrib_end_queue
        self.seqDBQ = seqDBQ
        self.fns = {'end'    : self._end,
                    'update' : self._update,
                    'reset'  : self._reset,
                    'die'    : self._die,
                   }
        self.err_count = 0

    def _die( self, error, OTUName='' ):
        self.d_Q.put( OTUName )
        self.err_count += 1
        self.n_done += 1
        if CONFIG.debug:
            print '[Scorer.die] ',OTUName, 'dead:', self.err_count , ' to die:', self.n_to_do
        if self.n_done == self.n_to_do:
            self.seqDBQ.put( ('die',error) )  # --> seqDB
            self.out_pipe.send(error)         # --> score2

    def _end( self, OTU ):
        # Orders from HMMSearchThread
        self.n_done += 1
        self.d_Q.put( OTU )                  # --> distributor
        if self.n_done == self.n_to_do:
            for accession in self.results.keys():  
                self.out_pipe.send( ( accession, self.results.pop(accession), ) )
            self.out_pipe.send( 'end' )      # --> score2
            del( self.results )

    def _reset( self , nodes ):
        # from SSUMMO / reverser
        self.results = {}
        self.n_accessions_got = {}
        self.results_val = []
        self.hmm_indices = {}
        self.out_pipe.send( nodes )          #  --> score2
        self.n_to_do = len( nodes )
        for i,node in enumerate( nodes ):
            self.hmm_indices.update( { node : i } )
            self.results_val.append( float() )
        self.n_done = 0

    def _update( self, OTU, results ):
        # Ordered by HMMSearchThread
        (accession, Eval, score, ) = (results['accession'] , results['Eval'] , results['score'], )
        HMM_index = self.hmm_indices[ OTU ]
        ## Update result for this accession.
        if accession in self.results:
            self.results[accession][0][ HMM_index ] = Eval
            self.results[accession][1][ HMM_index ] = score
            self.n_accessions_got[accession] += 1
            #if self.n_accessions_got[accession] == self.n_to_do:
            #    self.out_pipe.send( (accession, self.results.pop( accession ),) )   #  -->  score2
        else:
            self.results.update( { accession : [ self.results_val[:], self.results_val[:] ] } )
            self.n_accessions_got.update( { accession : 1 } )
            self.results[accession][0][ HMM_index ] = Eval
            self.results[accession][1][ HMM_index ] = score
        return

    def run(self):
        inval = self.inQ.get()
        while inval != 'END':
            fn = inval[0]
            args = inval[1:]
            self.fns[fn]( *args )
            inval = self.inQ.get()
        #print '[Scorer.run] over'
        return

#class HMMSearchThread( Thread):
        #Thread.__init__(self)
class HMMSearchThread( multiprocessing.Process ):
    def __init__(self,inPipe,outPipe, d_Q, lock):
        multiprocessing.Process.__init__(self)
        if os.uname()[-1] == 'x86_64':
            self.hmmsearch = os.path.join( CONFIG.hmmerdir, CONFIG.hmmsearchCMD )
        else:
            self.hmmsearch = os.path.join( CONFIG.hmmer32dir, CONFIG.hmmsearchCMD )
        self.inPipe, self.__outPipe = (inPipe, outPipe, )
        self.outQ = d_Q  ## Goes to Scorer.
        #self.sem = semaphore
        if CONFIG.options['--max']: # max sensitivity:-
            self.command = [ self.hmmsearch, '-o', os.devnull, '--cpu', '1',
                             '--max',
                             '--domtblout', '/dev/stdout', '--noali',
                             'REPLACED', '/dev/stdin' ]
        else:
            self.command = [ self.hmmsearch,'-o',os.devnull, '--cpu', '1',
                             '--domtblout', '/dev/stdout', '--noali',
                             'REPLACED', '/dev/stdin' ]
        CONFIG.printHeaders = False
        self.lock = lock
        self.parser = Parser()
        self.parser.set_scorer(True)  # parse domtblout instead of tblout

    def run(self):
        self.Eval = CONFIG.options['-Eval']
        self.score = CONFIG.options['-score']
        parser = self.parser.parse
        if CONFIG.options['--verbose']:
            self.parser.set_verbose(True)
        else:
            self.parser.set_verbose(False)
        try:
            OTUName = None
            nseqs,HMMLocation = self.__outPipe.recv()  # <-- sequence_distributor
            while nseqs != 'END':
                self.command[-2] = HMMLocation  ### This replaces 'REPLACED' in self.command, setting the HMM location.
                OTUName = HMMLocation[ HMMLocation.rfind( os.path.sep ) + 1: HMMLocation.rfind('.') ]
                try:
                    process = subprocess.Popen(self.command, shell=False,
                                               stdin=subprocess.PIPE,
                                               stderr=subprocess.PIPE,
                                               stdout=subprocess.PIPE,
                                               bufsize=-1)
                except OSError,e:
                    self.outQ.put( ('die', e,OTUName )) # --> Scorer
                    sys.stderr.write( "COMMAND FAILED: {0}\n".format( ' '.join(self.command) ) )
           #         while self.__outPipe.recv() != 'END':  # <-- should come from distributor
           #             continue
                    self.__outPipe.recv() # seqs
                    nseqs,HMMLocation = self.__outPipe.recv()
                    continue
                try:
                    # feed in the sequences received from distributor.
                    for seq in self.__outPipe.recv():
                        process.stdin.write( seq )
                except IOError, e:
                    #sys.stderr.write( '[HMMSearchThread.run] {0} dying. --> Scorer'.format( OTUName ) )
                    hmmerror = '\n'.join( process.communicate() )
                    self.outQ.put( ('die', '{0}\n{1}'.format(e, hmmerror), OTUName )) # --> Scorer
                    sys.stdout.write('HMMError: ')
                    sys.stdout.write(hmmerror)
           #         while self.__outPipe.recv() != 'END':  # <-- should come from distributor
           #             continue
                    raise(e)
                except Exception, e:
                    hmmerror = '\n'.join( process.communicate() )
                    hmmerror = process.communicate()[1]
                    self.outQ.put( ('die', '{0}\n{1}'.format(e, hmmerror), OTUName) ) # --> Scorer
                    raise(e)
                finally:
                    process.stdin.close()
                for results in parser( process ):
                    self.outQ.put( ('update',OTUName, results,)  )   # --> Scorer
                #self.sem.release()  # Place if using semaphores, which seem unnecessary..?
                self.outQ.put( ('end', OTUName,) )        ## --> Scorer
                nseqs,HMMLocation = self.__outPipe.recv() ## <--  distributor
        except KeyboardInterrupt,e:
            self.outQ.put( ('die', e, {'OTUName' : OTUName}) ) # --> Scorer
            raise(e)
        finally:
            self.__outPipe.close()
        return

def SSUMMO(node, path,results_node,seqDB,result_pipe,diverge_dict={}):
    ## At each path, choose which dir to go into, by performing
    ## hmmsearch on the fasta sequence against each next HMM.
    depth = path.count( os.path.sep ) - CONFIG.arbDBdir.count( os.path.sep )
    node_keys = sorted(node.keys())
    if 'accessions' in node_keys:
        del(node_keys[node_keys.index('accessions')])
    num_nodes = len(node_keys)

    if num_nodes == 0:
        return results_node, diverge_dict
    ## Merge ambiguous results with results_node
    results_node,diverge_dict = MergeDiverge(results_node,diverge_dict, node)
    seqDB.outQ.put(( 'reset', path, node_keys, )) ## --> sequence_distributor.

    ## First time we enter SSUMMO(), seqDB already has all sequences loaded.
    if os.path.realpath(path) == os.path.realpath( CONFIG.options['-start']):
        seqs = 'all'  
        seqDB.prefetchQ.put( ('get_all',None ) )
    else:
        accessions = results_node['accessions']
        seqDB.prefetchQ.put(('get',accessions,))  ## --> seqDB.
    if num_nodes == 1:  ## If only one node, no need for hmmsearch'ing.
        singleNode = node_keys[0]
        results_node =  { singleNode: { 'accessions' : results_node.pop( 'accessions' ) } }
        seqDB.prefetchQ.put( ('Skip',None) )
    else:  
        seqDB.prefetchQ.put( ('Proceed',None) )
        results_node, diverge_dict = score2( result_pipe, node , path ,  diverge_dict=diverge_dict)

    results_node, diverge_dict = SepDiverge(results_node,diverge_dict,node_keys,seqDB)
    if len(results_node) == 0 and diverge_dict=={}:
        print "##\tNo results"
        print "##\t", node_keys
        print "##\t", path
        # Actually could throw an error in here. i.e. no need for the clause..? Never actually seen these printed.
        return results_node, diverge_dict 
    ## Separate the results after doing SSUMMO on all nodes.
    choices = sorted( results_node.keys() )

    ## Recursively do SSUMMO.
    for choice in choices: 
        if len(results_node[choice]['accessions']) == 0:
            del(node[choice])  ## If node has no results, delete it.
        else:  ## otherwise recursively do SSuMMO on winning node.
            print_tree_line( choice, results_node, depth )
            results_node[choice], diverge_dict = SSUMMO(node.pop(choice), os.path.join(path, choice ), results_node[choice],seqDB,result_pipe,diverge_dict=diverge_dict)
    return results_node, diverge_dict

def print_tree_line( choice,results_node,depth ):
    choices = sorted( results_node.keys() )
    if choice == choices[0] and len(choices) == 1:
        sys.stdout.write( '{0}|- {1}'.format(' '*depth*3, choice ).ljust(45)     +
                ' ({0})\n'.format( len(results_node[choice]['accessions'] ) ))
    elif choice == choices[0]:
        sys.stdout.write( '{0}+- {1}'.format(' '*depth*3, choice ).ljust(45)     +
                ' ({0})\n'.format( len( results_node[choice]['accessions']) ) )
    elif choice == choices[-1]:
        sys.stdout.write( '{0}|_ {1}'.format(' '*depth*3, choice ).ljust(45)     + 
                ' ({0})\n'.format( len( results_node[choice]['accessions']) ) )
    else:
        sys.stdout.write( '{0}|- {1}'.format(' '*depth*3, choice ).ljust(45)     +
                ' ({0})\n'.format(len(results_node[choice]['accessions']) ) )
    sys.stdout.flush()
    return

def findStart( tdict ):
    """Locates the directory where to enter the SSUMMO loop.
    Default is to start in arbDBdir, which is configured in
    CONFIG.py.
    To change, give the command option '-start /some/path/to/dir'
    """
    if os.path.realpath(CONFIG.options['-start']) == os.path.realpath(CONFIG.arbDBdir):
        return tdict, CONFIG.arbDBdir
    else:
        found = False
        startKeys = tdict.keys()
        startDir = CONFIG.options['-start']
        startDir = startDir.rstrip(os.path.sep)
        pathList = CONFIG.options['-start'].split( os.path.sep)
        for key in startKeys:
            if key in pathList:
                firstNode = pathList.index(key)
                found = True
                break
            else:
                continue
        node = tdict
        if not found:
            return tdict, CONFIG.arbDBdir
        for nodeName in pathList[firstNode:]:
            if nodeName.strip() == '':
                continue
            else:
                node = node[nodeName]
                parentNode = nodeName
        startDir = os.path.join( CONFIG.arbDBdir,os.path.sep.join(pathList[firstNode:]) )
        CONFIG.options['-start'] = startDir
    print "##\tStarting SSUMMO from node '{0}' at path '{1}'".format(parentNode, startDir)
    return node, startDir

#class seqDB( multiprocessing.Process ):
class seqDB( Thread ):
    def __init__( self, seqfile, prefetchQ, distribute_Q, distributor_end_queue, threads,lock,reverse_pipe=None,format='fasta'):
        """This is the only process that reads the sequence input file.

        prefetchQ must be a multiprocessing.Queue() object. In this, you must put the
           following keywords, which will be called from their references in
           self.functions:-
               *    annotate
               *    die
               *    forget
               *    forget_inverse
               *    get
               *    get_all
               *    reverse
               *    separate
               *    slice
               *    Skip
               *    Proceed
        See the help for the relevant function to see what they do.
        You can add you own functions too.

        distribute_Q - Don't touch. Scorer uses this to synchronise finished nodes.
        threads   - list of threads which sequences shall be distributed to.
        lock      - multiprocessing.Lock() instance
        semaphore - multiprocessing.semaphore() instance. [removed semaphore implementation. Seemed unnecessary]
        reverse_pipe - Pipe used by seqDB.reverse()
                       Send accessions down the other pipe end, and
                       this will reverse them.
        format    - format of sequences in the sequence file.
        """
        #multiprocessing.Process.__init__(self)
        Thread.__init__(self)
        self.prefetchQ = prefetchQ
        self.seqfile = seqfile
        self.format = format
        self.rpipe = reverse_pipe
        self.lock = lock
        #self.outQ = multiprocessing.Queue()  # Queue going to distributor.
        self.distributor = sequence_distributor( distribute_Q , threads , distributor_end_queue, prefetchQ)
        self.threads = threads
        self.outQ = distribute_Q
        self.functions = {
                'annotate'  : self.annotate,
                'die'       : self.die,
                'forget'    : self.forget,
                'forget_inverse': self.forget_inverse,
                'get'       : self.get,
                'get_all'   : self.get_all,
                'reverse'   : self.reverse,
                'separate'  : self.separate,
                'slice'     : self.slice,
                'Skip'      : self.skip,
                'Proceed'   : self.proceed,
                }
        self.alive = False

    def annotate( self , outfile ):
        """seqDB knows nothing of the taxonomic assignments, just accessions and
        sequences. We therefore read tuple pairs of:
                    ( <accession> , <assignment> )
        through self.prefetchQ, until the tuple ( False, False ) is read.

        When calling annotate (by sending the string 'annotate' through self.prefetchQ),
        also give the name of the output sequence file. Sequences are saved in the same
        format as they were read.

        Once all assigned sequences have been saved, the other sequences which were not
        assigned to a taxa are appended to the input file. If this is undesired, use
        separate instead.
        """
        accessions = self.separate( outfile )
        print '#\tSaving annotated sequences to {0}'.format( outfile )
        close = lambda: None
        if not hasattr( outfile, 'write' ):
            try:
                outhandle = file( outfile, 'w' )
                close = handle.close
            except IOError:
                print "can't write to {0}. dumping to standard out instead"
                time.sleep(1)
                outhandle = sys.stdout
        elif hasattr( outfile, 'closed' ) and outfile.closed:
            # closed file handle. Let's append to it
            outhandle = file( outfile.name ,'a' )
        else:
            # valid file handle
            outhandle = outfile
        try:
            with file( self.seqfile ,'r' ) as inhandle:
                for seq in SeqIO.parse( inhandle , self.format ):
                    if seq.id in accessions:
                        accessions.remove( seq.id )
                    else:
                        outhandle.write( seq.format( self.format ) )
        finally:
            close()

    def die(self,e):
        #self.prefetchQ.put('END')
        self.outQ.put( 'END' )  ## --> sequence_distributor
#        if self.alive:
#            self.shutdown()
        raise IOError('seqDB dying. Given error (prob from Scorer):\n',e)

    def forget( self, accessions ):
        """Deletes all provided accessions from memory."""
        for acc in accessions:
            del( self.seqs[ acc ] )

    def forget_inverse( self, accessions ):
        to_forget = set( self.seqs.keys() ).difference( set( accessions ) )
        self.forget( to_forget )

    def get( self, accessions ):
        """Given a list of accessions, puts each one into self.outQ."""
        for acc in accessions:
            self.outQ.put( ('add',self.seqs[acc].format('fasta')) )
        return

    def get_all(self,*args):
        """Puts all sequences, in fasta format, into self.outQ."""
        for seq in self.seqs.itervalues():
            self.outQ.put( ('add',seq.format('fasta'),) )
        return

    def load_sequences( self ):
        if self.format == 'fasta':
            desc_reg = re.compile( r'^\s*(\S+)\s*(\S*.*)[\n\r]?$' )
            accession_info = lambda SeqRecord: desc_reg.search( SeqRecord.description ).groups()
        else:
            accession_info = lambda SeqRecord: (SeqRecord.id , SeqRecord.description)
        self.seqs = {}
        ids = set()
        descs = set()
        self.distributor.start()
        with file(self.seqfile,'r') as inFile:
            for seq in SeqIO.parse( inFile , self.format ):
                #accession, info = desc_reg.search( seq.description ).groups()
                accession, info = accession_info( seq )
                self.seqs.update( { accession : seq } )   # Just modified this to use accession rather than seq.id, in conformance with the reg exp sequence description parser in SSUMMO_tally.
        return

    def proceed( self, *args ):
        """Put 'Proceed' into outQ, for sequence_distributor."""
        self.outQ.put( ('Proceed',None) )
        pass

    def reverse( self ):
        """Called once before traversing the directory. The input pipe (self.rpipe)
        is closed before returning None.
        """
        if self.rpipe is None:
            raise ValueError( "No reverse pipe given to seqDB!!" )
        for fasta_sequence in self.seqs.itervalues():
            self.outQ.put( ('add',fasta_sequence.format('fasta'),) )  # To distributor
        self.outQ.put( ('Proceed',None,) )
        #print '[seqDB.reverse] waiting for distributor.rpipe'
        to_reverse = self.rpipe.recv()
        #print '[seqDB.reverse] recv\'d ',to_reverse
        reversing = 0
        while to_reverse != 'END':
            seq = self.seqs[to_reverse]
            self.seqs.update( { to_reverse : \
                        Bio.SeqRecord.SeqRecord(seq.seq.reverse_complement(),
                        id = seq.id, name=seq.name ) } )  
            reversing += 1
            to_reverse = self.rpipe.recv()
        self.rpipe.close()

    def run(self):
        self.alive = True
        self.load_sequences()
        with self.lock:
            print "#SeqDB holding {0} sequences from {1} in memory.".format( len(self.seqs) , self.seqfile )
        #self.slice_all()  ## Uses prefetch Q.
        try:
            self.reverse()   # Enter reverse loop. This shall slice the sequences too.
            inval = self.prefetchQ.get()
            while inval != 'END':
                fn = self.functions[ inval[0] ]
                args = inval[1]
                fn( args )
                inval = self.prefetchQ.get()
        except (KeyboardInterrupt,Exception,),e:
            print 'seqDB error!'
            self.die(e)
        else:
            if CONFIG.debug:
                print '##\tseqDB.shutdown(...',
            self.shutdown()
            if CONFIG.debug:
                print '##\tseqDB.shutdown(...',

    def separate( self , outfile ):
        """Works similarly to seqDB.annotate.
        Reads (accession, annotation) tuple pairs through self.prefetchQ,
        and saves them to outfile. Typically, this saves all sequences
        that could be assigned to a taxa to <outfile>.
        """
        accessions = set()
        print '#\tSaving annotated sequences to {0}'.format( outfile )
        close = lambda: None
        if not hasattr( outfile, 'write' ):
            try:
                if len(outfile.rsplit('.',1)[1]) == 0:
                    outfile = outfile + self.format
                handle = file( outfile, 'w' )
                close = handle.close
            except IOError:
                print "can't write to {0}. dumping to standard out instead"
                time.sleep(1)
                handle = sys.stdout
        elif hasattr( outfile, 'closed' ) and outfile.closed:
            # closed file handle append
            handle = file( outfile.name ,'a' )
        else:
            # valid file handle
            handle = outfile
        try:
            accession , annotation = self.prefetchQ.get()
            while accession is not False:
                accessions.add( accession )
                sequence = self.seqs[accession]
                sequence.description += ' {0}'.format( annotation )
                handle.write( sequence.format( self.format ) )
                accession , annotation = self.prefetchQ.get()
        finally:
            close()
        return accessions

    def shutdown(self):
        """Closes down all threads."""
        if CONFIG.debug:
            print '[seqDB.shutdown] telling sequence_distributor to end'
        self.outQ.put( 'END', timeout=1 )  ## --> sequence_distributor
        for thread in self.threads:
            thread.inPipe.send( ('END',None) )
        thread.outQ.put( 'END', timeout=1 ) 
        #print '[seqDB.shutdown] joining sequence_distributor'
        #self.prefetchQ.close()  ## End SeqDBQ
        #self.distributor.join()
        #print '[seqDB.shutdown] done.'
        self.alive = False
        return

    def skip( self, *args ):
        """Put 'Skip' into outQ, for sequence_distributor."""
        self.outQ.put( ('Skip',None) )
        pass

    def slice( self , accession, start, end ):
        """This will slice sequence with seq.id accession, from
        start to end, INCLUSIVE. i.e. using python indexing [start -1 : end + 1].
        This was chosen to comply with hmmer alignment co-ordinates.
        """
        self.seqs[ accession ] = self.seqs[ accession ][ start : end + 1 ]


    def slice_all(self):
        """Keeps reading self.prefetchQ, expecting the tuple (accession, start, end).
        For each tuple received, this will slice the relevant sequence accordingly.
        This will continue to expect tuples until it gets "END" from self.prefetchQ.

        N.B. Sequences are sliced inclusive of given position.
        """
        inval = self.prefetchQ.get()
        while inval != 'END':
            id, start, end = inval
            self.slice( id, start, end )
            inval = self.prefetchQ.get()
        return

class sequence_distributor( Thread ):
    def __init__( self, seqDBQ, hmmsearch_threads, jobEndQ ,SeqDBInQ):
        Thread.__init__( self )
        self.inQ = seqDBQ
        self.jobEndQ = jobEndQ
        self.errorQ  = SeqDBInQ
        self.threads = hmmsearch_threads
        self.n_threads = len( self.threads )
        self.thread_nums = set( range( self.n_threads ) )
        self.fns = { 'add'     : self._addseq ,
                     'Proceed' : self._proceed,
                     'Skip'    : self._skip   ,
                     'load'    : self._load   ,
                     'reset'   : self._reset  ,
                     'die'     : self._die    ,
                   }
        self.dead = False
        self.seqs = set()
#        self.sem = semaphore

    def _addseq( self, seq ):
        # inval <-- seqDB
        self.seqs.add( seq )

    def _die( self, e=None, OTUName=None ):
        import Queue
        if not self.dead:
            sys.stderr.write( '[sequencedistributor.die] dieing to errorQ\n',e )
            try:
                self.errorQ.put( ('die' , e), timeout=1 )
            except Queue.Empty,e2:
                print '[sequencedistributor] error!'
                print e2,'\n',e
                raise
        self.jobEndQ.put( ('die' , e ), timeout=1 )
        self.dead = True
        return

    def _proceed( self, *args ):
        # From seqDB. Starts the cascade.
        self.threads[0].outQ.put( ('reset', self.nodes) ) # --> Scorer. Tells order of nodes to search.
        self.distribute( )
    def _reset( self, *args ):
        self.seqs = set()
        self._load( *args )
    def _skip( self, *args ):
        self.seqs = set()
        #self.threads[0].outQ.put( ( 'reset', self.nodes ) )
    def _load( self, directory , nodes ):
        # inval <-- SSUMMO / Preparer.reverser
        self.nodes = nodes
        self.paths = [ os.path.join( directory, node, node + '.hmm' ) for node in nodes ]
    def run( self ):
        self.threadInd = 0
        inval = self.inQ.get()
        try:
            while inval != 'END':
                fn = inval[0]
                args = inval[1:]
                self.fns[fn]( *args )
                inval = self.inQ.get()
                continue
        except Exception:
            self._die()
            raise
        finally:
            self.inQ.close()

    def distribute( self ):
        self.threadInd = 0
        num_nodes = len(self.paths)
        num_seqs = len( self.seqs )
        self.thread_indices = {}
        min_val = min( [num_nodes, self.n_threads] )

        #print '[distributor.distrute] initiating {0} hmmsearch threads'.format( min_val )
        # Start as many processes as possible, constrained by number of nodes or available processes.
        for i in xrange( min_val ):
            self.threads[i].inPipe.send( [ num_seqs , self.paths[i] ] )  # -->
            self.thread_indices.update( { self.nodes[i] : i } )
            self.threads[i].inPipe.send( self.seqs )    # --> HMMSearchThread
            self.threadInd += 1
            #self.sem.acquire()
        # so if there's more nodes than HMMSearchThread's.
        #print '[distributor.distrute] initiated {0} threads. {1} left'.format( self.threadInd  , num_nodes - min_val )
        if min_val != num_nodes:  
            # This loop covers submitting jobs for the remaining nodes, so long as there's a free thread. #
            #print '[distributor.distribute] trying to initiate remaining {0} threads'.format( num_nodes - min_val )
            for index in xrange( min_val , num_nodes ):  
                self.get_free_thread()
                #self.sem.acquire()   # Removed semaphore implementation. (get_free_thread limits number of processes instead)
                self.threads[self.threadInd].inPipe.send( [ num_seqs , self.paths[index] ] )   # -->
                self.thread_indices.update( { self.nodes[index] :self.threadInd } )
                self.threads[self.threadInd].inPipe.send( self.seqs )  # -->  HMMThread
            # Get the results.
            #print '[distributor.distrute] waiting for another {0} processes to end'.format( min_val )
            for i in xrange( min_val ):
                self.thread_indices.pop( self.jobEndQ.get() )          #  <-- 'OTUName' from Scorer.
        else:
            #print '[distributor.distrute] waiting for another {0} nodes to end'.format( num_nodes )
            for i in xrange( num_nodes ): # === xrange( min_val )
                self.thread_indices.pop( self.jobEndQ.get() )          #  <-- 'OTUName' from Scorer
        #print '[distributor.distrute] ending'
        return

    def get_free_thread( self ):
        node_name = self.jobEndQ.get()  # <-- Scorer
        self.threadInd = self.thread_indices.pop( node_name )
        return

class Preparer( ):
    def __init__( self, options=None ):
        """After initialising, just need to call the enter_loop()
        method.
        
        enter_loop() tries to start the seqDB process as well as
        starting the hmmsearch threads.
        
        Once these are ready, start_threads() returns and
        _cascade() is called.

        _cascade enters the main SSUMMO loop by calling any methods
        defined in self.premethods, which is a list of function
        calls.
        
        We also have lists of function calls for self.recursemethods
        and self.postmethods.

        The default methods are:-
        premethods = [ self.reverser , self.find_start , self._print_nodes ]
        recursemethods = []
        postmethods = []

        By default, the only recurse method is determined by self.find_start,
        depending on CONFIG.options['-start'] (which can be defined on the
        command line).
        """
        self.seqDB = seqDB
        self.premethods = [ self.reverser , self.find_start , self._print_nodes ]
        self.recursemethods = []
        self.postmethods = []
        if options:
            if options['--annotate']:
                self.postmethods = [self.annotate]
            if options['--separate']:
                self.postmethods.append(self.separate)
                pass
        self.START_DIR = str()
        self.START_NODE = dict()
        self.DIVERGE_DICT = dict()
        self.RESULTS = dict()

    def annotate(self):
        if '.' in CONFIG.options['-out']:
            infile , suffix = CONFIG.options['-out'].split('.')
            outfile = '{0}_SSUMMO_annotated.{1}'.format( infile , suffix )
        else: outfile = sys.stdout
        self.seqDB.prefetchQ.put( ('annotate',outfile) )
        self._iter_results()

    def call( self ):
        self.RESULTS,\
            self.DIVERGE_DICT = SSUMMO( self.START_NODE ,
                                        self.START_DIR , 
                                        self.RESULTS, 
                                        self.seqDB , 
                                        self.score_pipe_child , 
                                        diverge_dict=self.DIVERGE_DICT )
        self.RESULTS = self.shuffle_back( self.RESULTS , self.START_DIR  )
        return
        #return self.results, diverge_dict

    def call_iter( self ):
        for node in self.RESULTS.keys():
            if node == 'accessions':
                continue
            self.RESULTS[node],\
                self.DIVERGE_DICT = SSUMMO(self.START_NODE[node],
                                           os.path.join( self.START_DIR, node ),
                                           self.RESULTS[ node ],
                                           self.seqDB,
                                           self.score_pipe_child,
                                           diverge_dict=self.DIVERGE_DICT )
        return
        #return results , diverge_dict

    def _cascade( self ):
        t0 = time.time()
        # Counting for debugging purposes.
        n_before = len( get_accessions( self.RESULTS , accessions=[] ) )
        for method in self.premethods:
            method()
        t = time.time()
        # Counting for debugging purposes.
        n_mid = len( get_accessions( self.RESULTS , accessions = [] ) )
        for method in self.recursemethods:
            method()
        #self.RESULTS, self.DIVERGE_DICT = self.caller( self.RESULTS, self.START_NODE , self.START_DIR , self.DIVERGE_DICT )
        # Counting for debugging purposes.
        n_after = len( get_accessions( self.RESULTS , accessions = [] ) )
        for method in self.postmethods:
            method()
        t = time.time() - t0
        # Counting for debugging purposes.
        n_final = len( get_accessions( self.RESULTS , accessions = [] ) )
        print "##\tRetrieved {0} sequences from {1}, in {1} seconds".format(n_final,n_before, t)
        return self.RESULTS

    def enter_loop( self ):
        """Given the name of a sequence file, makes a blast database
        and provides the entry point for the SSUMMO algorithm. Returns
        a dictionary of results containing taxonomic nodes with matches
        and at each node the accessions that have been assigned there.
        """
        try:
            self._start_threads()
            results = self._cascade()
        except (KeyboardInterrupt,):
            sys.stderr.write('[main thread] SHUTTING DOWN...\n')
            raise
        except Exception:
            raise
        else:
            return results
        finally:
            self.shutdown()

    def find_start( self ):
        """If the -start option was given, then this shuffles the results
        along accordingly and also calls SSuMMo on a single node, whereas 
        at the root, need to iterate through all 3 domains.

        This therefore sets self.start_node and self.start_dir, indicating
        which node we should start SSuMMo.
        """
        tdict = getTaxIndex(silent=True)
        self.START_NODE, self.START_DIR = findStart(tdict )
        if self.START_DIR not in [ CONFIG.arbDBdir , 'Bacteria','Archaea','bac','arc' ]:
            self.RESULTS, self.START_NODE = self.shuffle_along( self.RESULTS,self.START_DIR, self.START_NODE )
            print "# starting at '{0}'".format(self.START_DIR)
            self.recursemethods.append( self.call )
        else:
            self.recursemethods.append( self.call_iter )

    def _final_merge( self ):
        self.RESULTS = finalMerge( self.RESULTS, self, self.DIVERGE_DICT )

    def _forget( self, results ):
        """Finds all accessions in results, and gets the seqDB instance to forget
        the rest."""
        accessions = []
        for result in results.values():
            if 'accessions' in result:
                accessions += result['accessions'] 
        self.seqDB.prefetchQ.put(( 'forget_inverse', accessions ))
        return

    def _iter_results(self):
        """Called by annotate and separate to send accessions and their
        taxonomy through seqDB.prefetchQ
        """
        unknowns = re.compile(r'(unidentified)|(incertae)|(unknown)|(uncultured)',re.I)
        for path,node in dict_walk('', self.RESULTS ):
            if 'accessions' in node:
                taxpath = path.split( os.path.sep )
                if unknowns.search( taxpath[-1] ):
                    taxnode = '[{0}]'.format(', '.join( taxpath[-1:-3:-1] ))
                else:
                    taxnode = '[{0}]'.format(taxpath[-1])
                for accession in node['accessions']:
                    self.seqDB.prefetchQ.put( ( accession, taxnode ) )
        self.seqDB.prefetchQ.put( (False,False) )
        return

    def _print_nodes( self ):
        for node in self.RESULTS.keys():
            if node == 'accessions':
                continue
            print_tree_line( node, self.RESULTS, 0 )

    def reverser( self ):
        nodes = { 'Archaea'    : {},
                  'Bacteria'   : {},
                  'Eukaryota'  : {},
                  'rArchaea'   : {},
                  'rBacteria'  : {},
                  'rEukaryota' : {} }
        try:
            self.distribute_Q.put( ('load', CONFIG.arbDBdir, sorted(nodes.keys()) , ) )  ## --> sequence_distributor
            results, self.DIVERGE_DICT = score2( self.score_pipe_child, nodes, CONFIG.arbDBdir )
            self.RESULTS = self._reverse( results )
        finally:
            self.reverse_pipe_in.send( 'END' ) # --> seqDB
        return 

    def _reverse( self, results):
        Ireversed = 0
        total = 0
        delete = []
        self._forget( results )
        for node in sorted(results.keys()):
            self.Write( '##  In {0}, '.format( node ) )
            if node[:1] == 'r':  # top match is a reverse node.
                choice = node[1:]
                if 'accessions' in results[node].keys():
                    accessions = results[node].pop( 'accessions' )
                    self.Print( "there's {0} sequences:-".format( len( accessions ) ) )
                    for accession in accessions:
                        Ireversed += 1
                        self.reverse_pipe_in.send( accession )
                    if choice not in results:
                        results.update( { choice : { 'accessions' : accessions } } )
                    else:
                        if 'accessions' in results[choice].keys():
                            results[choice]['accessions'] +=  accessions 
                        else:
                            results[choice].update( { 'accessions' : accessions } )
                else:
                    pass
                delete.append( node )
                continue
            else:
                if 'accessions' in results[node].keys():
                    self.Print( "there's {0} sequences:-".format( len( results[node]['accessions'] ) ) )
                else:
                    delete.append( node )
        for node_to_go in delete:
            del( results[node_to_go] )
        delete = []
        for node in results:
            if 'accessions' not in results[node].keys():
                delete.append( node )
            else:
                total += len( results[node]['accessions'] )
        for node_to_go in delete:
            del( results[node_to_go] )
        if Ireversed > 0:
            self.Print( '# Reversed {0} sequences'.format( Ireversed ) )
        return results

    def separate(self):
        if '.' in CONFIG.options['-out']:
            infile , suffix = CONFIG.options['-out'].rsplit('.',1)
            outfile = '{0}_SSUMMO_separated.{1}'.format( infile , suffix )
        else:
            outfile = sys.stdout
        self.seqDB.prefetchQ.put( ('separate',outfile) )
        self._iter_results()

    def shuffle_back( self , results , start_dir ):
        full_results = {}
        node_path = start_dir[ len( CONFIG.arbDBdir ):].strip( os.path.sep ).split( os.path.sep )
        for node in node_path:
            full_results.update( { node : {} } )
        full_results.update( results )
        return full_results

    def shuffle_along( self , results_dict , start_dir ,start_node):
        rel_start = start_dir[ len( CONFIG.arbDBdir ) : ].strip( os.path.sep )
        rel_path = rel_start.split( os.path.sep )
        first = rel_path[0]
        accessions = [ accession for accession in get_accessions( results_dict[ first ], accessions=[] ) ]
        for path in rel_path:
            results_dict = results_dict[path]
        results_dict.update( { 'accessions' : accessions } )
        return results_dict, start_node

        last_dir = os.path.realpath(CONFIG.arbDBdir).rstrip( os.path.sep ).rsplit( os.path.sep,1 )[-1]
        start_dir_list = os.path.realpath(start_dir).rstrip( os.path.sep ).split( os.path.sep )
        if last_dir == start_dir_list[-1]:
            folders = []
        else:
            try:
                # Try to find the nodes in relation to the arbDBdir.
                folders = start_dir_list[ start_dir_list.index( last_dir ) + 1: ]
            except ValueError:
                raise ValueError( 'arbDBdir in CONFIG.py ({0}) must start with same file path as -start ({1})'.format( CONFIG.arbDBdir , start_dir  ))
        node = results_dict
        #start_nodes = set( [folder for folder in folders] )
        start_nodes = set( start_node.keys() )
        for folder in folders:
            ## Delete irrelevant nodes from start_node.
            node_names = set( node.keys() )
            other_nodes = node_names.difference( start_nodes )
            for other_node in other_nodes:
                del( node[other_node] )
            ## Move into next node.
            if 'accessions' in node:
                node.update( { folder : { 'accessions': node.pop('accessions') } } )
            try:
                node = results_dict[folder]
                start_node = start_node[folder]
            except KeyError,e:
                sys.stderr.write( repr(e) + '\n' )
        return results_dict, start_node

    def shutdown( self ):
        if CONFIG.debug:
            print '[Preparer.shutdown] END --> seqDB'
        self.seqDB.prefetchQ.put('END')  ## SeqDB will end Distributor. Distributor will end hmmsearch threads.
        #print '[Preparer.shutdown] stopping Scorer'
        #self.hmmsearch_outQ.put('END')  ## Scorer inQ
        if CONFIG.debug:
            print '[Preparer.shutdown] joining seqDB'
        self.seqDB.join()
        if CONFIG.debug:
            print '[Preparer.shutdown] joining threads'
        for thread in self.threads:  # Is it quicker to do in two loops or one?
            thread.join()
        if CONFIG.debug:
            print '[Preparer.shutdown] joining scorer'
        self.scorer.join()
        if CONFIG.debug:
            print '[Preparer.shutdown] successfully joined everything'

    def _start_threads( self ):
        self.threads = []
        thread_pipes = [multiprocessing.Pipe() for i in xrange( int(CONFIG.options['-ncpus']))]
        self.score_pipe_parent,self.score_pipe_child = multiprocessing.Pipe()
        self.hmmsearch_outQ = multiprocessing.Queue()
        self.lock = multiprocessing.RLock()
        for i in xrange(int(CONFIG.options['-ncpus'])):
            self.threads.append( HMMSearchThread( thread_pipes[i][0], thread_pipes[i][1], self.hmmsearch_outQ ,self.lock) )
        CONFIG.printHeaders = True  ## Doesn't actually work. In case we wanted to print hmmsearch results.
        for thread in self.threads:  
            thread.start()
        self.seq_file_name = CONFIG.options['-in']
        #### Create queues & pipes.
        self.prefetchQ = multiprocessing.Queue() # Send accessions for seqDB prefetching.
        self.distribute_Q = multiprocessing.Queue()
        distrib_end_Q = multiprocessing.Queue()
        self.reverse_pipe_in , self.reverse_pipe_out = multiprocessing.Pipe()
        #### Create processes / threads (calls __init__ on each one).
        self.seqDB = seqDB(self.seq_file_name, 
                           self.prefetchQ, 
                           self.distribute_Q,
                           distrib_end_Q,
                           self.threads,
                           self.lock,
                           reverse_pipe = self.reverse_pipe_out,
                           format=CONFIG.options['-format'] )
        self.scorer = Scorer( self.hmmsearch_outQ, self.score_pipe_parent, distrib_end_Q , self.prefetchQ )
        #### Start them.
        self.scorer.start()
        self.seqDB.start()

    def Print( self, string ):
        self.lock.acquire()
        print string
        self.lock.release()

    def Write( self, string ):
        self.lock.acquire()
        sys.stdout.write( string )
        sys.stdout.flush()
        self.lock.release()

class LocalOptions( Options ):
    def __init__( self,args=None ):
        self.options = {
                    '-start'     : CONFIG.arbDBdir,
                    '-in'        : [] ,
                    '-out'       : None , 
                    '-ncpus'     : multiprocessing.cpu_count() - 1,
                    '-Eval'      : str(10),
                    '-format'    : 'fasta',
                    '-score'     : str(1),
                    '-servers'   : ('localhost'),
                    '--createXML': False,
                    '--verbose'  : False,
                    '--max'      : False,
                    '--annotate' : False,
                    '--separate' : False,
                    '--debug'    : False,
                }
        self.helpTxt = {'-start' : 'Start node for SSUMMO. Can be a domain, or deeper, but must start at least with the domain',
                    '-in'     : 'Query sequence file.',
                    '-out'    : 'Output results file names. Suffix will change. [Optional - prefix inferred from -in]',
                    '-ncpus'  :'Number of worker processes to initiate. [CPU count - 1]',
                    '-format' :'Input file sequence format [fasta].',
                    '-servers': 'Server names. [Not implemented yet].',
                    '-Eval'   : 'hmmsearch Evalue threshhold [10]',
                    '-score'  :'hmmsearch score threshold [1]',
                }
        self.switches = {
                    '--createXML':'Create phyloxml output?? [No]',
                    '--verbose' : 'Print HMMer results [No!]',
                    '--max' : 'Use hmmsearch --max flag (bypass filters that remove sequences from full scoring set). [no]',
                    '--separate':'Separate out annotated 16S rRNA sequences. [False]',
                    '--annotate':'Annotate 16S rRNA sequences with species name. [False]',
                    '--debug' : 'Turn on debug output to help with threading issues',
            }
        self.useage = "python {0} [option [arg]] ...".format( sys.argv[0] )
        self.example = self.useage
        self.singleargs = [ '-ncpus','-servers','--createXML','-start','-format','--verbose','--max','-score','-Eval']
        self.multiargs = [ '-in','-out' ]
        if args is not None:
            self.localParseArgs( args )
    def __iter__( self ):
        for i in self.options:
            yield i
    def localParseArgs(self,args):
        self.options = self.parseArgs( args )
        if self['-out'] is None:
            self.options.update( { '-out' : [] } )
            for option in self.options['-in']:
                if '.' in option:
                    prefix , suffix = option.rsplit('.',1)
                else:
                    prefix = option
                self.options['-out'].append( prefix + '.' )
        if len( self['-out'] ) != len ( self['-in'] ):
            sys.stderr.write( 'Must supply the same number of options to -in as to -out\n')
            sys.stderr.write( '\n.Got {0} and {1}, respectively.\n'.format(len( self['-in']) , len(self['out']) ) )
            raise IOError # -out specified, but have different number of -in files.

def save( save_name , object ):
    try:
        with file( save_name,'wb' ) as save_file:
            pickle.dump( object, save_file , -1 )
            print "##\tSaved pickle object to '{0}' (Use this to draw trees etc. other scripts)".format( save_file.name )
    except IOError:
        if sys.stdin.closed:
            suffix = 'results'
        else:
            suffix = 'results'
            if len(suffix) == 0:
                suffix = 'results'
        save_name = suffix + '.pkl'
        i = 0
        while os.path.exists( save_name ):
            i += 1
            save_name = '{0}{1}.pkl'.format( suffix,i )
        print '##\tSaving to {0}'.format( save_name )
        CONFIG.options['-out'] = save_name
        save( save_name , object )

def main( options ):
    t0 = time.time()
    ins, outs = ( options['-in'], options['-out'], )
    CONFIG.options = options
    CONFIG.debug   = options['--debug']
    n_files = len( ins )
    for file_ind in xrange( n_files ):
        t1 = time.time()
        #CONFIG.options = options.deepcopy()
        CONFIG.options.options['-in' ] = ins[file_ind ]
        CONFIG.options.options['-out'] = outs[file_ind]
        Iprepare = Preparer(CONFIG.options)
        results_dict = Iprepare.enter_loop( )
        print "##\t{0} ambiguous results".format(CONFIG.ambiguousCount)
        ### Save results files
        save( '{0}.pkl'.format(CONFIG.options['-out'].rstrip('.')) , results_dict )
        if options['--createXML']:
            startNode,startDir = findStart( getTaxIndex())
            print "##\tWriting phyloxml file to '{0}.xml'".format(options['-out'])
            with file('{0}.xml'.format(options['-out']),'w') as write_handle:
                write_xml(startDir,write_handle,results_dict)
        t = time.time()
        mins = int(t-t0) / 60
        print "##\tFinished {0} in {1} mins{2} secs".format( CONFIG.options['-in'], mins, (t - t0) - (mins*60))
    t = time.time()
    mins = int(t-t0) / 60
    print "##\tFinished everything in {0} mins{1} secs".format( mins, (t - t0) - (mins*60))


if __name__ == '__main__':
    args = sys.argv[1:]
    CONFIG.options = LocalOptions( args )
    #cProfile.run( 'main( CONFIG.options )' )
    main( CONFIG.options )
