#!/usr/bin/env python
from dict_to_phyloxml import write_xml
from count_hmms import countseqs
from ssummolib import Options, load_index, dict_walk , get_accessions
import os
import re
import sys
import time
import cPickle as pickle
import subprocess
import multiprocessing
import CONFIG
import Bio.SeqRecord
from Bio import SeqIO
#import ArbIO
from threading import Thread
CONFIG.ambiguousCount = 0




def getTaxIndex(silent=False):
    """Load the taxonomy index and prune it to the three top-level domains.

    Returns the index dict restricted (in place) to the keys
    'Bacteria', 'Eukaryota' and 'Archaea'.
    """
    tdict = load_index(silent=silent)
    # Snapshot the keys before deleting: removing entries while iterating
    # a live key view raises RuntimeError (and is fragile even where
    # keys() returns a list).
    for key in list(tdict.keys()):
        if key not in ('Bacteria', 'Eukaryota', 'Archaea'):
            del tdict[key]
    return tdict




class Parser( ):
    """Parse hmmsearch tabular output, producing one score dict per hit line.

    Behaviour is configured by swapping bound methods:
      * set_verbose(bool)      -- echo each hit line to stdout while parsing.
      * set_parse_header(bool) -- echo '#' header lines to stdout.
      * set_scorer(bool)       -- True parses ``--domtblout`` rows,
                                  False parses ``--tblout`` rows.
    Column layouts are those of hmmsearch v3.0.
    """
    def __init__( self, lock=None, verbose=False, domtblout=False ):
        # Reuse the caller's lock so output interleaves safely across
        # processes; only create a private one when none was supplied.
        self.lock = lock if lock is not None else multiprocessing.Lock()
        # Wire the line/header handlers immediately so parse() works even
        # if the caller never calls set_verbose()/set_parse_header();
        # previously the handlers stayed as no-op stubs until set explicitly.
        self.set_verbose( verbose )
        self.set_parse_header( verbose )
        self.set_scorer( domtblout )
        self.line_splitter = re.compile( r'\s+' )

    ## Line parsing
    def _parse_line( self, *args ):
        """API slot for the hit-line handler; bound by set_verbose()."""
        pass
    def _parse_verbose( self, line ):
        # Write the raw line (it keeps its own trailing newline) under the
        # lock so concurrent writers don't interleave output.
        with self.lock:
            sys.stdout.write( line )
        return self._get_scores( line )
    def _silent_parse( self, line ):
        return self._get_scores( line )

    def set_verbose( self, print_line=False ):
        """Choose whether each parsed hit line is echoed to stdout."""
        if print_line:
            self.verbose = True
            self._parse_line = self._parse_verbose
        else:
            self.verbose = False
            self._parse_line = self._silent_parse

    ## Extracting scores
    def _get_scores( self, *args ):
        """API slot for the hmmsearch result parser; bound by set_scorer().
        Table and domain-table parsers are available, tested against
        hmmsearch v3.0.
        """
        pass
    def _domtbl_scores( self, line ):
        """Parse one ``--domtblout`` row into a dict keyed by column name.

        Splits into at most 23 fields so the free-text description
        (column 22) survives intact.  Note HMMLen is left as a string.
        """
        values = self.line_splitter.split( line.rstrip(), 22 )
        # Column layout (hmmsearch v3.0 --domtblout):
        #  0 target name   1 accession  2 tlen       3 query name  4 accession
        #  5 qlen          6 E-value    7 score      8 bias        9 #
        # 10 of           11 c-Evalue  12 i-Evalue  13 dom score  14 dom bias
        # 15 hmm from     16 hmm to    17 ali from  18 ali to     19 env from
        # 20 env to       21 acc       22 description of target
        result_dict = {'accession'  : values[0]        ,
                       'seqLen'     : int(values[2])   ,
                       'HMM'        : values[3]        ,
                       'HMMLen'     : values[5]        ,
                       'Eval'       : float(values[6]) ,
                       'score'      : float(values[7]) ,
                       'bias'       : float(values[8]) ,
                       '#'          : int(values[9])   ,
                       'cEval'      : float(values[11]),
                       'iEval'      : float(values[12]),
                       'domscore'   : float(values[13]),
                       'dombias'    : float(values[14]),
                       'hmmstart'   : int(values[15])  ,
                       'hmmend'     : int(values[16])  ,
                       'alistart'   : int(values[17])  ,
                       'aliend'     : int(values[18])  ,
                       'envstart'   : int(values[19])  ,
                       'envend'     : int(values[20])  ,
                       'accuracy'   : float(values[21]),
                       'description': values[22],
                       }
        return result_dict

    def _tbl_scores( self, line ):
        """Parse one ``--tblout`` row: accession, full-sequence E-value and score."""
        values = self.line_splitter.split( line )
        result_dict = { 'accession' : values[0],
                        'Eval'      : float(values[4])  ,
                        'score'     : float(values[5])  ,
                }
        return result_dict
    def set_scorer( self, domain_table ):
        """If parsing hmmsearch results from the `--domtblout` option,
        then the tabular output is different. Pass True to parse
        domain table results. This is the default.
        Otherwise, pass False
        """
        if domain_table:
            self._get_scores = self._domtbl_scores
        else:
            self._get_scores = self._tbl_scores

    ## Header parsing
    def _parse_header( self, *args ):
        """API slot for '#' header lines; bound by set_parse_header()."""
        pass
    def _silent_header( self, line ):
        return
    def _parse_header_verbose( self, line ):
        # Original behaviour appended an extra newline after each header line.
        with self.lock:
            sys.stdout.write( line + '\n' )
        return
    def set_parse_header( self, print_header=True ):
        """Choose whether '#' header lines are echoed to stdout."""
        if print_header:
            self.printHeaders = True
            self._parse_header = self._parse_header_verbose
        else:
            self.printHeaders = False
            self._parse_header = self._silent_header

    def parse( self, hmmsearchProcess ):
        """Generator over hmmsearchProcess.stdout.

        '#' lines go to the header handler; every other line yields a
        score dict.  On a non-zero exit status the child's stderr is
        dumped and IOError raised.  stdout/stderr are always closed.
        """
        try:
            for line in hmmsearchProcess.stdout:
                if line[:1] == '#':
                    self._parse_header( line )
                else:
                    yield self._parse_line( line )
            retCode = hmmsearchProcess.wait()
            if retCode != 0:
                sys.stderr.write( hmmsearchProcess.stderr.read() )
                sys.stderr.flush()
                raise IOError
        finally:
            hmmsearchProcess.stdout.close()
            hmmsearchProcess.stderr.close()

def MergeDiverge( resultsDict, DivDict, tax_node ):
    """ Given the results_node dictionary (resultsDict), add accessions present
    in DivDict to the appropriate nodes in resultsDict.

    Only candidate nodes whose terminal OTU name is a child of tax_node are
    merged; 'start'/'start_node' bookkeeping keys are skipped.
    Returns (resultsDict, DivDict); DivDict is not modified."""
    tax_keys = tax_node.keys()
    for accession in DivDict.keys():
        for node in DivDict[accession].keys():
            if node == 'start_node' or node == 'start':
                continue
            # Candidate keys may be full paths; compare only the last
            # path component against this level's children.
            # (The previous code called os.path.sep.rsplit(node, 1),
            # i.e. split the separator BY the node name, which always
            # produced '/' instead of the terminal OTU.)
            cur_node = node.rsplit( os.path.sep, 1 )[-1] if os.path.sep in node else node
            if cur_node not in tax_keys:
                continue
            if cur_node in resultsDict:
                resultsDict[cur_node]['accessions'].append( accession )
            else:
                resultsDict[cur_node] = { 'accessions' : [accession] }
    return resultsDict, DivDict

def finalMerge( resultsDict, DivDict ):
    """Fold accessions that never converged (still present in DivDict) back
    into resultsDict after the traversal has finished.

    Each accession is re-attached under the node named by its recorded
    'start' path, creating intermediate nodes as needed; any earlier
    placement of the accession below that node is removed first.
    Returns the updated resultsDict.
    """
    reg = re.compile( '|'.join([ '(?<={1})({0}){1}'.format(key,os.path.sep) for key in resultsDict.keys() ] ) )
    # reg checks for Bacteria, Archaea or Eukaryota essentially..
    for accession in DivDict.keys():
        start = DivDict[accession]['start']
        firstNode = reg.search(start)
        if firstNode:
            # Path components from the first recognised domain downwards.
            pathList = start[firstNode.start():].split( os.path.sep )
        else:
            pathList = []

        # get to the start node
        node = resultsDict
        for OTU in pathList:
            if OTU not in node:
                node.update( { OTU : {} } )
            node = node[OTU]

        divKeys = DivDict[accession].keys()
        # NOTE(review): the membership tests below read `subnode`, but the
        # deletion indexes `node` -- this looks inconsistent; verify against
        # dict_walk's yield contract ((node, path) vs (path, node)).
        for subnode,path in dict_walk( '' , node ):
            if 'accessions' in subnode:
                if accession in subnode['accessions']:
                    del( node['accessions'][ node['accessions'].index( accession ) ] )
        if 'accessions' in node.keys():
            node['accessions'].append( accession )
        else:
            node.update( { 'accessions' : [accession] } )

        # NOTE(review): the bare `continue` below makes the rest of this
        # loop body unreachable -- apparently disabled deliberately; kept
        # untouched.
        continue
        del(divKeys[divKeys.index('start_node')])
        del(divKeys[divKeys.index('start')])
        for path_to_node in divKeys:
            end_node = node
            try:
                for taxa in path_to_node.split( os.path.sep ):
                    end_node = end_node[taxa]
            except KeyError:
                # NOTE(review): the print/raise after this `continue` are
                # themselves unreachable.
                continue
                print end_node.keys()
                raise
            if 'accessions' in end_node:
                if accession in node['accessions']:
                    del( end_node['accessions'][ end_node['accessions'].index( accession ) ] )

    return resultsDict

def SepDiverge(resultsDict, DivDict, node_keys, seq_db):
    """Given the results dictionary and the diverge dictionary,
    check the scores present in DivDict, looking for a single
    highest score.
    If there is a single highest score, place that in the
    appropriate location and delete it from DivDict.
    Otherwise, don't change anything.

    Returns (resultsDict, DivDict).  node_keys and seq_db are accepted
    for interface compatibility (seq_db was only used by disabled
    debugging output).
    """
    # Iterate a snapshot of the keys: converged accessions are deleted
    # from DivDict inside the loop, and mutating a dict while iterating
    # it raises RuntimeError.
    for accession in list(DivDict.keys()):
        divKeys = list(DivDict[accession].keys())
        divKeys.remove('start_node')   # bookkeeping keys, not candidates
        divKeys.remove('start')
        scores = [DivDict[accession][node] for node in divKeys]
        bestScore = max(scores)
        if scores.count(bestScore) != 1:
            continue  # still ambiguous -- leave for a later pass
        sys.stdout.write("We got a winner!\n")
        bestKey = divKeys[scores.index(bestScore)]
        # Candidate keys may be full paths; file results under the
        # terminal OTU name only.
        bestNode = bestKey.rsplit(os.path.sep, 1)[1] if os.path.sep in bestKey else bestKey
        if bestNode in resultsDict:
            # Guard against double-insertion (the old code indexed a
            # *string* with ['accessions'] on this path -- a TypeError).
            if accession not in resultsDict[bestNode]['accessions']:
                resultsDict[bestNode]['accessions'].append(accession)
        else:
            resultsDict[bestNode] = {'accessions': [accession]}
        # Delete by the original (possibly path-like) key; deleting by
        # the shortened name raised KeyError for path-like keys.
        del DivDict[accession][bestKey]
        clean_converged(accession, DivDict[accession])
        del DivDict[accession]
    return resultsDict, DivDict

def clean_converged( accession, diverge_dict_item ):
    """Remove *accession* from every candidate node recorded in
    diverge_dict_item.

    diverge_dict_item maps candidate routes (OTU names or os.path.sep
    delimited paths, relative to its 'start_node') to scores, plus the
    'start'/'start_node' bookkeeping keys.
    """
    # The old message was printed without .format(), emitting the
    # literal placeholder instead of the accession.
    sys.stdout.write( "{0} has converged!\n".format( accession ) )
    initial_node = diverge_dict_item['start_node']
    div_keys = list( diverge_dict_item.keys() )
    div_keys.remove( 'start_node' )
    div_keys.remove( 'start' )
    for route in div_keys:
        path = route.split( os.path.sep ) if os.path.sep in route else [route]
        # Walk from the recorded start node down to the candidate node.
        node = initial_node
        for p in path:
            node = node[p]
        if accession in node['accessions']:
            node['accessions'].remove( accession )
    return

def find_rel_path( node, OTUname, RelPath=None ):
    """Given a dictionary node, and an OTU name, depth-first search for
    OTUname from the top of node.

    Returns the relative path as a list of keys ending with OTUname,
    or None when OTUname is not found.  'accessions' entries are skipped.
    """
    # A mutable default ([]) here leaked found keys between calls;
    # build a fresh list per top-level invocation instead.
    if RelPath is None:
        RelPath = []
    for key in node.keys():
        if key == OTUname:
            RelPath.append( key )
            return RelPath
        elif key == 'accessions':
            continue
        else:
            found = find_rel_path( node[key], OTUname, RelPath=RelPath + [key] )
            if isinstance( found, list ):
                return found
    return None

def score2( scorer_pipe, tax_node, path, results_dict = None, diverge_dict = None ):
    """Called once per iteration. Therefore needs to receive the node names once
    at the start. Then it should start receiving lots of hmmmsearchThread results.

    Receives from scorer_pipe: first the sorted node names, then
    (accession, (Evals, scores)) tuples until the sentinel 'end'.
    A unique best score assigns the accession to that node in
    results_dict; tied best scores record the accession in diverge_dict
    for later disambiguation.  Returns (results_dict, diverge_dict).
    """
    if results_dict is None:
        results_dict = {}
    elif 'accessions' in results_dict:
        # Accessions at this level are being redistributed to children.
        results_dict.pop( 'accessions' )
    if diverge_dict is None:
        diverge_dict = {}
    inval = scorer_pipe.recv()  # This'll be the (sorted!) node names.
    nodes = inval
    inval = scorer_pipe.recv()  # This'll then be (accession, results,) for a single sequence.
    #results_dict = {}
    choices = set()
    unique_accessions = []
    ambiguous_nodes = {}
    while inval != 'end':
        try:
            accession, results = inval
        except Exception:
            print inval
            raise
        # NOTE(review): Evals is unpacked but never used below.
        Evals, scores = results
        best_score = max( scores )
        n_best = scores.count( best_score )
        if n_best == 1:
            # Unique winner: scores are ordered like `nodes`.
            choice = nodes[ scores.index( best_score ) ]
            if choice in choices:
                results_dict[choice]['accessions'].append( accession )
            else:
                choices.add( choice )
                results_dict.update( { choice : {'accessions': [accession] } } )
            if accession in unique_accessions:
                # Diagnostic for double-assignment.
                print "Already got {0} assigned to".format( accession ),
                for ch in choices:
                    if accession in results_dict[ch]['accessions']:
                        print ch,' ',
                    # NOTE(review): this print runs once per choice; it was
                    # probably meant to sit one indent level out.
                    print 'Now we have it for {0}'.format( choice )
            unique_accessions.append( accession )
        elif n_best > 1: ## Ambiguous node!
            CONFIG.ambiguousCount += 1
            prev_index = -1
            top_scorers = []
            # Collect every node that shares the best score, in order.
            for count in xrange( n_best ):
                prev_index = scores.index( best_score, prev_index + 1 )
                top_scorers.append( nodes[prev_index] )
                if accession in diverge_dict.keys():
                    # Record the route (relative to the recorded start node).
                    OTU_order = os.path.sep.join(  find_rel_path( diverge_dict[accession]['start_node'] , top_scorers[-1] )  )
                    diverge_dict[accession].update( { OTU_order : best_score } )
                else:
                    diverge_dict.update( { accession : { top_scorers[-1] : best_score, 'start' : path , 'start_node' : tax_node } } )
            top_scorers = tuple( top_scorers )
            # Tally how often each tied combination occurs, for reporting.
            if top_scorers in ambiguous_nodes:
                ambiguous_nodes[top_scorers] += 1
            else:
                ambiguous_nodes.update( { top_scorers : 1 } )
            #print 'Ambiguous node between: {0}'.format( ' & '.join( top_scorers ) )
        elif n_best == 0:
            print '##\tno matches for accession {0}'.format(accession)
        else:
            print "##\tn_best = {0}".format(n_best)
            raise ValueError("Can't figure out the winning nodes")
        inval = scorer_pipe.recv()
    if len( ambiguous_nodes ) > 0:
        print_ambiguous_results( ambiguous_nodes )
    return results_dict, diverge_dict

def print_ambiguous_results( amb_nodes ):
    """Print one summary line per set of equally-scored (ambiguous) nodes.

    amb_nodes maps a tuple of node names to the number of sequences that
    tied across those nodes.
    """
    sys.stdout.write( '# Ambiguous nodes:-\n' )
    for amb_node, count in amb_nodes.items():
        comp = ' & '.join( amb_node )
        # Two spaces before the dash reproduce the historical layout
        # (print's comma used to insert one of them).
        sys.stdout.write( '# ' + comp.ljust(40) + '  - {0}\n'.format( count ) )

class Scorer( Thread ):
    """
    Let's put (through inQ) results here as they're produced by
    parseHMMThread. This will continue to collate and order them
    as they're made ready. As soon as a sequence has all results
    made ready for it, then we'll pass over to scorer.

    Commands arrive on inQ as ('reset'|'update'|'end'|'die', args...)
    tuples and are dispatched via self.fns; collated per-accession
    (Evals, scores) lists are sent down out_pipe to score2().
    """
    def __init__( self, hmmsearch_outQ, result_pipe, distrib_end_queue ):
        Thread.__init__(self)
        self.inQ = hmmsearch_outQ          # commands from HMMSearchThreads
        self.out_pipe = result_pipe        # results to score2()
        self.d_Q = distrib_end_queue       # finished-node notifications to distributor
        self.fns = {'end'    : self._end,
                    'update' : self._update,
                    'reset'  : self._reset,
                    'die'    : self._die,
                   }
        self.err_count = 0
    def _reset( self , nodes ):
        # from SSUMMO / reverser
        # Start a new level: forward the node ordering to score2 and
        # pre-build a zeroed score slot per node.
        self.results = {}
        self.n_accessions_got = {}
        self.results_val = []
        self.hmm_indices = {}
        self.out_pipe.send( nodes ) #  --> score2
        self.n_to_do = len( nodes )
        for i,node in enumerate( nodes ):
            self.hmm_indices.update( { node : i } )
            self.results_val.append( float() )
        self.n_done = 0

    def _update( self, OTU, results ):
        # Ordered by HMMSearchThread
        # Record one hit: slot its Eval/score at this OTU's index.
        (accession, Eval, score, ) = (results['accession'] , results['Eval'] , results['score'], )
        HMM_index = self.hmm_indices[ OTU ]
        ## Update result for this accession.
        if accession in self.results:
            self.results[accession][0][ HMM_index ] = Eval
            self.results[accession][1][ HMM_index ] = score
            self.n_accessions_got[accession] += 1
            #if self.n_accessions_got[accession] == self.n_to_do:
            #    self.out_pipe.send( (accession, self.results.pop( accession ),) )   #  -->  score2
        else:
            # [:] copies the zeroed template so accessions don't share lists.
            self.results.update( { accession : [ self.results_val[:], self.results_val[:] ] } )
            self.n_accessions_got.update( { accession : 1 } )
            self.results[accession][0][ HMM_index ] = Eval
            self.results[accession][1][ HMM_index ] = score
        return

    def _end( self, OTU ):
        # Orders from HMMSearchThread
        # One node's hmmsearch finished; once all have, flush everything
        # to score2 followed by the 'end' sentinel.
        self.n_done += 1
        self.d_Q.put( OTU )  # --> Tell distributor what Thread just finished.
        if self.n_done == self.n_to_do:
            for accession in self.results.keys():  
                self.out_pipe.send( ( accession, self.results.pop(accession), ) )
            self.out_pipe.send( 'end' ) # --> score2
            del( self.results )

    def _die( self, error ):
        # Report the first error only, and unblock score2 with 'end'.
        if self.err_count == 0:
            sys.stderr.write( error )
            self.err_count += 1
            self.out_pipe.send('end') # --> score2

    def run(self):
        # Dispatch loop: consume commands until the 'END' sentinel.
        inval = self.inQ.get()
        while inval != 'END':
            fn = inval[0]
            args = inval[1:]
            self.fns[fn]( *args )
            inval = self.inQ.get()
        return

#class HMMSearchThread( Thread):
        #Thread.__init__(self)
class HMMSearchThread( multiprocessing.Process ):
    """Worker process that runs hmmsearch for one HMM at a time.

    Receives (nseqs, HMM path) then the sequences over __outPipe, pipes
    the sequences to hmmsearch on stdin, parses the --domtblout results
    and forwards them to Scorer via outQ.
    """
    def __init__(self,inPipe,outPipe, d_Q, lock):
        multiprocessing.Process.__init__(self)
        # Pick the hmmsearch binary matching the machine architecture.
        if os.uname()[-1] == 'x86_64':
            self.hmmsearch = os.path.join( CONFIG.hmmerdir, CONFIG.hmmsearchCMD )
        else:
            self.hmmsearch = os.path.join( CONFIG.hmmer32dir, CONFIG.hmmsearchCMD )
        self.inPipe, self.__outPipe = (inPipe, outPipe, )
        self.outQ = d_Q  ## Goes to Scorer.
        #self.sem = semaphore
        # 'REPLACED' is a placeholder for the HMM path, filled per node in run().
        if CONFIG.options['--max']:
            self.command = [ self.hmmsearch,'-o',os.devnull,'--max','--domtblout','/dev/stdout','--noali','REPLACED','/dev/stdin' ]
        else:
            self.command = [ self.hmmsearch,'-o',os.devnull,'--domtblout','/dev/stdout','--noali','REPLACED','/dev/stdin' ]
        CONFIG.printHeaders = False
        self.lock = lock
        self.parser = Parser()
        self.parser.set_scorer(True)  # parse domtblout instead of tblout

    def run(self):
        # NOTE(review): Eval/score thresholds are read but not applied here;
        # presumably filtering happens downstream -- confirm.
        self.Eval = CONFIG.options['-Eval']
        self.score = CONFIG.options['-score']
        parser = self.parser.parse
        if CONFIG.options['--verbose']:
            self.parser.set_verbose(True)
        else:
            self.parser.set_verbose(False)
        nseqs,HMMLocation = self.__outPipe.recv()
        while nseqs != 'END':
            self.command[-2] = HMMLocation  ### This replaces 'REPLACED' in self.command, setting the HMM location.
            try:
                process = subprocess.Popen( self.command ,shell=False,stdin=subprocess.PIPE,stderr=subprocess.PIPE,stdout=subprocess.PIPE,bufsize=-1)
            except OSError:
                sys.stderr.write( "COMMAND FAILED: {0}\n".format( self.command ) )
                raise
            try:
                # feed in the sequences received from distributor.
                for seq in self.__outPipe.recv():
                    process.stdin.write( seq )
            except IOError,e:
                # hmmsearch died while we were writing; surface its stderr
                # and tell Scorer to shut down.
                self.lock.acquire()
                sys.stderr.write( 'hmmsearch error:-\n{0}\n\n'.format(process.stderr.read() ))
                sys.stderr.flush()
                self.lock.release()
                self.outQ.put( ('die',e) ) # --> Scorer
                raise
            finally:
                process.stdin.close()
            # Node name = HMM filename without directory or extension.
            OTUName = HMMLocation[ HMMLocation.rfind( os.path.sep ) + 1: HMMLocation.rfind('.') ]
            for results in parser( process ):
                self.outQ.put( ('update',OTUName, results,)  )   # --> Scorer
            #self.sem.release()  # Place if using semaphores, which seem unnecessary..?
            self.outQ.put( ('end',OTUName,) ) ## --> Scorer
            nseqs,HMMLocation = self.__outPipe.recv()
        self.__outPipe.close()
        return

def SSUMMO(node, path,results_node,seqDB,result_pipe,diverge_dict={}):
    ## At each path, choose which dir to go into, by performing
    ## hmmsearch on the fasta sequence against each next HMM.
    depth = path.count( os.path.sep ) - CONFIG.arbDBdir.count( os.path.sep )
    node_keys = sorted(node.keys())
    if 'accessions' in node_keys:
        del(node_keys[node_keys.index('accessions')])
    num_nodes = len(node_keys)

    if num_nodes == 0:
        return results_node, diverge_dict
    ## Merge ambiguous results with results_node
    results_node,diverge_dict = MergeDiverge(results_node,diverge_dict, node)
    seqDB.outQ.put(( 'load', path, node_keys, )) ## --> sequence_distributor.

    ## First time we enter SSUMMO(), seqDB already has all sequences loaded.
    if os.path.realpath(path) == os.path.realpath( CONFIG.options['-start']):
        seqs = 'all'  
        seqDB.prefetchQ.put( ('get_all',None ) )
    else:
        accessions = results_node['accessions']
        seqDB.prefetchQ.put(('get',accessions,))  ## --> seqDB.
    if num_nodes == 1:  ## If only one node, no need for hmmsearch'ing.
        singleNode = node_keys[0]
        results_node =  { singleNode: { 'accessions' : results_node.pop( 'accessions' ) } }
        seqDB.prefetchQ.put( ('Skip',None) )
    else:  
        seqDB.prefetchQ.put( ('Proceed',None) )
        results_node, diverge_dict = score2( result_pipe, node , path ,  diverge_dict=diverge_dict)

    results_node, diverge_dict = SepDiverge(results_node,diverge_dict,node_keys,seqDB)
    if len(results_node) == 0 and diverge_dict=={}:
        print "##\tNo results"
        print "##\t", node_keys
        print "##\t", path
        # Actually could throw an error in here. i.e. no need for the clause..? Never actually seen these printed.
        return results_node, diverge_dict 
    ## Separate the results after doing SSUMMO on all nodes.
    choices = sorted( results_node.keys() )

    ## Recursively do SSUMMO.
    for choice in choices: 
        if len(results_node[choice]['accessions']) == 0:
            del(node[choice])  ## If node has no results, delete it.
        else:  ## otherwise recursively do SSuMMO on winning node.
            print_tree_line( choice, results_node, depth )
            results_node[choice], diverge_dict = SSUMMO(node.pop(choice), os.path.join(path, choice ), results_node[choice],seqDB,result_pipe,diverge_dict=diverge_dict)
    return results_node, diverge_dict

def print_tree_line( choice, results_node, depth ):
    """Write one line of the ASCII results tree for *choice*.

    The connector glyph depends on where choice sits among its sorted
    siblings: '+-' first of several, '|_' last, '|-' otherwise (and for
    an only child).
    """
    siblings = sorted( results_node.keys() )
    if len( siblings ) == 1 and choice == siblings[0]:
        connector = '|-'
    elif choice == siblings[0]:
        connector = '+-'
    elif choice == siblings[-1]:
        connector = '|_'
    else:
        connector = '|-'
    indent = ' ' * depth * 3
    n_accessions = len( results_node[choice]['accessions'] )
    label = '{0}{1} {2}'.format( indent, connector, choice ).ljust( 45 )
    sys.stdout.write( label + ' ({0})\n'.format( n_accessions ) )
    sys.stdout.flush()
    return

def findStart( tdict ):
    """Locates the directory where to enter the SSUMMO loop.
    Default is to start in arbDBdir, which is configured in
    CONFIG.py.
    To change, give the command option '-start /some/path/to/dir'

    Returns (node, startDir): the taxonomy sub-tree to start from and
    its directory.  Falls back to (tdict, CONFIG.arbDBdir) when the
    requested start path contains no known top-level node.  As a side
    effect, rewrites CONFIG.options['-start'] to the resolved path.
    """
    if os.path.realpath(CONFIG.options['-start']) == os.path.realpath(CONFIG.arbDBdir):
        return tdict, CONFIG.arbDBdir
    else:
        found = False
        startKeys = tdict.keys()
        startDir = CONFIG.options['-start']
        startDir = startDir.rstrip(os.path.sep)
        pathList = CONFIG.options['-start'].split( os.path.sep)
        # Find the first path component that is a known top-level node.
        for key in startKeys:
            if key in pathList:
                firstNode = pathList.index(key)
                found = True
                break
            else:
                continue
        node = tdict
        if not found:
            return tdict, CONFIG.arbDBdir
        # Walk the taxonomy dict down to the requested start node.
        # NOTE(review): if every component after firstNode is empty,
        # parentNode is never bound and the print below would raise
        # UnboundLocalError -- confirm inputs can't produce that.
        for nodeName in pathList[firstNode:]:
            if nodeName.strip() == '':
                continue
            else:
                node = node[nodeName]
                parentNode = nodeName
        startDir = os.path.join( CONFIG.arbDBdir,os.path.sep.join(pathList[firstNode:]) )
        CONFIG.options['-start'] = startDir
    print "##\tStarting SSUMMO from node '{0}' at path '{1}'".format(parentNode, startDir)
    return node, startDir

#class seqDB( multiprocessing.Process ):
class seqDB( Thread ):
    """In-memory sequence store and server thread.

    Loads every sequence from the input file once, then serves command
    tuples arriving on prefetchQ, forwarding sequences to the
    sequence_distributor via outQ.
    """
    def __init__( self, seqfile, prefetchQ, distribute_Q, distributor_end_queue, threads,lock,reverse_pipe=None,format='fasta'):
        """This is the only process that reads the sequence input file (seqfile).

        prefetchQ must be a multiprocessing.Queue() object. In this, you must put the
           following keywords, which are referred to in self.functions:-
               *    reverse
               *    forget
               *    slice
               *    get_all
               *    get
               *    Skip
               *    Proceed
        See the help for the relevant function to see what they do.
        You can add your own functions too.

        distribute_Q - Don't touch. Scorer uses this to synchronise finished nodes.
        threads   - list of threads which sequences shall be distributed to.
        lock      - multiprocessing.Lock() instance
        reverse_pipe - Pipe used by seqDB.reverse()
                       Send accessions down the other pipe end, and
                       this will reverse them.
        format    - format of sequences in the sequence file.
        """
        #multiprocessing.Process.__init__(self)
        Thread.__init__(self)
        self.prefetchQ = prefetchQ
        self.seqfile = seqfile
        self.format = format
        self.rpipe = reverse_pipe
        self.lock = lock
        #self.outQ = multiprocessing.Queue()  # Queue going to distributor.
        self.distributor = sequence_distributor( distribute_Q , threads , distributor_end_queue, prefetchQ)
        self.threads = threads
        self.outQ = distribute_Q
        self.distributor.start()
        # Command-name -> handler dispatch for run()'s main loop.
        self.functions = {
                'reverse'   : self.reverse,
                'forget'    : self.forget,
                'slice'     : self.slice,
                'get_all'   : self.get_all,
                'get'       : self.get,
                'Skip'      : self.skip,
                'Proceed'   : self.proceed,
                'die'       : self.die,
                }
        self.alive = False
    def load_sequences( self ):
        """Parse self.seqfile into self.seqs, keyed by accession."""
        if self.format == 'fasta':
            # For fasta, the accession is the first whitespace-delimited
            # token of the description line.
            desc_reg = re.compile( r'^(\S+)\s*(\S*.*)[\n\r]?$' )
            accession_info = lambda SeqRecord: desc_reg.search( SeqRecord.description ).groups()
        else:
            accession_info = lambda SeqRecord: (SeqRecord.id , SeqRecord.description)
        self.seqs = {}
        i = 0
        # NOTE(review): ids/descs are never used below.
        ids = set()
        descs = set()
        with file(self.seqfile,'r') as inFile:
            for seq in SeqIO.parse( inFile , self.format ):
                #accession, info = desc_reg.search( seq.description ).groups()
                accession, info = accession_info( seq )
                self.seqs.update( { accession : seq } )   # Just modified this to use accession rather than seq.id, in conformance with the reg exp sequence description parser in SSUMMO_tally.
                i += 1
        print 'parsed {0} sequences'.format(i)
        return

    def run(self):
        # Load everything up front, optionally enter the reverse loop,
        # then serve commands from prefetchQ until 'END'.
        self.alive = True
        self.load_sequences()
        self.lock.acquire()
        sys.stdout.write(  "SeqDB holding {0} sequences from {1} in memory.\n".format( len(self.seqs) , self.seqfile ) )
        sys.stdout.flush()
        self.lock.release()
        #self.slice_all()  ## Uses prefetch Q.
        if self.rpipe != None:
            self.reverse()   # Enter reverse loop. This shall slice the sequences too.
        inval = self.prefetchQ.get()
        try:
            while inval != 'END':
                fn = self.functions[ inval[0] ]
                args = inval[1]
                fn( args )
                inval = self.prefetchQ.get()
        except (KeyboardInterrupt,Exception,):
            self.shutdown()
            raise
       # else:
        self.shutdown()

    def skip( self, *args ):
        """Put 'Skip' into outQ, for sequence_distributor."""
        self.outQ.put( ('Skip',None) )
        pass
    def proceed( self, *args ):
        """Put 'Proceed' into outQ, for sequence_distributor."""
        self.outQ.put( ('Proceed',None) )
        pass
    def get_all(self,*args):
        """Puts all sequences, in fasta format, into self.outQ."""
        for seq in self.seqs.itervalues():
            self.outQ.put( ('add',seq.format('fasta'),) )
        return
    def get( self, accessions ):
        """Given a list of accessions, puts each one into self.outQ."""
        for acc in accessions:
            self.outQ.put( ('add',self.seqs[acc].format('fasta')) )
        return
    def forget( self, *accessions ):
        """Deletes all accessions from memory."""
        for acc in accessions:
            del( self.seqs[ acc ] )
    def die(self,e):
        """Error handler: drain the command loop, shut down, re-raise."""
        self.prefetchQ.put('END')
        if self.alive:
            self.shutdown()
        raise(e)

    def shutdown(self):
        """Closes down all threads."""
        for thread in self.threads:
            thread.inPipe.send( ('END', None, ) )
        self.outQ.put( 'END' )  ## --> sequence_distributor
        # NOTE(review): Queue.close() exists on multiprocessing queues only.
        self.prefetchQ.close()  ## End SeqDBQ
        self.distributor.join()
        self.alive = False
        return
    def slice_all(self):
        """Keeps reading self.prefetchQ, expecting the tuple (accession, start, end).
        For each tuple received, this will slice the relevant sequence accordingly.
        This will continue to expect tuples until it gets "END" from self.prefetchQ.

        N.B. Sequences are sliced inclusive of given position.
        """
        inval = self.prefetchQ.get()
        while inval != 'END':
            id, start, end = inval
            self.slice( id, start, end+1 )
            inval = self.prefetchQ.get()
        return

    def slice( self , accession, start, end ):
        """This will slice sequence with seq.id accession, from
        start to end, INCLUSIVE. i.e. using python indexing [start -1 : end + 1].
        This was chosen to comply with hmmer alignment co-ordinates.

        NOTE(review): the code below actually slices [start : end + 1]
        (no -1 on start) -- docstring and implementation disagree; verify
        which is intended before changing either.
        """
        self.seqs[ accession ] = self.seqs[ accession ][ start : end + 1 ]
    def reverse( self ):
        """Called once before traversing the directory. The input pipe (self.rpipe)
        is closed before returning None.

        First streams every sequence to the distributor, then receives
        accessions to reverse-complement until the 'END' sentinel.
        """
        if self.rpipe is None:
            raise ValueError( "No reverse pipe given to seqDB!!" )
        try:
            for fasta_sequence in self.seqs.itervalues():
                self.outQ.put( ('add',fasta_sequence.format('fasta'),) )  # To distributor
        except AssertionError,e:
            return
        self.outQ.put( ('Proceed',None,) )
        to_reverse = self.rpipe.recv()
        reversing = 0
        while to_reverse != 'END':
            # Replace the record with its reverse complement in place.
            seq = self.seqs[to_reverse]
            self.seqs.update( { to_reverse : Bio.SeqRecord.SeqRecord(seq.seq.reverse_complement(), id = seq.id, name=seq.name ) } )  
            #seq = self.seqs[to_reverse].seq.reverse_complement()
            reversing += 1
            to_reverse = self.rpipe.recv()
        self.rpipe.close()

class sequence_distributor( Thread ):
    """Thread that fans sequences and HMM jobs out to the HMMSearchThread pool.

    Listens on ``seqDBQ`` for ``(command, args...)`` tuples (from seqDB /
    Preparer) and dispatches them through the ``self.fns`` lookup table
    until it receives 'END'.  Job completions arrive on ``jobEndQ`` (node
    names posted by the Scorer); errors are forwarded to seqDB on
    ``SeqDBInQ``.
    """
    def __init__( self, seqDBQ, hmmsearch_threads, jobEndQ ,SeqDBInQ):
        Thread.__init__( self )
        self.inQ = seqDBQ        # commands in (from seqDB)
        self.jobEndQ = jobEndQ   # node names posted by Scorer as jobs finish
        self.errorQ  = SeqDBInQ  # error channel back to seqDB
        self.threads = hmmsearch_threads
        self.n_threads = len( self.threads )
        self.thread_nums = set( range( self.n_threads ) )
        # Command dispatch table for run().
        self.fns = { 'add'     : self._addseq ,
                     'Proceed' : self._proceed,
                     'Skip'    : self._skip   ,
                     'load'    : self._load   ,
                     'die'     : self._die    ,
                   }
        self.dead = 0
        self.seqs = set()

    def _addseq( self, seq ):
        """Accumulate one fasta-formatted sequence string sent by seqDB."""
        self.seqs.add( seq )
    def _die( self, e ):
        """Forward the first error to seqDB; just count any further ones."""
        if self.dead == 0:
            self.errorQ.put( ('die' , e) )
        self.dead += 1
        return

    def _proceed( self, *args ):
        # From seqDB. Starts the cascade: tell the Scorer the node order,
        # then distribute the accumulated sequences across the pool.
        self.threads[0].outQ.put( ('reset', self.nodes) ) # --> Scorer. Tells order of nodes to search.
        self.distribute( )
    def _skip( self, *args ):
        """Discard the sequences accumulated so far."""
        self.seqs = set()
    def _load( self, directory , nodes ):
        # inval <-- SSUMMO / Preparer.reverser
        # Reset the sequence set and precompute the .hmm path of every node.
        self.seqs = set()
        self.nodes = nodes
        self.paths = [ os.path.join( directory, node, node + '.hmm' ) for node in nodes ]
    def run( self ):
        """Main loop: dispatch incoming commands until 'END' is received."""
        self.threadInd = 0
        inval = self.inQ.get()
        try:
            while inval != 'END':
                fn = inval[0]
                args = inval[1:]
                self.fns[fn]( *args )
                inval = self.inQ.get()
        except Exception as e:
            self._die(e)
            raise   # bare raise preserves the original traceback (raise(e) did not)
        self.inQ.close()

    def distribute( self ):
        """Submit one hmmsearch job per node, at most n_threads at a time,
        and consume every completion notification before returning."""
        self.threadInd = 0
        num_nodes = len(self.paths)
        num_seqs = len( self.seqs )
        self.thread_indices = {}
        min_val = min( [num_nodes, self.n_threads] )

        # Start as many processes as possible, constrained by number of nodes or available processes.
        for i in xrange( min_val ):
            self.threads[i].inPipe.send( [ num_seqs , self.paths[i] ] )
            self.thread_indices.update( { self.nodes[i] : i } )
            self.threads[i].inPipe.send( self.seqs )
            self.threadInd += 1
        # so if there's more nodes than HMMSearchThread's.
        if min_val != num_nodes:  
            # This loop covers submitting jobs for the remaining nodes, so long as there's a free thread. #
            for index in xrange( min_val , num_nodes ):  
                self.get_free_thread()   # blocks until a thread finishes its job
                self.threads[self.threadInd].inPipe.send( [ num_seqs , self.paths[index] ] )
                self.thread_indices.update( { self.nodes[index] :self.threadInd } )
                self.threads[self.threadInd].inPipe.send( self.seqs )
            # Get the results.
            for i in xrange( min_val ):
                self.thread_indices.pop( self.jobEndQ.get() )  #  Must come from Scorer.
        else:
            for i in xrange( num_nodes ): # === xrange( min_val )
                self.thread_indices.pop( self.jobEndQ.get() )  #
        return

    def get_free_thread( self ):
        """Block until a job finishes, recording which thread is now free."""
        node_name = self.jobEndQ.get()
        self.threadInd = self.thread_indices.pop( node_name )
        return

class Preparer( ):
    def __init__( self ):
        """After initialising, just need to call the prepare()
        method.
        
        prepare() tries to start the seqDB process as well as
        starting the hmmsearch threads.
        
        Once these are ready, start_threads() returns and
        _cascade() is called.

        _cascade enters the main SSUMMO loop by calling any methods
        defined in self.premethods, which is a list of function
        calls.
        
        We also have lists of function calls for self.recursemethods
        and self.postmethods.

        The default methods are:-
        premethods = [ self.reverser , self.find_start , self._print_nodes ]
        recursemethods = []
        postmethods = []

        By default, the only recurse method is determined by self.find_start,
        depending on CONFIG.options['-start'] (usually given on command line).
        """
        self.seqDB = seqDB
        self.premethods = [ self.reverser , self.find_start , self._print_nodes ]
        self.recursemethods = []
        self.postmethods = []
        self.START_DIR = str()
        self.START_NODE = dict()
        self.DIVERGE_DICT = dict()
        self.RESULTS = dict()

    #def call_iter( self , results , start_node , start_dir , diverge_dict ):
    def call_iter( self ):
        for node in self.RESULTS.keys():
            if node == 'accessions':
                continue
            self.RESULTS[node], self.DIVERGE_DICT = SSUMMO(self.START_NODE[node], os.path.join( self.START_DIR, node ), self.RESULTS[ node ], self.seqDB, self.score_pipe_child,diverge_dict=self.DIVERGE_DICT)
        return
        #return results , diverge_dict

    #def call( self, results , start_node , start_dir , diverge_dict ):
    def call( self ):
        self.RESULTS, self.DIVERGE_DICT = SSUMMO( self.START_NODE , self.START_DIR , self.RESULTS, self.seqDB , self.score_pipe_child , diverge_dict=self.DIVERGE_DICT)
        self.RESULTS = self.shuffle_back( self.RESULTS , self.START_DIR  )
        return
        #return self.results, diverge_dict

    def shuffle_back( self , results , start_dir ):
        full_results = {}
        node_path = start_dir[ len( CONFIG.arbDBdir ):].strip( os.path.sep ).split( os.path.sep )
        for node in node_path:
            full_results.update( { node : {} } )
        full_results.update( results )
        return full_results

    def find_start( self ):
        """If the -start option was given, then this shuffles the results
        along accordingly and also calls SSuMMo on a single node, whereas 
        at the root, need to iterate through all 3 domains.

        This therefore sets self.start_node and self.start_dir, indicating
        which node we should start SSuMMo.
        """
        tdict = getTaxIndex(silent=True)
        self.START_NODE, self.START_DIR = findStart(tdict )
        if self.START_DIR not in [ CONFIG.arbDBdir , 'Bacteria','Archaea','bac','arc' ]:
            self.RESULTS, self.START_NODE = self.shuffle_along( self.RESULTS,self.START_DIR, self.START_NODE )
            print "# starting at '{0}'".format(self.START_DIR)
            self.recursemethods.append( self.call )
        else:
            self.recursemethods.append( self.call_iter )

    def _cascade( self ):
        t0 = time.time()
        # Counting for debugging purposes.
        n_before = len( get_accessions( self.RESULTS , accessions=[] ) )

        for method in self.premethods:
            method()
        t = time.time()

        # Counting for debugging purposes.
        n_mid = len( get_accessions( self.RESULTS , accessions = [] ) )

        for method in self.recursemethods:
            method()
        #self.RESULTS, self.DIVERGE_DICT = self.caller( self.RESULTS, self.START_NODE , self.START_DIR , self.DIVERGE_DICT )

        # Counting for debugging purposes.
        n_after = len( get_accessions( self.RESULTS , accessions = [] ) )
        
        for method in self.postmethods:
            method()

        t = time.time() - t0

        # Counting for debugging purposes.
        n_final = len( get_accessions( self.RESULTS , accessions = [] ) )

        self.shutdown()
        print '# before shuffle',n_before
        print '# before SSUMMO',n_mid
        print '# after SSUMMO' , n_after
        print '# after merge', n_final
        print "##\tprocessed {0} sequences in {1} seconds".format(countseqs(CONFIG.arbDBdir, file_name = self.seq_file_name), t)
    	return self.RESULTS

    def _final_merge( self ):
        self.RESULTS = finalMerge( self.RESULTS, self, self.DIVERGE_DICT )

    def _print_nodes( self ):
        for node in self.RESULTS.keys():
            if node == 'accessions':
                continue
            print_tree_line( node, self.RESULTS, 0 )

    def _start_threads( self ):
        self.threads = []
        thread_pipes = [multiprocessing.Pipe() for i in xrange( int(CONFIG.options['-ncpus']))]
        self.score_pipe_parent,self.score_pipe_child = multiprocessing.Pipe()
        self.hmmsearch_outQ = multiprocessing.Queue()
        self.lock = multiprocessing.RLock()
        for i in xrange(int(CONFIG.options['-ncpus'])):
            self.threads.append( HMMSearchThread( thread_pipes[i][0], thread_pipes[i][1], self.hmmsearch_outQ ,self.lock) )
        CONFIG.printHeaders = True  ## Doesn't actually work. In case we wanted to print hmmsearch results.
        for thread in self.threads:  
            thread.start()
        self.seq_file_name = CONFIG.options['-in']

        #### Create queues & pipes.
        self.prefetchQ = multiprocessing.Queue() # Send accessions for seqDB prefetching.
        self.distribute_Q = multiprocessing.Queue()
        distrib_end_Q = multiprocessing.Queue()
        self.reverse_pipe_in , self.reverse_pipe_out = multiprocessing.Pipe()

        #### Create processes / threads (calls __init__ on each one).
        self.seqDB = seqDB(self.seq_file_name, self.prefetchQ, self.distribute_Q, distrib_end_Q, self.threads, self.lock,reverse_pipe = self.reverse_pipe_out , format=CONFIG.options['-format'])
        self.scorer = Scorer( self.hmmsearch_outQ, self.score_pipe_parent, distrib_end_Q )

        #### Start them.
        self.scorer.start()
        self.seqDB.start()
        return

    def prepare( self ):
        """Given the name of a sequence file, makes a blast database
        and provides the entry point for the SSUMMO algorithm. Returns
        a dictionary of results containing taxonomic nodes with matches
        and at each node the accessions that have been assigned there.
        """
        try:
            print 'starting threads'
            self._start_threads()
            results = self._cascade()
        except (KeyboardInterrupt,):
            sys.stderr.write('SHUTTING DOWN...\n')
            self.shutdown()
            raise
        except Exception:
            raise
        else:
            return results

    def shutdown( self ):
        try:
            self.seqDB.prefetchQ.put('END')  ## SeqDB will end Distributor. Distributor will end hmmsearch threads.
            self.hmmsearch_outQ.put('END')  ## Scorer inQ
            self.seqDB.join()
            self.scorer.join()
            for thread in self.threads:  # Is it quicker to do in two loops or one?
                thread.join()
        except AttributeError:
            pass

    def shuffle_along( self , results_dict , start_dir ,start_node):
        rel_start = start_dir[ len( CONFIG.arbDBdir ) : ].strip( os.path.sep )
        rel_path = rel_start.split( os.path.sep )
        first = rel_path[0]
        accessions = [ accession for accession in get_accessions( results_dict[ first ], accessions=[] ) ]
        for path in rel_path:
            results_dict = results_dict[path]
        results_dict.update( { 'accessions' : accessions } )
        return results_dict, start_node

        last_dir = os.path.realpath(CONFIG.arbDBdir).rstrip( os.path.sep ).rsplit( os.path.sep,1 )[-1]
        start_dir_list = os.path.realpath(start_dir).rstrip( os.path.sep ).split( os.path.sep )
        if last_dir == start_dir_list[-1]:
            folders = []
        else:
            try:
                # Try to find the nodes in relation to the arbDBdir.
                folders = start_dir_list[ start_dir_list.index( last_dir ) + 1: ]
            except ValueError:
                raise ValueError( 'arbDBdir in CONFIG.py ({0}) must start with same file path as -start ({1})'.format( CONFIG.arbDBdir , start_dir  ))
        node = results_dict
        #start_nodes = set( [folder for folder in folders] )
        start_nodes = set( start_node.keys() )
        for folder in folders:
            ## Delete irrelevant nodes from start_node.
            node_names = set( node.keys() )
            other_nodes = node_names.difference( start_nodes )
            for other_node in other_nodes:
                del( node[other_node] )
            ## Move into next node.
            if 'accessions' in node:
                node.update( { folder : { 'accessions': node.pop('accessions') } } )
            try:
                node = results_dict[folder]
                start_node = start_node[folder]
            except KeyError,e:
                sys.stderr.write( repr(e) + '\n' )
        return results_dict, start_node

    def reverser( self ):
        nodes = { 'Archaea' : {},
                'Bacteria' : {},
                'Eukaryota' : {},
                'rArchaea' : {},
                'rBacteria' : {},
                'rEukaryota' : {} }
        self.distribute_Q.put( ('load',CONFIG.arbDBdir, sorted(nodes.keys()) , ) )  ## --> sequence_distributor
        results, self.DIVERGE_DICT = score2( self.score_pipe_child, nodes, CONFIG.arbDBdir )
        self.RESULTS = self._reverse( results )
        return 

    def _reverse( self, results):
        Ireversed = 0
        total = 0
        delete = []
        for node in sorted(results.keys()):
            self.Write( '##  In {0}, '.format( node ) )
            if node[:1] == 'r':  # top match is a reverse node.
                choice = node[1:]
                if 'accessions' in results[node].keys():
                    accessions = results[node].pop( 'accessions' )
                    self.Print( "there's {0} sequences:-".format( len( accessions ) ) )
                    for accession in accessions:
                        Ireversed += 1
                        self.reverse_pipe_in.send( accession )
                    if choice not in results:
                        results.update( { choice : { 'accessions' : [] } } )
                    if 'accessions' in results[choice].keys():
                        results[choice]['accessions'] +=  accessions 
                    else:
                        results[choice].update( { 'accessions' : accessions } )
                else:
                    pass
                delete.append( node )
                continue
            else:
                if 'accessions' in results[node].keys():
                    self.Print( "there's {0} sequences:-".format( len( results[node]['accessions'] ) ) )
                else:
                    delete.append( node )
                continue
        self.reverse_pipe_in.send( 'END' )
        for node_to_go in delete:
            del( results[node_to_go] )
        delete = []
        for node in results:
            if 'accessions' not in results[node].keys():
                delete.append( node )
            else:
                total += len( results[node]['accessions'] )
        for node_to_go in delete:
            del( results[node_to_go] )
        if Ireversed > 0:
            self.Print( '# Reversed {0} sequences'.format( Ireversed ) )
        return results
    def Print( self, string ):
        self.lock.acquire()
        print string
        self.lock.release()
    def Write( self, string ):
        self.lock.acquire()
        sys.stdout.write( string )
        sys.stdout.flush()
        self.lock.release()

class LocalOptions( Options ):
    """Command-line options for SSUMMO, layered on the shared Options base
    class (which supplies parseArgs and dict-style __getitem__)."""
    def __init__( self,args=None ):
        # Defaults for every recognised option.
        # NOTE(review): '-servers' is ('localhost'), which is just the string
        # 'localhost', not a 1-tuple -- left as-is since the option is
        # documented as not implemented yet.
        self.options = {
                    '-start' : CONFIG.arbDBdir,
                    '-in' : [] ,
                    '-out': None , 
                    # max() keeps at least one worker on single-core machines
                    # (cpu_count() - 1 would otherwise be 0).
                    '-ncpus': max( 1, multiprocessing.cpu_count() - 1 ),
                    '-Eval': str(10),
                    '-format': 'fasta',
                    '-score': str(1),
                    '-servers': ('localhost'),
                    '--createXML' : False,
                    '--verbose' : False,
                    '--max':False
                }
        self.helpTxt = {'-start' : 'Start node for SSUMMO. Can be a domain, or deeper, but must start at least with the domain',
                    '-in' : 'Query sequence file.',
                    '-out': 'Output results file names. Suffix will change. [Optional - prefix inferred from -in]',
                    '-ncpus':'Number of worker processes to initiate. [CPU count - 1]',
                    '-format':'Input file sequence format [fasta].',
                    '-servers': 'Server names. [Not implemented yet].',
                    '-Eval': 'hmmsearch Evalue threshhold [10]',
                    '-score':'hmmsearch score threshold [1]'
                }
        self.switches = {
                    '--createXML':'Create phyloxml output?? [No]',
                    '--verbose' : 'Print HMMer results [No!]',
                    '--max' : 'Use hmmsearch --max flag (bypass filters that remove sequences from full scoring set). [no]'
            }
        self.useage = "python path/to/SSUMMOv06.py [option [arg]] ..."
        self.example = self.useage
        self.singleargs = [ '-ncpus','-servers','--createXML','-start','-format','--verbose','--max','-score','-Eval']
        self.multiargs = [ '-in','-out' ]
        if args is not None:
            self.localParseArgs( args )
    def __iter__( self ):
        """Iterate over the option names."""
        for i in self.options:
            yield i
    def localParseArgs(self,args):
        """Parse *args*, deriving one -out prefix per -in file when -out was
        not supplied.  Raises IOError when the -in / -out counts differ.
        """
        self.options = self.parseArgs( args )
        if self['-out'] is None:
            self.options.update( { '-out' : [] } )
            for option in self.options['-in']:
                if '.' in option:
                    prefix , suffix = option.rsplit('.',1)
                else:
                    prefix = option
                self.options['-out'].append( prefix + '.' )
        if len( self['-out'] ) != len ( self['-in'] ):
            # Fixed: original read self['out'] (missing dash), which raised a
            # KeyError while formatting instead of the intended IOError.
            raise IOError( 'Must supply the same number of options to -in as to -out\n.Got {0} and {1}, respectively.'.format(len( self['-in']) , len(self['-out']) ) )

def save( save_name , object ):
    with file( save_name,'wb' ) as save_file:
        pickle.dump( object, save_file , -1 )
        print "##\tSaved object to '{0}'".format( save_file.name )

def main( options ):
    t0 = time.time()
    ins, outs = ( options['-in'], options['-out'], )
    CONFIG.options = options
    n_files = len( ins )
    for file_ind in xrange( n_files ):
        t1 = time.time()
        #CONFIG.options = options.deepcopy()
        CONFIG.options.options['-in' ] = ins[file_ind ]
        CONFIG.options.options['-out'] = outs[file_ind]
        Iprepare = Preparer()
        results_dict = Iprepare.prepare( )
        print "##\t{0} ambiguous results".format(CONFIG.ambiguousCount)
        ### Save results files
        try:
            save( '{0}.pkl'.format(CONFIG.options['-out'].rstrip('.')) , results_dict )
        except IOError:
            if sys.stdin.closed:
                suffix = 'results'
            else:
                suffix = 'results'
                if len(suffix) == 0:
                    suffix = 'results'
            save_name = suffix + '.pkl'
            i = 0
            while os.path.exists( save_name ):
                i += 1
                save_name = '{0}{1}.pkl'.format( suffix,i )
            print '##\tSaving to {0}'.format( save_name )
            CONFIG.options['-out'] = save_name
            save( save_name , results_dict )
        if options['--createXML']:
            startNode,startDir = findStart( getTaxIndex())
            print "##\tWriting phyloxml file to '{0}.xml'".format(options['-out'])
            with file('{0}.xml'.format(options['-out']),'w') as write_handle:
                write_xml(startDir,write_handle,results_dict)
        t = time.time()
        mins = int(t-t0) / 60
        print "##\Finished {0} in {1} mins{2} secs".format( CONFIG.options['-in'], mins, (t - t0) - (mins*60))
    t = time.time()
    mins = int(t-t0) / 60
    print "##\tFinished everything in {0} mins{1} secs".format( mins, (t - t0) - (mins*60))


if __name__ == '__main__':
    # Parse the command line once, publish the options globally via CONFIG,
    # then hand off to main().
    CONFIG.options = LocalOptions( sys.argv[1:] )
    main( CONFIG.options )
