#!/usr/bin/env python

# SSuMMo imports
from ssummo.hmmer.hmmsearch import HmmSearcher, HmmSearchParser
from ssummo.phyloxml import write_xml
from ssummo.count_hmms import countseqs
from ssummo.ssummolib import load_index, SsummoOptions
from ssummo.traverse import dict_walk, get_accessions
from ssummo import CONFIG

# standard library imports
import multiprocessing
import os
import re
import subprocess
import sys
from threading import Thread
import time
try:
    import cPickle as pickle
except ImportError:
    import pickle

# 3rd party imports
import Bio.SeqRecord
from Bio import SeqIO

# Module-wide counter stashed on the shared CONFIG object, reset on import.
# NOTE(review): presumably counts sequences with ambiguous top hits — the
# increment site is not in this file; confirm against the rest of the package.
CONFIG.ambiguousCount = 0

class Scorer(Thread):
    """Collate hmmsearch results as they arrive.

    Results are fed in (through inQ) as parseHMMThread produces them.
    Scorer keeps collating and ordering them; once every hmmsearch result
    for a round of nodes is in, the per-accession results are piped on to
    score.
    """

    n_done = 0
    n_to_do = 0

    def __init__(self, hmmsearch_outQ, result_pipe, distrib_end_queue, seq_DbQ):
        """Wire up the queues/pipes; commands are dispatched via self.fns.

        hmmsearch_outQ   - inbound queue of ('cmd', args...) tuples.
        result_pipe      - pipe to the score routine.
        distrib_end_queue- queue back to the SequenceDistributor.
        seq_DbQ          - SeqDB's command queue (used on fatal errors).
        """
        Thread.__init__(self)
        self.inQ = hmmsearch_outQ
        self.out_pipe = result_pipe
        self.d_Q = distrib_end_queue
        self.seq_DbQ = seq_DbQ
        self.fns = dict(end=self._end, update=self._update,
                        reset=self._reset, die=self._die)
        self.err_count = 0

    def _die(self, error, OTUName=''):
        """Record a dead search; once all nodes are accounted for, forward
        the error to both SeqDB and score."""
        self.d_Q.put(OTUName)
        self.err_count += 1
        self.n_done += 1
        if CONFIG.debug:
            print('[Scorer.die] {0} dead: {1} to die: {2}'
                  .format(OTUName, self.err_count, self.n_to_do))
        if self.n_done == self.n_to_do:
            self.seq_DbQ.put(('die', error))  # --> SeqDB
            self.out_pipe.send(error)         # --> score

    def _end(self, OTU):
        """HmmSearcher finished OTU: tell the distributor, and flush every
        collated result to score once the whole round is done."""
        self.n_done += 1
        self.d_Q.put(OTU)                     # --> distributor
        if self.n_done != self.n_to_do:
            return
        for accession in list(self.results):
            self.out_pipe.send((accession, self.results.pop(accession),))
        self.out_pipe.send('end')             # --> score
        del self.results

    def _reset(self, nodes):
        """Start a fresh round (from SSUMMO / reverser): forward the node
        order to score and zero all per-round state."""
        self.out_pipe.send(nodes)             # --> score
        self.results = {}
        self.n_accessions_got = {}
        self.n_to_do = len(nodes)
        self.n_done = 0
        # Fixed node->column mapping, plus a zeroed template row.
        self.hmm_indices = dict((node, i) for i, node in enumerate(nodes))
        self.results_val = [float()] * self.n_to_do

    def _update(self, OTU, results):
        """Record one hit (ordered by HmmSearcher) into the per-accession
        [Evals, scores] table at OTU's column."""
        column = self.hmm_indices[OTU]
        accession = results['accession']
        if accession not in self.results:
            # First sighting: fresh copies of the zero template.
            self.results[accession] = [self.results_val[:], self.results_val[:]]
            self.n_accessions_got[accession] = 1
        else:
            self.n_accessions_got[accession] += 1
        evals, scores = self.results[accession]
        evals[column] = results['Eval']
        scores[column] = results['score']
        return

    def run(self):
        """Dispatch ('cmd', args...) tuples from inQ until the 'END' sentinel."""
        while True:
            inval = self.inQ.get()
            if inval == 'END':
                break
            self.fns[inval[0]](*inval[1:])
        return

#class SeqDB( multiprocessing.Process ):
#class SeqDB( multiprocessing.Process ):
class SeqDB(Thread):
    """Sole owner of the sequence input file: loads every sequence into
    memory (self.seqs) and services commands read off prefetchQ."""

    def __init__(self, seqfile, prefetchQ, distribute_Q, distributor_end_queue,
                 threads, lock, reverse_pipe=None, format='fasta'):
        """This is the only process that reads the sequence input file.

        prefetchQ must be a multiprocessing.Queue() object. In this, you must put the
           following keywords, which will be called from their references in
           self.functions:-
               *    annotate
               *    die
               *    forget
               *    forget_inverse
               *    get
               *    get_all
               *    reverse
               *    separate
               *    slice
               *    Skip
               *    Proceed
        See the help for the relevant function to see what they do.
        You can add your own functions too.

        distribute_Q - Don't touch. Scorer uses this to synchronise finished nodes.
        threads   - list of threads which sequences shall be distributed to.
        lock      - multiprocessing.Lock() instance
        reverse_pipe - Pipe used by SeqDB.reverse()
                       Send accessions down the other pipe end, and
                       this will reverse them.
        format    - format of sequences in the sequence file.
        """
        #multiprocessing.Process.__init__(self)
        Thread.__init__(self)
        self.prefetchQ = prefetchQ
        self.seqfile = seqfile
        self.format = format
        self.rpipe = reverse_pipe
        self.lock = lock
        # distribute_Q doubles as this thread's outbound queue and the
        # distributor's inbound queue.
        self.distributor = SequenceDistributor( distribute_Q, threads, distributor_end_queue, prefetchQ)
        self.threads = threads
        self.outQ = distribute_Q
        self.functions = {
                'annotate'  : self.annotate,
                'die'       : self.die,
                'forget'    : self.forget,
                'forget_inverse': self.forget_inverse,
                'get'       : self.get,
                'get_all'   : self.get_all,
                'reverse'   : self.reverse,
                'separate'  : self.separate,
                'slice'     : self.slice,
                'Skip'      : self.skip,
                'Proceed'   : self.proceed,
                }
        self.alive = False

    def annotate(self, outfile):
        """SeqDB knows nothing of the taxonomic assignments, just accessions and
        sequences. We therefore read tuple pairs of:
                    ( <accession>, <assignment> )
        through self.prefetchQ, until the tuple ( False, False ) is read.

        When calling annotate (by sending the string 'annotate' through self.prefetchQ),
        also give the name of the output sequence file. Sequences are saved in the same
        format as they were read.

        Once all assigned sequences have been saved, the other sequences which were not
        assigned to a taxa are appended to the input file. If this is undesired, use
        separate instead.
        """
        accessions = self.separate(outfile)
        print('#\tSaving annotated sequences to {0}'.format(outfile))
        close = lambda: None
        if not hasattr( outfile, 'write' ):
            try:
                # BUG FIX: was open(outfile, 'w'), which truncated everything
                # separate() just wrote; append instead, as the docstring says.
                outhandle = open( outfile, 'a' )
                # BUG FIX: was `handle.close` — `handle` was never defined here.
                close = outhandle.close
            except IOError:
                # BUG FIX: the message never had .format() applied.
                print("can't write to {0}. dumping to standard out instead"
                      .format(outfile))
                time.sleep(1)
                outhandle = sys.stdout
        elif hasattr(outfile, 'closed') and outfile.closed:
            # closed file handle. Let's append to it
            outhandle = open(outfile.name, 'a')
        else:
            # valid file handle
            outhandle = outfile
        try:
            with open(self.seqfile, 'r') as inhandle:
                for seq in SeqIO.parse(inhandle, self.format):
                    if seq.id in accessions:
                        accessions.remove(seq.id)
                    else:
                        outhandle.write(seq.format(self.format))
        finally:
            close()

    def die(self, e):
        """Fatal error: propagate END to the distributor and abort this thread."""
        self.outQ.put( 'END' )   # --> SequenceDistributor
        raise IOError('SeqDB dying. Given error (prob from Scorer):\n', e)

    def forget(self, accessions):
        """Deletes all provided accessions from memory."""
        for acc in accessions:
            del( self.seqs[ acc ] )

    def forget_inverse(self, accessions):
        """Deletes every sequence EXCEPT the provided accessions from memory."""
        to_forget = set( self.seqs.keys() ).difference( set( accessions ) )
        self.forget( to_forget )

    def get(self, accessions):
        """Given a list of accessions, puts each one into self.outQ."""
        for acc in accessions:
            self.outQ.put( ('add', self.seqs[acc].format('fasta')) )
        return

    def get_all(self, *args):
        """Puts all sequences, in fasta format, into self.outQ."""
        for seq in self.seqs.values():
            self.outQ.put( ('add', seq.format('fasta'), ) )
        return

    def load_sequences(self):
        """Read every sequence from self.seqfile into self.seqs, keyed by the
        accession parsed from the description (fasta) or record id, and start
        the distributor thread."""
        if self.format == 'fasta':
            # First whitespace-delimited token is the accession, the rest info.
            desc_reg = re.compile(r'^\s*(\S+)\s*(\S*.*)[\n\r]?$')
            acc_info = lambda SeqRecord: desc_reg.search(SeqRecord.description).groups()
        else:
            acc_info = lambda SeqRecord: (SeqRecord.id, SeqRecord.description)
        self.seqs = {}
        self.distributor.start()
        with open(self.seqfile, 'r') as inFile:
            for seq in SeqIO.parse(inFile, self.format):
                accession, info = acc_info(seq)
                # Use the parsed accession rather than seq.id, in conformance
                # with the reg exp sequence description parser in SSUMMO_tally.
                self.seqs.update({ accession : seq })
        return

    def proceed(self, *args):
        """Put 'Proceed' into outQ, for SequenceDistributor."""
        self.outQ.put(('Proceed', None))

    def reverse(self):
        """Called once before traversing the directory. The input pipe (self.rpipe)
        is closed before returning None.
        """
        if self.rpipe is None:
            raise ValueError( "No reverse pipe given to SeqDB!!" )
        for fasta_sequence in self.seqs.values():
            self.outQ.put(('add', fasta_sequence.format('fasta'), ))  # To distributor
        self.outQ.put(('Proceed', None, ))
        to_reverse = self.rpipe.recv()
        reversing = 0
        while to_reverse != 'END':
            # Replace the stored record with its reverse complement,
            # preserving id and name.
            seq = self.seqs[to_reverse]
            self.seqs.update( { to_reverse : \
                        Bio.SeqRecord.SeqRecord(seq.seq.reverse_complement(),
                        id = seq.id, name=seq.name ) } )
            reversing += 1
            to_reverse = self.rpipe.recv()
        self.rpipe.close()

    def run(self):
        """Load all sequences, run the reverse handshake, then service
        commands from prefetchQ until the 'END' sentinel."""
        self.alive = True
        self.load_sequences()
        with self.lock:
            print("#SeqDB holding {0} sequences from {1} in memory."
                  .format(len(self.seqs), self.seqfile))
        try:
            self.reverse()   # Enter reverse loop. This shall slice the sequences too.
            inval = self.prefetchQ.get()
            while inval != 'END':
                fn = self.functions[ inval[0] ]
                args = inval[1]
                fn(args)
                inval = self.prefetchQ.get()
        except (KeyboardInterrupt, Exception) as e:
            print('SeqDB error!')
            self.die(e)
        else:
            if CONFIG.debug:
                print('##\tSeqDB.shutdown(...'),
            self.shutdown()
            if CONFIG.debug:
                print('##\tSeqDB.shutdown(...'),

    def separate(self, outfile):
        """Works similarly to SeqDB.annotate.
        Reads (accession, annotation) tuple pairs through self.prefetchQ,
        and saves them to outfile. Typically, this saves all sequences
        that could be assigned to a taxa to <outfile>.
        """
        accessions = set()
        print('#\tSaving annotated sequences to {0}'.format(outfile))
        close = lambda: None
        if not hasattr(outfile, 'write'):
            try:
                # BUG FIX: a dot-less name used to raise IndexError here.
                if '.' not in outfile:
                    outfile = '{0}.{1}'.format(outfile, self.format)
                elif len(outfile.rsplit('.', 1)[1]) == 0:
                    outfile = outfile + self.format
                handle = open( outfile, 'w' )
                close = handle.close
            except IOError:
                # BUG FIX: the message never had .format() applied.
                print("can't write to {0}. dumping to standard out instead"
                      .format(outfile))
                time.sleep(1)
                handle = sys.stdout
        elif hasattr(outfile, 'closed') and outfile.closed:
            # closed file handle append
            handle = open( outfile.name, 'a' )
        else:
            # valid file handle
            handle = outfile
        try:
            accession, annotation = self.prefetchQ.get()
            while accession is not False:
                accessions.add( accession )
                sequence = self.seqs[accession]
                sequence.description += ' {0}'.format( annotation )
                handle.write( sequence.format( self.format ) )
                accession, annotation = self.prefetchQ.get()
        finally:
            close()
        return accessions

    def shutdown(self):
        """Closes down all threads."""
        if CONFIG.debug:
            print('[SeqDB.shutdown] telling SequenceDistributor to end')
        self.outQ.put('END', timeout=1)  ## --> SequenceDistributor
        for thread in self.threads:
            thread.inPipe.send(('END', None))
        if self.threads:
            # All searchers share one output queue (Scorer's inQ), so a single
            # END after the loop suffices.  (Guard fixes a NameError when the
            # thread list is empty.)
            self.threads[0].outQ.put('END', timeout=1)
        self.alive = False
        return

    def skip(self, *args):
        """Put 'Skip' into outQ, for SequenceDistributor."""
        self.outQ.put(('Skip', None))

    def slice(self, accession, start, end):
        """This will slice sequence with seq.id accession, from
        start to end, INCLUSIVE. i.e. using python indexing [start -1 : end + 1].
        This was chosen to comply with hmmer alignment co-ordinates.
        """
        self.seqs[accession] = self.seqs[accession][ start : end + 1 ]

    def slice_all(self):
        """Keeps reading self.prefetchQ, expecting the tuple (accession, start, end).
        For each tuple received, this will slice the relevant sequence accordingly.
        This will continue to expect tuples until it gets "END" from self.prefetchQ.

        N.B. Sequences are sliced inclusive of given position.
        """
        inval = self.prefetchQ.get()
        while inval != 'END':
            id, start, end = inval
            self.slice(id, start, end)
            inval = self.prefetchQ.get()
        return

class SequenceDistributor(Thread):
    """Fans the current sequence set and per-node HMM paths out to the
    HmmSearcher threads, reusing threads as jobs finish (via jobEndQ)."""

    def __init__(self, seq_DbQ, hmmsearch_threads, jobEndQ, SeqDBInQ):
        """seq_DbQ           - inbound command queue (fed by SeqDB).
        hmmsearch_threads - HmmSearcher workers to distribute jobs to.
        jobEndQ           - Scorer reports finished node names here.
        SeqDBInQ          - SeqDB's command queue, used for error reporting.
        """
        Thread.__init__( self )
        self.inQ = seq_DbQ
        self.jobEndQ = jobEndQ
        self.errorQ  = SeqDBInQ
        self.threads = hmmsearch_threads
        self.n_threads = len(self.threads)
        self.thread_nums = set( range(self.n_threads) )
        self.fns = { 'add'     : self._addseq ,
                     'Proceed' : self._proceed,
                     'Skip'    : self._skip   ,
                     'load'    : self._load   ,
                     'reset'   : self._reset  ,
                     'die'     : self._die    ,
                   }
        self.dead = False
        self.seqs = set()

    def _addseq( self, seq ):
        """Accumulate one fasta-formatted sequence (inval <-- SeqDB)."""
        self.seqs.add( seq )

    def _die( self, e=None, OTUName=None ):
        """Report a fatal error back to SeqDB (once) and to the jobEndQ."""
        import Queue
        if not self.dead:
            # BUG FIX: sys.stderr.write() takes a single string argument.
            sys.stderr.write('[sequencedistributor.die] dieing to errorQ\n{0}\n'
                             .format(e))
            try:
                self.errorQ.put(('die', e), timeout=1)
            # BUG FIX: a timed-out put() raises Queue.Full, not Queue.Empty.
            except Queue.Full as e2:
                print('[sequencedistributor] error!')
                print('{0}\n{1}'.format(e2, e))
                raise
        self.jobEndQ.put(('die', e), timeout=1)
        self.dead = True
        return

    def _proceed( self, *args ):
        """From SeqDB. Starts the cascade for the loaded nodes."""
        self.threads[0].outQ.put( ('reset', self.nodes) ) # --> Scorer. Tells order of nodes to search.
        self.distribute( )

    def _reset( self, *args ):
        """Clear the sequence set and load a new (directory, nodes) pair."""
        self.seqs = set()
        self._load( *args )

    def _skip( self, *args ):
        """Drop the accumulated sequences without searching them."""
        self.seqs = set()

    def _load( self, directory, nodes ):
        """Remember the nodes and build each node's HMM file path.
        (inval <-- SSUMMO / Application.reverser)"""
        self.nodes = nodes
        self.paths = [ os.path.join( directory, node, node + '.hmm' ) for node in nodes ]

    def run( self ):
        """Dispatch commands from inQ until 'END'; report any crash via _die."""
        self.threadInd = 0
        try:
            inval = self.inQ.get()
            while inval != 'END':
                self.fns[ inval[0] ]( *inval[1:] )
                inval = self.inQ.get()
        except Exception:
            self._die()
            raise
        finally:
            self.inQ.close()

    def distribute( self ):
        """Submit one hmmsearch job per node, at most one per worker thread at
        a time, then wait until Scorer reports every node finished."""
        self.threadInd = 0
        num_nodes = len(self.paths)
        num_seqs = len( self.seqs )
        self.thread_indices = {}
        # Start as many jobs as possible, bounded by nodes or worker threads.
        min_val = min( [num_nodes, self.n_threads] )
        for i in xrange( min_val ):
            self.threads[i].inPipe.send( [ num_seqs, self.paths[i] ] )  # -->
            self.thread_indices.update( { self.nodes[i] : i } )
            self.threads[i].inPipe.send( self.seqs )    # --> HmmSearcher
            self.threadInd += 1
        if min_val != num_nodes:
            # More nodes than workers: submit the rest as threads free up.
            for index in xrange(min_val, num_nodes):
                self.get_free_thread()
                self.threads[self.threadInd].inPipe.send( [ num_seqs, self.paths[index] ] )   # -->
                self.thread_indices.update( { self.nodes[index] :self.threadInd } )
                self.threads[self.threadInd].inPipe.send( self.seqs )  # -->  HMMThread
            # Wait for the last batch still in flight.
            for i in xrange( min_val ):
                self.thread_indices.pop( self.jobEndQ.get() )          #  <-- 'OTUName' from Scorer.
        else:
            for i in xrange( num_nodes ): # === xrange( min_val )
                self.thread_indices.pop( self.jobEndQ.get() )          #  <-- 'OTUName' from Scorer
        return

    def get_free_thread( self ):
        """Block until Scorer reports a finished node; its worker becomes free."""
        node_name = self.jobEndQ.get()  # <-- Scorer
        self.threadInd = self.thread_indices.pop( node_name )
        return

class Application( ):
    def __init__(self, options=None):
        """After initialising, just need to call the enter_loop() method.

        enter_loop() tries to start the SeqDB process as well as starting the
        hmmsearch threads.

        Once these are ready, start_threads() returns and _cascade() is called.

        _cascade enters the main SSUMMO loop by calling any methods defined in
        self.premethods, which is a list of function calls.

        We also have lists of function calls for self.recursemethods and
        self.postmethods.

        The default methods are:-

        premethods = [ self.reverser, self.find_start, self._print_nodes ]
        recursemethods = []
        postmethods = []

        By default, the only recurse method is determined by self.find_start,
        depending on CONFIG.options['-start'] (which can be defined on the
        command line).

        """
        self.SeqDB = SeqDB
        self.premethods = [ self.reverser, self.find_start, self._print_nodes ]
        self.recursemethods = []
        self.postmethods = []
        if options:
            if options['--annotate']:
                self.postmethods = [self.annotate]
            if options['--separate']:
                self.postmethods.append(self.separate)
                pass
        self.START_DIR = str()
        self.START_NODE = dict()
        self.DIVERGE_DICT = dict()
        self.RESULTS = dict()

    def annotate(self):
        if '.' in CONFIG.options['-out']:
            infile, suffix = CONFIG.options['-out'].split('.')
            outfile = '{0}_SSUMMO_annotated.{1}'.format( infile, suffix )
        else: outfile = sys.stdout
        self.SeqDB.prefetchQ.put( ('annotate', outfile) )
        self._iter_results()

    def call(self):
        self.RESULTS, \
            self.DIVERGE_DICT = SSUMMO( self.START_NODE,
                                        self.START_DIR,
                                        self.RESULTS,
                                        self.seq_db,
                                        self.score_pipe_child,
                                        diverge_dict=self.DIVERGE_DICT)
        self.RESULTS = self.shuffle_back(self.RESULTS, self.START_DIR)
        return
        #return self.results, diverge_dict

    def call_iter(self):
        for node in self.RESULTS.keys():
            if node == 'accessions':
                continue
            self.RESULTS[node], \
                self.DIVERGE_DICT = SSUMMO(self.START_NODE[node],
                                           os.path.join( self.START_DIR, node ),
                                           self.RESULTS[ node ],
                                           self.seq_db,
                                           self.score_pipe_child,
                                           diverge_dict=self.DIVERGE_DICT )
        return
        #return results, diverge_dict

    def _cascade(self):
        t0 = time.time()
        # Counting for debugging purposes.
        n_before = len(get_accessions(self.RESULTS, accessions=[]))
        for method in self.premethods:
            method()
        t = time.time()
        # Counting for debugging purposes.
        n_mid = len(get_accessions(self.RESULTS, accessions=[]))
        for method in self.recursemethods:
            method()
        #self.RESULTS, self.DIVERGE_DICT = self.caller( self.RESULTS, self.START_NODE, self.START_DIR, self.DIVERGE_DICT )
        # Counting for debugging purposes.
        n_after = len(get_accessions(self.RESULTS, accessions=[]))
        for method in self.postmethods:
            method()
        t = time.time() - t0
        # Counting for debugging purposes.
        n_final = len(get_accessions(self.RESULTS, accessions=[]))
        print("##\tRetrieved {0} sequences from {1}, in {1} seconds"
              .format(n_final, n_before, t))
        return self.RESULTS

    def enter_loop(self):
        """Given the name of a sequence file, makes a blast database
        and provides the entry point for the SSUMMO algorithm. Returns
        a dictionary of results containing taxonomic nodes with matches
        and at each node the accessions that have been assigned there.
        """
        try:
            self._start_threads()
            results = self._cascade()
        except (KeyboardInterrupt, ):
            sys.stderr.write('[main thread] SHUTTING DOWN...\n')
            raise
        except Exception:
            raise
        else:
            return results
        finally:
            self.shutdown()

    def find_start(self):
        """If the -start option was given, then this shuffles the results
        along accordingly and also calls SSuMMo on a single node, whereas 
        at the root, need to iterate through all 3 domains.

        This therefore sets self.start_node and self.start_dir, indicating
        which node we should start SSuMMo.
        """
        tdict = get_tax_index(silent=True)
        self.START_NODE, self.START_DIR = find_start(tdict )
        if self.START_DIR not in [CONFIG.arbDBdir, 'Bacteria', 'Archaea', 'bac', 'arc']:
            self.RESULTS, self.START_NODE = \
                self.shuffle_along(self.RESULTS, self.START_DIR, self.START_NODE)
            print("# starting at '{0}'".format(self.START_DIR))
            self.recursemethods.append( self.call )
        else:
            self.recursemethods.append( self.call_iter )

    def _final_merge(self):
        self.RESULTS = final_merge( self.RESULTS, self, self.DIVERGE_DICT )

    def _forget(self, results):
        """Finds all accessions in results, and gets the SeqDB instance to forget
        the rest."""
        accessions = []
        for result in results.values():
            if 'accessions' in result:
                accessions += result['accessions'] 
        self.seq_db.prefetchQ.put(( 'forget_inverse', accessions ))
        return

    def _iter_results(self):
        """Called by annotate and separate to send accessions and their
        taxonomy through SeqDB.prefetchQ
        """
        unknowns = re.compile(r'(unidentified)|(incertae)|(unknown)|(uncultured)', re.I)
        for path, node in dict_walk('', self.RESULTS ):
            if 'accessions' in node:
                taxpath = path.split( os.path.sep )
                if unknowns.search( taxpath[-1] ):
                    taxnode = '[{0}]'.format(', '.join( taxpath[-1:-3:-1] ))
                else:
                    taxnode = '[{0}]'.format(taxpath[-1])
                for accession in node['accessions']:
                    self.seq_db.prefetchQ.put( ( accession, taxnode ) )
        self.seq_db.prefetchQ.put( (False, False) )
        return

    def _print_nodes(self):
        for node in self.RESULTS.keys():
            if node == 'accessions':
                continue
            print_tree_line( node, self.RESULTS, 0 )

    def reverser(self):
        nodes = { 'Archaea'    : {},
                  'Bacteria'   : {},
                  'Eukaryota'  : {},
                  'rArchaea'   : {},
                  'rBacteria'  : {},
                  'rEukaryota' : {} }
        try:
            self.distribute_Q.put( ('load', CONFIG.arbDBdir, sorted(nodes.keys()), ) )  ## --> SequenceDistributor
            results, self.DIVERGE_DICT = score( self.score_pipe_child, nodes, CONFIG.arbDBdir )
            self.RESULTS = self._reverse( results )
        finally:
            self.reverse_pipe_in.send( 'END' ) # --> SeqDB
        return 

    def _reverse(self, results):
        Ireversed = 0
        total = 0
        delete = []
        self._forget( results )
        for node in sorted(results.keys()):
            self.Write( '##  In {0}, '.format( node ) )
            if node[:1] == 'r':  # top match is a reverse node.
                choice = node[1:]
                if 'accessions' in results[node].keys():
                    accessions = results[node].pop( 'accessions' )
                    self.Print( "there's {0} sequences:-".format( len( accessions ) ) )
                    for accession in accessions:
                        Ireversed += 1
                        self.reverse_pipe_in.send( accession )
                    if choice not in results:
                        results.update( { choice : { 'accessions' : accessions } } )
                    else:
                        if 'accessions' in results[choice].keys():
                            results[choice]['accessions'] +=  accessions
                        else:
                            results[choice].update( { 'accessions' : accessions } )
                else:
                    pass
                delete.append( node )
                continue
            else:
                if 'accessions' in results[node].keys():
                    self.Print( "there's {0} sequences:-".format( len( results[node]['accessions'] ) ) )
                else:
                    delete.append( node )
        for node_to_go in delete:
            del(results[node_to_go])
        delete = []
        for node in results:
            if 'accessions' not in results[node].keys():
                delete.append( node )
            else:
                total += len(results[node]['accessions'])
        for node_to_go in delete:
            del(results[node_to_go])
        if Ireversed > 0:
            self.Print('# Reversed {0} sequences'.format(Ireversed))
        return results

    def separate(self):
        if '.' in CONFIG.options['-out']:
            infile, suffix = CONFIG.options['-out'].rsplit('.', 1)
            outfile = '{0}_SSUMMO_separated.{1}'.format(infile, suffix)
        else:
            outfile = sys.stdout
        self.seq_db.prefetchQ.put(('separate', outfile))
        self._iter_results()

    def shuffle_along(self, results_dict, start_dir, start_node):
        rel_start = start_dir[len(CONFIG.arbDBdir):].strip(os.path.sep)
        rel_path = rel_start.split( os.path.sep )
        first = rel_path[0]
        accessions = [accession for accession in \
                      get_accessions(results_dict[first], accessions=[])]
        for path in rel_path:
            results_dict = results_dict[path]
        results_dict.update( { 'accessions' : accessions } )
        return results_dict, start_node

        last_dir = os.path.realpath(CONFIG.arbDBdir).rstrip(os.path.sep)\
                                                    .rsplit(os.path.sep, 1)[-1]
        start_dir_list = os.path.realpath(start_dir).rstrip(os.path.sep)\
                                                    .split(os.path.sep)
        if last_dir == start_dir_list[-1]:
            folders = []
        else:
            try:
                # Try to find the nodes in relation to the arbDBdir.
                folders = start_dir_list[start_dir_list.index(last_dir) + 1:]
            except ValueError:
                err = str('arbDBdir in CONFIG.py ({0}) must start with ' + \
                          'same file path as -start ({1})'
                          .format(CONFIG.arbDBdir, start_dir))
                raise ValueError()
        node = results_dict
        #start_nodes = set( [folder for folder in folders] )
        start_nodes = set( start_node.keys() )
        for folder in folders:
            ## Delete irrelevant nodes from start_node.
            node_names = set( node.keys() )
            other_nodes = node_names.difference( start_nodes )
            for other_node in other_nodes:
                del( node[other_node] )
            ## Move into next node.
            if 'accessions' in node:
                node.update( { folder : { 'accessions': node.pop('accessions') } } )
            try:
                node = results_dict[folder]
                start_node = start_node[folder]
            except KeyError, e:
                sys.stderr.write( repr(e) + '\n' )
        return results_dict, start_node

    def shuffle_back(self, results, start_dir):
        full_results = {}
        node_path = start_dir[ len( CONFIG.arbDBdir ):].strip( os.path.sep ).split( os.path.sep )
        for node in node_path:
            full_results.update( { node : {} } )
        full_results.update( results )
        return full_results

    def shutdown(self):
        if CONFIG.debug:
            print('[Application.shutdown] END --> SeqDB')
        self.seq_db.prefetchQ.put('END')  ## SeqDB will end Distributor. Distributor will end hmmsearch threads.
        #print('[Application.shutdown] stopping Scorer')
        #self.hmmsearch_outQ.put('END')  ## Scorer inQ
        if CONFIG.debug:
            print('[Application.shutdown] joining seq_db')
        self.seq_db.join()
        if CONFIG.debug:
            print('[Application.shutdown] joining threads')
        for thread in self.threads:  # Is it quicker to do in two loops or one?
            thread.join()
        if CONFIG.debug:
            print('[Application.shutdown] joining scorer')
        self.scorer.join()
        if CONFIG.debug:
            print('[Application.shutdown] successfully joined everything')

    def _start_threads(self):
        self.threads = []
        thread_pipes = [multiprocessing.Pipe() for i in xrange( int(CONFIG.options['-ncpus']))]
        self.score_pipe_parent, self.score_pipe_child = multiprocessing.Pipe()
        self.hmmsearch_outQ = multiprocessing.Queue()
        self.lock = multiprocessing.RLock()
        for i in xrange(int(CONFIG.options['-ncpus'])):
            self.threads.append(HmmSearcher(thread_pipes[i][0],
                                thread_pipes[i][1], self.hmmsearch_outQ,
                                self.lock))
        CONFIG.printHeaders = True   # Doesn't actually work. In case we wanted to print hmmsearch results.
        for thread in self.threads:
            thread.start()
        self.seq_file_name = CONFIG.options['-in']
        #### Create queues & pipes.
        self.prefetchQ = multiprocessing.Queue() # Send accessions for SeqDb prefetching.
        self.distribute_Q = multiprocessing.Queue()
        distrib_end_Q = multiprocessing.Queue()
        self.reverse_pipe_in, self.reverse_pipe_out = multiprocessing.Pipe()
        #### Create processes / threads (calls __init__ on each one).
        self.seq_db = SeqDB(self.seq_file_name,
                           self.prefetchQ,
                           self.distribute_Q,
                           distrib_end_Q,
                           self.threads,
                           self.lock,
                           reverse_pipe = self.reverse_pipe_out,
                           format=CONFIG.options['-format'])
        self.scorer = Scorer(self.hmmsearch_outQ, self.score_pipe_parent,
                             distrib_end_Q, self.prefetchQ )
        #### Start them.
        self.scorer.start()
        self.seq_db.start()

    def Print(self, string):
        self.Write('{0}\n'.format(string))

    def Write(self, string):
        self.lock.acquire()
        sys.stdout.write( string )
        sys.stdout.flush()
        self.lock.release()

def SSUMMO(node, path, results_node, seq_db, result_pipe, diverge_dict=None):
    """Main SSuMMo function loop. Recurses through the nested dictionary `node`,
    searching each sequence loaded in seq_db against each HMM built for taxa
    specified by the tree `node`.

    :param node: Nested dictionary representing current node in tree of life.
    :param path: Absolute or relative file path to current node's HMM directory.
    :param results_node: Where results are stored [in/out].
    :param seq_db: :class:`.SeqDB` instance containing all query sequences.
    :param result_pipe: Used for communication with :class:`.Scorer` instance.
    :param diverge_dict: For internal use. Defaults to a fresh dict per call;
        the previous mutable default (``{}``) was shared between successive
        top-level invocations in the same process, leaking ambiguous-sequence
        state from one input file to the next.

    """
    if diverge_dict is None:
        diverge_dict = {}
    ## At each path, choose which dir to go into, by performing
    ## hmmsearch on the fasta sequence against each next HMM.
    depth = path.count( os.path.sep ) - CONFIG.arbDBdir.count( os.path.sep )
    node_keys = sorted(node.keys())
    if 'accessions' in node_keys:
        node_keys.remove('accessions')
    num_nodes = len(node_keys)

    if num_nodes == 0:  # Leaf of the taxonomy: nothing below to search.
        return results_node, diverge_dict
    ## Merge ambiguous results with results_node
    results_node, diverge_dict = merge_diverge(results_node, diverge_dict, node)
    seq_db.outQ.put(( 'reset', path, node_keys, )) ## --> SequenceDistributor.

    ## First time we enter SSUMMO(), SeqDB already has all sequences loaded.
    if os.path.realpath(path) == os.path.realpath( CONFIG.options['-start']):
        seq_db.prefetchQ.put(('get_all', None))
    else:
        accessions = results_node['accessions']
        seq_db.prefetchQ.put(('get', accessions, ))  # --> SeqDB.

    if num_nodes == 1:   # If only one node, no need for hmmsearch'ing.
        singleNode = node_keys[0]
        results_node = {singleNode: {
                         'accessions' : results_node.pop('accessions') }}
        seq_db.prefetchQ.put(('Skip', None))
    else:
        seq_db.prefetchQ.put(('Proceed', None))
        results_node, diverge_dict = score(result_pipe, node, path,
                                           diverge_dict=diverge_dict)

    # Resolve any previously ambiguous sequences that now have a clear winner.
    results_node, diverge_dict = sep_diverge(results_node, diverge_dict,
                                             node_keys, seq_db)
    if len(results_node) == 0 and diverge_dict == {}:
        print("##\tNo results")
        print("##\t{0}".format(node_keys))
        print("##\t{0}".format(path))
        # Actually could throw an error in here. i.e. no need for the clause..?
        # Don't think I've ever actually seen these printed...
        return (results_node, diverge_dict, )
    # Separate the results after doing SSUMMO on all nodes.
    choices = sorted(results_node.keys())

    # Recursively do SSUMMO.
    for choice in choices:
        if len(results_node[choice]['accessions']) == 0:
            del node[choice]  ## If node has no results, delete it.
        else:  # otherwise recursively do SSuMMO on winning node.
            print_tree_line( choice, results_node, depth )
            results_node[choice], diverge_dict = \
                SSUMMO(node.pop(choice), os.path.join(path, choice),
                       results_node[choice], seq_db, result_pipe,
                       diverge_dict=diverge_dict)
    return results_node, diverge_dict

#############################################################################
# Helper functions for sorting out ambiguous nodes.
#############################################################################

def clean_converged(accession, diverge_dict_item):
    """Remove `accession` from every alternative route recorded in a single
    diverge-dict entry, once the accession has converged on a winning node.

    :param accession: Sequence accession that has just been resolved.
    :param diverge_dict_item: One diverge-dict entry: route -> score mappings
        plus the bookkeeping keys 'start' and 'start_node'.

    The previous implementation deleted elements from ``dict.keys()`` by
    index (a list only in Python 2, and fragile even there); routes are now
    selected by skipping the bookkeeping keys, and accessions are removed
    with ``list.remove``.
    """
    initial_node = diverge_dict_item['start_node']
    # NOTE(review): this prints the whole start-node dict; possibly the
    # accession was intended instead — output preserved as-is.
    print("{0} has converged!".format(initial_node))
    for route in diverge_dict_item:
        if route in ('start_node', 'start'):
            continue
        # A route may be a sep-delimited path of taxon names.
        node = initial_node
        for part in route.split(os.path.sep):
            node = node[part]
        if accession in node['accessions']:
            node['accessions'].remove(accession)
    return

def merge_diverge(resultsDict, DivDict, tax_node):
    """ Given the results_node dictionary (resultsDict), add accessions present
    in DivDict to the appropriate nodes in resultsDict.

    Each diverged accession's recorded routes are compared (by their final
    path component) against the children of the current taxonomy node; any
    match gets the accession appended to the corresponding results entry.

    Bug fix: the last path component was previously computed with
    ``sep.rsplit(node, 1)`` — i.e. splitting the separator by the node name,
    which always yielded the separator itself and so never matched a taxon.
    """
    tax_keys = tax_node.keys()
    sep = os.path.sep
    for accession in DivDict:
        for route in DivDict[accession]:
            if route in ('start_node', 'start'):
                continue
            # Compare only the last component of a (possibly nested) route.
            cur_node = route.rsplit(sep, 1)[-1]
            if cur_node not in tax_keys:
                continue
            if cur_node in resultsDict:
                resultsDict[cur_node]['accessions'].append(accession)
            else:
                resultsDict[cur_node] = {'accessions': [accession]}
    return (resultsDict, DivDict)

def final_merge(resultsDict, DivDict):
    """Fold every still-ambiguous accession back into resultsDict.

    Each diverged accession is attached to the node where its divergence
    started (located by finding the first top-level results key — e.g.
    Bacteria/Archaea/Eukaryota — inside the recorded start path), after
    removing it from any descendant that already claimed it.

    Removed: a large chunk of dead code that followed an unconditional
    ``continue`` and could never execute.
    """
    # Matches any top-level results key that is surrounded by path
    # separators inside the recorded start path.
    reg = re.compile('|'.join(['(?<={1})({0}){1}'.format(key, os.path.sep) \
                     for key in resultsDict.keys()]))
    # reg checks for Bacteria, Archaea or Eukaryota essentially..
    for accession in DivDict.keys():
        start = DivDict[accession]['start']
        firstNode = reg.search(start)
        if firstNode:
            pathList = start[firstNode.start():].split( os.path.sep )
        else:
            pathList = []

        # Walk down to the divergence start node, creating nodes as needed.
        node = resultsDict
        for OTU in pathList:
            if OTU not in node:
                node[OTU] = {}
            node = node[OTU]

        # Strip the accession from any descendant that already holds it.
        for subnode, path in dict_walk('', node):
            if 'accessions' in subnode:
                if accession in subnode['accessions']:
                    # NOTE(review): this deletes from `node`, not `subnode`;
                    # looks like it should target `subnode` but is preserved
                    # as-is pending confirmation of dict_walk semantics.
                    del(node['accessions'][node['accessions'].index(accession)])
        # Finally record the accession at the divergence start node itself.
        if 'accessions' in node:
            node['accessions'].append(accession)
        else:
            node['accessions'] = [accession]

    return resultsDict

def sep_diverge(resultsDict, DivDict, node_keys, seq_db):
    """Given the results dictionary and the diverge dictionary,
    check the scores present in DivDict, looking for a single
    highest score.
    If there is a single highest score, place that accession in the
    appropriate location and delete its record from DivDict.
    Otherwise, don't change anything.

    Fixes over the previous version:
    - iterate over a snapshot of DivDict (entries are deleted inside
      the loop, which raised RuntimeError);
    - the membership test now checks the accession list rather than the
      results entry's keys;
    - the new-entry branch no longer indexes a string with
      ``['accessions']`` (TypeError);
    - the winning record is deleted under its original route key, not
      the rsplit taxon name (KeyError for nested routes).
    """
    for accession in list(DivDict):
        # Every non-bookkeeping key is a candidate route with its score.
        routes = [k for k in DivDict[accession]
                  if k not in ('start_node', 'start')]
        scores = [DivDict[accession][route] for route in routes]
        bestScore = max(scores)
        if scores.count(bestScore) != 1:
            continue  # Still ambiguous — leave the record for a later pass.
        print("We got a winner!")
        bestKey = routes[scores.index(bestScore)]
        # Routes may be sep-delimited paths; results keys use the last part.
        bestNode = bestKey.rsplit(os.path.sep, 1)[-1]
        if bestNode in resultsDict:
            if accession not in resultsDict[bestNode]['accessions']:
                resultsDict[bestNode]['accessions'].append(accession)
        else:
            resultsDict[bestNode] = {'accessions': [accession]}
        # Drop the winning route, then scrub the losing routes' accessions.
        del DivDict[accession][bestKey]
        clean_converged(accession, DivDict[accession])
        del DivDict[accession]
    return resultsDict, DivDict

#############################################################################
# Misc. functions
#############################################################################

def find_rel_path(node, OTUname, rel_path=None):
    """Given a dictionary node, and an OTU name, will search for
    OTUname from the top of node and return the relative path as a
    list of keys.

    :param node: Nested taxonomy dictionary to search.
    :param OTUname: Key to locate.
    :param rel_path: Path accumulated so far (internal use). Defaults to a
        fresh list per call; the previous mutable default (``[]``) was
        appended to on a top-level match, corrupting every later call.
    :returns: List of keys from `node` down to OTUname, or None if absent.
    """
    if rel_path is None:
        rel_path = []
    for key in node:
        if key == OTUname:
            rel_path.append(key)
            return rel_path
        elif key == 'accessions':
            continue
        else:
            found = find_rel_path(node[key], OTUname,
                                  rel_path=rel_path + [key])
            if isinstance(found, list):
                return found
    return None

def find_start(tdict):
    """Locates the directory where to enter the SSUMMO loop.
    Default is to start in arbDBdir, which is configured in
    CONFIG.py.
    To change, give the command option '-start /some/path/to/dir'
    """
    if os.path.realpath(CONFIG.options['-start']) == os.path.realpath(CONFIG.arbDBdir):
        return tdict, CONFIG.arbDBdir
    pathList = CONFIG.options['-start'].split(os.path.sep)
    # Find the first taxonomy key that appears in the requested start path.
    firstNode = None
    for key in tdict.keys():
        if key in pathList:
            firstNode = pathList.index(key)
            break
    if firstNode is None:
        # No recognised taxon in the path: fall back to the default root.
        return tdict, CONFIG.arbDBdir
    # Descend into the taxonomy along the remaining path components.
    node = tdict
    for nodeName in pathList[firstNode:]:
        if not nodeName.strip():
            continue
        node = node[nodeName]
        parentNode = nodeName
    startDir = os.path.join(CONFIG.arbDBdir,
                            os.path.sep.join(pathList[firstNode:]))
    CONFIG.options['-start'] = startDir
    print("##\tStarting SSUMMO from node '{0}' at path '{1}'"
          .format(parentNode, startDir))
    return node, startDir

def get_tax_index(silent=False):
    """Returns the full ARB taxonomy as a nested dictionary, but only for the
    main three kingdoms: Archaea, Bacteria and Eukaryota."""
    tdict = load_index(silent=silent)
    # Prune everything except the three kingdoms, in place.
    for kingdom in list(tdict):
        if kingdom not in ('Bacteria', 'Eukaryota', 'Archaea'):
            del tdict[kingdom]
    return tdict

def print_ambiguous_results(amb_nodes):
    """Print one summary line per ambiguous node tuple with its hit count."""
    print('# Ambiguous nodes:-\n'),
    for nodes_tuple, count in amb_nodes.items():
        joined = ' & '.join(nodes_tuple)
        print('# {0} - {1}'.format(joined.ljust(40), count))

def print_tree_line(choice, results_node, depth):
    """Print one ASCII-tree line for `choice` at the given depth, with its
    accession count.

    Consolidates the previous four-way branch (two of whose bodies were
    byte-identical) into a single marker selection:
    '+-' for the first of several siblings, '|_' for the last, '|-' for a
    lone child or a middle sibling.
    """
    choices = sorted(results_node.keys())
    if len(choices) > 1 and choice == choices[0]:
        marker = '+-'
    elif len(choices) > 1 and choice == choices[-1]:
        marker = '|_'
    else:
        marker = '|-'
    sys.stdout.write('{0}{1} {2}'.format(' ' * depth * 3, marker, choice).ljust(45) +
            ' ({0})\n'.format(len(results_node[choice]['accessions']) ))
    sys.stdout.flush()

def save(save_name, object):
    """Pickle `object` to `save_name` with the highest protocol.

    On IOError (e.g. unwritable path), fall back to an auto-numbered
    'results<N>.pkl' in the working directory, record the new name in
    CONFIG.options['-out'], and retry.

    Fixes: uses ``open`` instead of the Python-2-only ``file`` builtin, and
    collapses a dead conditional that assigned the same suffix on every path.
    """
    try:
        with open(save_name, 'wb') as save_file:
            pickle.dump( object, save_file, -1 )
            print("##\tSaved pickle object to '{0}' ".format(save_file.name)),
            print("(Use this to draw trees etc. other scripts)")
    except IOError:
        # Every branch of the old sys.stdin.closed check chose 'results'.
        suffix = 'results'
        save_name = suffix + '.pkl'
        i = 0
        while os.path.exists( save_name ):
            i += 1
            save_name = '{0}{1}.pkl'.format(suffix, i)
        print('##\tSaving to {0}'.format(save_name))
        CONFIG.options['-out'] = save_name
        save( save_name, object )

def score(scorer_pipe, tax_node, path, results_dict=None, diverge_dict=None):
    """Collect hmmsearch results for one iteration and assign each sequence
    to its best-scoring node.

    First receives the sorted node names over `scorer_pipe`, then one
    ``(accession, (Evals, scores))`` tuple per sequence until the sentinel
    ``'end'`` arrives.

    :param scorer_pipe: Pipe end with ``recv()``; fed by the Scorer thread.
    :param tax_node: Current taxonomy sub-tree (recorded for ambiguous hits).
    :param path: Current HMM directory path (recorded for ambiguous hits).
    :param results_dict: Accumulator mapping node -> {'accessions': [...]}.
    :param diverge_dict: Accumulator for ambiguous (tied-score) sequences.
    :returns: ``(results_dict, diverge_dict)``.

    Fixes: the ambiguous-route key was built with ``os.path.join(<list>)``,
    which returns the list unchanged (a single positional argument) and then
    fails as an unhashable dict key — the components are now joined with the
    path separator, matching how routes are split elsewhere. The debug
    "Now we have it" print was also mis-indented inside the inner loop.
    """
    if results_dict is None:
        results_dict = {}
    elif 'accessions' in results_dict:
        results_dict.pop( 'accessions' )
    if diverge_dict is None:
        diverge_dict = {}
    nodes = scorer_pipe.recv()  # The (sorted!) node names.
    inval = scorer_pipe.recv()  # Then (accession, results, ) per sequence.
    choices = set()
    unique_accessions = []
    ambiguous_nodes = {}
    while inval != 'end':
        try:
            accession, results = inval
            Evals, scores = results
        except ValueError:
            # Anything unpackable here means the Scorer sent an error value.
            raise IOError(inval)
        best_score = max( scores )
        n_best = scores.count( best_score )
        if n_best == 1:
            # Unambiguous winner: file the accession under that node.
            choice = nodes[ scores.index( best_score ) ]
            if choice in choices:
                results_dict[choice]['accessions'].append( accession )
            else:
                choices.add( choice )
                results_dict[choice] = {'accessions': [accession]}
            if accession in unique_accessions:
                # Debug trace for double assignments.
                print("Already got {0} assigned to".format(accession)),
                for ch in choices:
                    if accession in results_dict[ch]['accessions']:
                        print(ch, ' '),
                print('Now we have it for {0}'.format(choice))
            unique_accessions.append( accession )
        elif n_best > 1:  ## Ambiguous node!
            CONFIG.ambiguousCount += 1
            prev_index = -1
            top_scorers = []
            for count in xrange(n_best):
                # Locate every node that shares the top score.
                prev_index = scores.index(best_score, prev_index + 1)
                top_scorers.append(nodes[prev_index])
                if accession in diverge_dict:
                    # Record the route from the divergence start node down
                    # to this candidate, sep-delimited (matching the split
                    # convention used by clean_converged/sep_diverge).
                    OTU_order = os.path.sep.join(find_rel_path(
                                        diverge_dict[accession]['start_node'],
                                        top_scorers[-1]))
                    diverge_dict[accession][OTU_order] = best_score
                else:
                    diverge_dict[accession] = {top_scorers[-1]: best_score,
                                               'start': path,
                                               'start_node': tax_node}
            top_scorers = tuple( top_scorers )
            if top_scorers in ambiguous_nodes:
                ambiguous_nodes[top_scorers] += 1
            else:
                ambiguous_nodes[top_scorers] = 1
        elif n_best == 0:
            # Unreachable in practice (max() guarantees at least one hit),
            # kept for parity with the original diagnostics.
            print('##\tno matches for accession {0}'.format(accession))
        else:
            print("##\tn_best = {0}".format(n_best))
            raise ValueError("Can't figure out the winning nodes")
        inval = scorer_pipe.recv()
    if len(ambiguous_nodes) > 0:
        print_ambiguous_results(ambiguous_nodes)
    return results_dict, diverge_dict

def main(options):
    """Entry point: run the SSuMMo pipeline once per input file.

    :param options: SsummoOptions mapping of command-line flags. '-in' and
        '-out' hold parallel lists of input and output file names; each pair
        gets its own Application run, pickled results, and optional phyloxml
        output ('--createXML').
    """
    t0 = time.time()
    ins, outs = ( options['-in'], options['-out'], )
    CONFIG.options = options
    CONFIG.debug = options['--debug']
    n_files = len(ins)
    for file_ind in xrange(n_files):
        t1 = time.time()  # NOTE(review): per-file timestamp, currently unused.
        #CONFIG.options = options.deepcopy()
        # NOTE(review): '.options.options' double attribute access — presumably
        # SsummoOptions wraps an inner dict named 'options'; confirm this is
        # consistent with the item-style access (CONFIG.options['-out']) below.
        CONFIG.options.options['-in' ] = ins[file_ind]
        CONFIG.options.options['-out'] = outs[file_ind]
        Iprepare = Application(CONFIG.options)
        results_dict = Iprepare.enter_loop()
        print("##\t{0} ambiguous results".format(CONFIG.ambiguousCount))
        ### Save results files
        # rstrip('.') trims trailing dots from the output name before '.pkl'.
        save('{0}.pkl'.format(CONFIG.options['-out'].rstrip('.')), results_dict)
        if options['--createXML']:
            startNode, startDir = find_start(get_tax_index())
            print("##\tWriting phyloxml file to '{0}.xml'"
                  .format(options['-out']))
            # NOTE(review): file() is a Python-2-only builtin.
            with file('{0}.xml'.format(options['-out']), 'w') as write_handle:
                write_xml(startDir, write_handle, results_dict)
        t = time.time()
        # NOTE(review): relies on Python-2 integer division for whole minutes.
        mins = int(t-t0) / 60
        print("##\tFinished {0} in {1} mins{2} secs"
              .format(CONFIG.options['-in'], mins, (t - t0) - (mins * 60)))
    t = time.time()
    mins = int(t-t0) / 60
    print("##\tFinished everything in {0} mins{1} secs"
          .format( mins, (t - t0) - (mins*60)))

if __name__ == '__main__':
    # Build the global options object from the CLI arguments, then run.
    CONFIG.options = SsummoOptions(sys.argv[1:])
    main(CONFIG.options)
