#!/usr/bin/env python

"""
This script looks through a fasta file of SSU sequences downloaded from ARB,
creating a directory hierarchy that reflects the taxonomic ordering as presented
by the ARB data file.

In each of these directories a file ('accessions.txt') is created that contains
all the accessions that are descendants of that taxonomic rank.
"""

from ssummo.ssummolib import reduceToGenus, get_accessions
from ssummo.taxonomy import TaxDB
from ssummo.traverse import dict_walk, find_start, my_walk
from ssummo.cmd_options import Options
from ssummo import ArbIO
from ssummo import CONFIG

import os
import re
import sys
import subprocess
import time
import multiprocessing
import threading

# Prefer the C implementation of pickle on Python 2; fall back to the pure
# Python module. BUG FIX: the exception name was misspelled 'ImportErrror',
# so the fallback itself raised NameError whenever cPickle was missing.
try:
    import cPickle as pickle
except ImportError:
    import pickle

from Bio import SeqIO

class SeqDB( multiprocessing.Process) :
    """Asynchronous process for managing large sequence files released by the
    ARB Silva database. General workflow should look something like this::

        arb_db_process = SeqDB()
        arb_db_process.start()

    The order of the following arb_db_process method calls is important, and
    can be repeated as many times as necessary, before finally calling
    :func:`.finalise`. ::

        arb_db_process.init_seq_file('SSU_Ref_...fas')
        accessions = ['NC_123456', ...]
        arb_db_process.put_ids(accessions)
        seqs = []
        for acc in accessions:
            seq = arb_db_process.get_seq()
            seqs.append(seq)
        arb_db_process.stop()

    Once no more sequence files need to be read, call ::

        arb_db_process.finalise()

    """

    def __init__(self):
        multiprocessing.Process.__init__(self)
        # Single control channel: file names, accession lists and the
        # None / 'END' sentinels all travel down this queue (see run()).
        self.seq_db_queue = multiprocessing.Queue()
        # Sequences are returned to the client over this pipe.
        out_seq_pipe, in_seq_pipe = multiprocessing.Pipe()
        self._out_seq_pipe = out_seq_pipe
        self._in_seq_pipe = in_seq_pipe

    def init_seq_file(self, path):
        """Initialise a sequence file for reading. Should be called by client
        thread immediately after thread is started and before :func:`.put_ids`
        is called.

        Internally, (i.e. in the SeqDB subprocess) create a new
        :class:`ArbIO.ArbIO` instance, on the sequence file, and reads sequences
        using that.
        """
        self.seq_db_queue.put(path)

    def finalise(self, ids=None):
        """Shut the server loop down permanently.

        :param ids: unused; kept (now with a default) for backward
                    compatibility with existing callers.
        """
        self.seq_db_queue.put('END')

    def get_seq(self):
        """Used by client threads, to retrieve a sequence from the SeqDB
        process."""
        # BUG FIX: previously read the non-existent attribute
        # `self.out_seq_pipe` (the attribute is `self._out_seq_pipe`).
        return self._out_seq_pipe.recv()

    def put_ids(self, ids):
        """Send accession numbers to the SeqDB server, for asynchronous
        retrieval from :func:`.get_seq`.

        :param ids: should be a sequence of accession numbers identifiable in
                    the sequence file."""
        # BUG FIX: previously queued the builtin `id`, not the `ids` argument.
        self.seq_db_queue.put(ids)

    def run(self):
        """Server loop (runs in the child process).

        Reads a sequence file name from the queue, then repeatedly reads
        accession lists, sending each fetched sequence down the pipe.
        ``None`` ends the current file; ``'END'`` ends the loop entirely.
        """
        get = self.seq_db_queue.get
        seqFileName = get()
        send_seq = self._in_seq_pipe.send
        while seqFileName != 'END':
            ArbDB = ArbIO.ArbIO(inHandle=seqFileName, index=True)
            fetch = ArbDB.fetch
            accessions = get()
            while accessions is not None:
                # Stream straight from fetch(); no need to build a list first.
                for seq in fetch(accessions):
                    send_seq(seq)
                accessions = get()
            ArbDB.close()
            seqFileName = get()

    def stop(self, ids=None):
        """Stop reading from seq_file, waiting for either a new seq file, or
        :func:`.finalise` to be called.

        :param ids: unused; given a default because callers in this module
                    invoke ``stop()`` with no argument.
        """
        self.seq_db_queue.put(None)

    def __del__(self):
        self.seq_db_queue.close()
        self._out_seq_pipe.close()
        self._in_seq_pipe.close()

class HMMBuilder:
    """More of a checker than a builder. Provides methods to check whether or
    not to rebuild an HMM, and manages Locks, Pipes and Queues controlling their
    building."""

    def __init__(self):
        # Lock serialises console output between workers; Q carries
        # [table, name, parentName] lookup requests to the TaxDB thread,
        # which answers back down tax_pipe.
        self.Lock = multiprocessing.RLock()
        self.Q = multiprocessing.Queue()
        self.tax_pipe, self.out_pipe = multiprocessing.Pipe()

    def __del__(self):
        sys.stdout.flush()
        self.Q.close()
        self.tax_pipe.close()
        self.out_pipe.close()

    def checkhmm(self, cwd, hmmName, NSeqs):
        """Inspect an existing HMM file and decide whether to rebuild it.

        :param cwd: directory containing the HMM file.
        :param hmmName: the HMM file's name within ``cwd``.
        :param NSeqs: number of sequences the HMM should have been built from.
        :returns: the tuple ``(build, taxid, rank)``.
        """
        taxid = None
        rank = None     # ensure defined even if no NAME line is encountered
        OTUName = hmmName[:hmmName.rfind('.')]
        if os.path.getsize(os.path.join(cwd, hmmName)) != 0:
            handle = open(os.path.join(cwd, hmmName), 'r')
            top_line = handle.readline()
            out = top_line
            change = False
            countOfSlash = 0
            recv = self.tax_pipe.recv
            for line in handle:
                if line.startswith('NAME'):
                    taxid, rank = self.receiveTaxID(recv(), OTUName)
                    assigned_name = line[4:].strip()
                    if 'alignment' in assigned_name:
                        line = line.replace('alignment', taxid)
                        change = True
                    elif str(assigned_name) != str(taxid):
                        print("changing name from {0} to {1} in HMM"
                              .format(assigned_name, taxid))
                        line = line.replace(assigned_name, taxid)
                        change = True
                elif line.startswith('NSEQS'):
                    HMMnseqs = int(re.split(r'\s', line, 1)[1].strip())
                    if HMMnseqs != NSeqs:
                        # Sequence count changed: rebuild.
                        # BUG FIX: was `return True, taxid` (a 2-tuple) while
                        # every caller unpacks three values; also close the
                        # handle before returning.
                        handle.close()
                        return True, str(taxid), rank
                elif '//' in line:
                    # More than one '//' record terminator means several
                    # models were concatenated into this file.
                    # BUG FIX: an always-true `line.startswith('')`
                    # placeholder used to shadow this branch, so the count
                    # never advanced.
                    countOfSlash += 1
                out += line
            handle.close()
            if countOfSlash > 1:
                # Multiple concatenated models: delete and rebuild.
                build = True
                os.remove(os.path.join(cwd, hmmName))
            elif change:
                # Only the NAME line changed: rewrite in place, no rebuild.
                with open(handle.name, 'w') as handle:
                    handle.write(out)
                build = False
            else:
                build = False
            del(out)
        else:
            # Zero-byte HMM: remove the stub and rebuild from scratch.
            build = True
            os.remove(os.path.join(cwd, hmmName))
        if taxid is None:
            taxid, rank = self.receiveTaxID(self.tax_pipe.recv(), OTUName)
        sys.stdout.flush()
        return build, str(taxid), rank

    def receiveTaxID(self, tax_name_ID, OTU_name):
        """Receive information from the :class:`TaxDB` process.

        :param tax_name_ID: A dictionary sent by pipe from
                            :func:`TaxDB._tax_ID_thread`.
        :param OTU_name: is the name of the OTU according to the ARB and the
                         SSUMMO database.

        :returns: The tuple ``(tax_ID, rank)``, if ``tax_name_ID`` is unique
                  and ``OTU_name`` matches. If the received tax_id is `0`, then
                  the returned `tax_ID` is the matched OTU name, instead of the
                  taxonomic ID.
        """
        # BUG FIX: this method originally referenced the undefined name
        # `OTUName` (the parameter is `OTU_name`), raising NameError at
        # runtime. `next(iter(...))` replaces Py2-only `.keys()[0]`.
        if len(tax_name_ID) == 1 and next(iter(tax_name_ID)) == OTU_name:
            pass
        else:
            self.Q.put(tax_name_ID)   # Put it back for another process to get.
            recv = self.tax_pipe.recv
            put = self.Q.put
            tax_name_ID = recv()
            while next(iter(tax_name_ID)) != OTU_name:
                put(tax_name_ID)
                tax_name_ID = recv()
        x = next(iter(tax_name_ID.values()))
        taxid, rank = str(x[0]), x[1]
        if taxid == '0':
            # No NCBI ID known: fall back to the OTU name itself.
            taxid = OTU_name
        return (taxid, rank)

    def buildhmm(self, cwd, nSeqs):
        """
        Checks an HMM given in cwd, returns True if it should be rebuilt, along
        with the NCBI taxonomic identifier and its rank.

        :param cwd: full path to the taxon's directory.
        :param nSeqs: number of sequences belonging to the taxon.
        :returns: ``(build, taxid, rank)``.
        :raises KeyError: if no kingdom can be determined from the path.
        """
        taxPath = cwd.rstrip(os.path.sep).split(os.path.sep)
        # Split the path into a list
        pwd = taxPath[-1]   # pwd = last directory name
                            # cwd = full path
        parentName = taxPath[-2]
        if re.search(r'(Bacteria)|(Eukaryota)|(Archaea)', pwd):
            parentName = 'root'
        hmmName = pwd + '.hmm'
        if 'Archaea' in taxPath:
            table = 'Prokaryotes'
        elif 'Bacteria' in taxPath:
            table = 'Prokaryotes'
        elif 'Eukaryota' in taxPath:
            table = 'Eukaryotes'
        else:
            raise KeyError("can't determine the kingdom!")
        taxid = None
        # For later retrieval of taxID. The other end of this Q is in
        # TaxDB._tax_ID_thread
        self.Q.put([table, pwd, parentName])
        contents = os.listdir(cwd)
        if hmmName in contents:  # If HMM already there, run some checks...
            build, taxid, rank = self.checkhmm(cwd, hmmName, nSeqs)
            return build, str(taxid), rank
        else:
            taxid, rank = self.receiveTaxID( self.tax_pipe.recv() , pwd )
            build = True
        return build, taxid, rank

def buildhmms(path, in_file, tdict, TaxDBObj, seq_db, node='all', threads=None):
    """Traverse the (indexed) directory hierarchy and runs hmmalign, hmmbuild
    in each of the directories from top onwards.

    :param path: root of the SSUMMO directory hierarchy to traverse.
    :param in_file: unused here; kept for interface compatibility.
    :param tdict: nested taxonomy dictionary (see taxonomic_dictionary).
    :param TaxDBObj: TaxDB instance used to resolve taxonomic IDs.
    :param seq_db: a started :class:`SeqDB` process serving sequences.
    :param node: 'all', or the name of a single top-level node to build.
    :param threads: sequence of HmmbuildProcess workers (required despite
                    the default; callers always pass it).
    """
    from os.path import exists, join, sep
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    # # Sort out the nodes to be traversed.
    dirs = []
    print('Initiating sequence databases')
    if node == 'all':
        if path.endswith(sep):
            print("Starting with the subdirectories of {0}".format(path))
            for key in tdict.keys():
                dirs.append(join(path, key))
        elif not re.search(r'(Bacteria)|(Eukaryota)|(Archaea)', path):
            for folder in tdict.keys():
                dirs.append( join( path , folder ))
        else:
            dirs = [path]
    elif node in tdict.keys():
        dirs = [join(path, node)]
    else:
        sys.stderr.write("%s is not a recognised node. Available nodes are:-" % node)
        sys.stderr.write("\t'Bacteria', 'Archaea' & 'Eukaryota'")
        raise KeyError
    if len(dirs) == 0:
        dirs.append(path)
    # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    print("Traversing these {0} directories:".format(len(dirs)))
    print('\n'.join(dirs))
    for folder in dirs:
        try:
            domain = re.search(r'(Bacteria)|(Eukaryota)|(Archaea)', folder).group()
        except AttributeError:
            print("Can't find domain from start dir: {0}".format(folder))
            continue
        Builder = HMMBuilder( )
        # BUG FIX: Builder.Q / Builder.out_pipe were read before any
        # HMMBuilder instance existed (NameError on first use); the lookup
        # now happens after construction, inside the loop.
        build_q, build_p = Builder.Q, Builder.out_pipe
        seq_db.init_seq_file(join(CONFIG.top, domain + 'noGaps.fas'))
        tax_ID_thread = TaxDBObj.create_thread(build_q, build_p)
        tax_ID_thread.start()
        print("Building HMMs within {0} domain".format(folder))
        threadInd = -1
        org = folder[folder.rfind('/')+1:]
        node = tdict
        if org in tdict.keys():
            node = tdict[org]
        for temp_path, taxNode in dict_walk(folder, node, random=True):
            org = temp_path[temp_path.rfind(sep)+1:]
            accessions = get_accessions(taxNode, accessions=[])
            nseqs = len(accessions)
            fullHmmName = join(temp_path, org + '.hmm')
            if not exists(temp_path):
                os.makedirs(temp_path)
            if exists(join(temp_path, 'lock.file')):
                # Another process is already working on this node.
                continue
            try:
                x = open(join(temp_path, 'lock.file'), 'w')
                x.close()
                tobuild, taxid, rank = Builder.buildhmm(temp_path, nseqs)
                if tobuild:
                    seq_db.put_ids(accessions)
                    # Round-robin over the worker pool.
                    if threadInd >= len(threads) - 1:
                        threadInd = 0
                    else:
                        threadInd += 1
                    Builder.Lock.acquire()
                    print('Submitting hmmbuild job with {0} accessions to {1}'
                          .format(nseqs, temp_path))
                    sys.stdout.flush()
                    Builder.Lock.release()
                    threads[threadInd].acquire()
                    threads[threadInd].put_info((fullHmmName, nseqs, taxid))
                    for N in xrange(nseqs):
                        # Retrieve seq from one thread and send it down a pipe.
                        threads[threadInd].send_seq(seq_db.get_seq())
            finally:
                # Always release the lock file, even on error.
                os.remove(join(temp_path, 'lock.file'))
        Builder.Q.put('STOP')
        seq_db.stop()
        tax_ID_thread.join()
        del(Builder)
    for job in threads:
        job.inQueue.put(['STOP', None, None])
        job.join()
    seq_db.finalise('END')

def presshmms( tdict ):
    """Traverses tdict, pressing sub-node HMMs into the parent node.

    For each internal node, concatenates all child ``<child>.hmm`` files into
    ``<node>_to_press.hmm``, runs ``hmmpress -f`` on it, then renames the
    pressed ``.h3m/.h3i/.h3f/.h3p`` outputs to ``<node>.<ext>``.
    """
    prefix = CONFIG.hmmerdir
    for path, folders in my_walk(CONFIG.arbDBdir, tdict):
        contents = os.listdir(path)
        directory = path[path.rfind( os.path.sep )+1:]
        # BUG FIX: originally `dir + '.hmm'` -- `dir` is the builtin
        # function, not a local, so this raised TypeError on every call.
        out_name = directory + '.hmm'
        if folders == []:
            continue    # leaf node: nothing to press
        elif (out_name+'.h3m' in contents and out_name+'.h3i' in contents
              and out_name+'.h3f' in contents and out_name+'.h3p' in contents):
            # NOTE(review): all four pressed outputs already exist, yet the
            # original fell through and re-pressed anyway; behaviour kept.
            pass
        path = path.strip()
        if path.endswith('/'):
            path = path[:-1]
        print("pressing {0} dirs in {1}".format(len(folders), path))
        with open(os.path.join(path, '{0}_to_press.hmm'.format(directory)), 'w') as out_handle:
            for folder in folders:
                temp_path = os.path.join(path, folder)
                # NOTE(review): the original called builder.buildhmm(temp_path)
                # here, which always raised TypeError (buildhmm requires an
                # nSeqs argument); removed -- confirm no rebuild is wanted
                # during pressing.
                with open(os.path.join(temp_path, folder + '.hmm'), 'r') as in_handle:
                    read = in_handle.read()
                    # hmmpress needs each model newline-terminated.
                    if read.endswith('\n'):
                        out_handle.write(read)
                    else:
                        out_handle.write(read + '\n')
        cmd = [ os.path.join(prefix, 'hmmpress'), '-f',
                os.path.join(path, directory + '_to_press.hmm')]
        subprocess.call(cmd, shell=False)
        os.remove(os.path.join(path, directory + '_to_press.hmm'))
        for ext in ['h3m', 'h3i', 'h3f', 'h3p']:
            os.rename(os.path.join(path, directory + '_to_press.hmm.' + ext),
                      os.path.join(path, directory + '.' + ext))
    return

def taxonomic_dictionary(file_handle='', filetype='dir_index'):
    """As input, give a file handle to a fasta file from the ARB. This will
    return a dictionary of dictionaries representing all the taxonomic
    identities of all the contained sequences.

    If using the pickled dictionary file (taxIndex in CONFIG.py), be sure to
    open the file in BINARY mode, using the 'b' flag.

    e.g. ::

        with handle as file(taxIndex, 'rb'):
            tdict = taxonomic_dictionary(handle)

    :param file_handle: open handle on either a fasta file (filetype='fasta')
                        or a directory-index file; may be '' when a pickled
                        index already exists under CONFIG.top.
    :param filetype: 'fasta' or 'dir_index'.
    :returns: nested dict keyed by taxonomic rank names; nodes may carry an
              'accessions' list of member sequence accessions.
    :raises IOError: if no usable index/input can be found.
    """
    folder_hierarchy = {}
    # Shortcut 1: handed the pickled index file itself -- just unpickle it.
    # (`file` is the Python 2 built-in file type.)
    if type(file_handle) == file and filetype != 'fasta':
        print("reading taxonomies from file: {0}".format(file_handle.name))
        if file_handle.name == os.path.join( CONFIG.top, CONFIG.taxIndex):
            return pickle.load(file_handle)
    # Shortcut 2: a pickled index already exists on disk -- load that
    # instead of re-parsing any input.
    elif CONFIG.taxIndex in os.listdir(CONFIG.top) and filetype != 'fasta':
        with file(os.path.join( CONFIG.top, CONFIG.taxIndex), 'rb') as in_handle:
            print('Loading {0}'.format(in_handle.name))
            results_dict = pickle.load(in_handle)
#            if len( results_dict ) < 6:
#                print("{0} is incomplete. Deleting...".format(in_handle.name))
#                os.remove( in_handle.name )
        return results_dict
    if filetype == 'fasta':
        # Parse fasta headers of the form '>ACCESSION rank1;rank2;...'.
        for line in file_handle:
            if line.startswith('>'):
                accession, order = re.split(r'\s+', line[1:].rstrip(), 1)
                # Drop quotes and turn '/' into '.' so rank names are safe
                # to use as directory names.
                order = re.sub("(\/)|(['\"])", lambda m: '' if m.groups()[1] else '.', order)
                order = order.split(';')
                node = folder_hierarchy
                # Descend one level per taxonomic rank, creating as needed.
                for dir in order:
                    if dir == '':
                        continue
                    elif dir not in node:
                        node[dir] = {}
                    node = node[dir]
                if 'accessions' in node.keys():
                    node['accessions'].append(accession)
                else:
                    node.update( { 'accessions' : [accession] } )

            else:
                continue
        # Cache the parsed hierarchy so later calls hit shortcut 2.
        with file(os.path.join( CONFIG.top, CONFIG.taxIndex), 'wb') as out_handle:
            print('pickling TaxDict into ', out_handle.name)
            pickle.dump(folder_hierarchy, out_handle, -1)
    elif filetype == 'dir_index' or 'file_list' in file_handle.name:
        # Each line of the index file is one relative directory path.
        if file_handle == '':
            raise IOError( 'Please run --indexTaxa first' )
        for line in file_handle:
            order = line.rstrip().split( os.path.sep )
            node = folder_hierarchy
            for dir in order:
                if dir == '':
                    continue
                elif dir not in node:
                    node[dir] = {}
                node = node[dir]
            else:
                # NOTE(review): for/else with a bare `continue` is a no-op;
                # retained verbatim.
                continue
        with file(os.path.join(CONFIG.top, CONFIG.taxIndex), 'wb') as out_handle:
            pickle.dump(folder_hierarchy, out_handle, -1)
    else:
        raise IOError("Unrecognised index file")
    print("All taxonomies read succesfully.")
    return folder_hierarchy

def fasta_to_index_file(in_file):
    """Reads fasta file into dictionary, outs it to file of directory indexes.

    :param in_file: open handle on a fasta file with taxonomy headers.
    :returns: the nested taxonomy dictionary built from the headers.
    """
    ## turn fasta file into dictionary.
    tdict = taxonomic_dictionary(file_handle=in_file, filetype='fasta')
    print("traversing {0} according to dictionary {1}".format(CONFIG.arbDBdir, 'node'))
    # `with` guarantees the handle is closed; the unused `top_len` local from
    # the original has been removed.
    with open('file_list.txt', 'w') as out_file:
        for path, dirs in my_walk('', tdict):
            out_file.write(path+'\n')
        print("saved relative file paths to {0}".format(out_file.name))
    return tdict

def hmm_checker(in_file, options=None):
    """Walks all directories and prints out the number of sequences.

    :param in_file: file handle for taxonomic index.
    :param options: dictionary of configuration options, passed to
                    :func:`find_start`. Defaults to a fresh
                    ``{'-start': CONFIG.arbDBdir}``.
    :returns: the tuple ``(nseqs_left, nhmmsleft)``.

    Usage :: 

        from dictify import hmm_checker
        index_handle = file('file_list.txt', 'r')
        remaining_seqs, remaining_hmms = hmm_checker(index_handle)

    This same command can be run directly from the command line with ::

        python dictify.py check Bacteria Archaea

    :note: This function relies on the path configured in CONFIG.py
           :data:`ssummo.CONFIG.arbDBdir`.
    """
    from count_hmms import countseqs
    # BUG FIX: the original used a mutable dict as the default argument
    # (shared across calls); build a fresh one per call instead.
    if options is None:
        options = {'-start': CONFIG.arbDBdir}
    done_hmm_count = 0
    misplaced_hmm_count = 0
    done_misplaced_count = 0
    ndirs = 0
    tot_seqs = 0
    nseqs_left = 0
    nleaves = 0
    tax_dict = taxonomic_dictionary(in_file, 'dir_index')
    node, startDir = find_start(tax_dict, options)
    # BUG FIX: `top` was never assigned when node == 'all', so the my_walk()
    # call below raised NameError; default to the configured start directory.
    top = options['-start']
    if node == 'all':
        node = tax_dict
    elif node in ('Bacteria', 'Archaea', 'Eukaryota'):
        top = os.path.join(CONFIG.arbDBdir, node)
        node = tax_dict[node]
    for path, names in my_walk(top, node, random=False):
        if len(names) == 0:
            nleaves += 1    # no children => leaf of the taxonomy
        cwd = path[path.rfind('/')+1:]
        hmmName = cwd + '.hmm'
        nseqs = countseqs(path)
        done = 0
        try:
            # HMM in the expected place: <path>/<dirname>.hmm
            right_place_size = os.path.getsize(os.path.join(path, hmmName))
            if right_place_size > 0:
                done_hmm_count += 1
                done = 1
        except OSError:
            print("Can't find {0}".format(os.path.join(path, cwd)))
        try:
            # HMM misplaced beside the directory: <path>.hmm
            wrong_place_size = os.path.getsize( path + '.hmm' )
            if wrong_place_size > 0:
                misplaced_hmm_count += 1
                if done == 0:
                    done_misplaced_count += 1
                    print('misplaced hmm in {0}'.format(path))
                    print('But we already have it in the correct place...')
                    os.rename(path+'.hmm', os.path.join(path, hmmName))
            else:
                os.remove(path + '.hmm')    # zero-byte stub: delete it
        except OSError:
            pass
        if done == 0:
            nseqs_left += nseqs
        tot_seqs += nseqs
        ndirs += 1
    try:
        print('{0} out of {1} sequences processed. {2:.0f}% complete.'
              .format(tot_seqs-nseqs_left, str(tot_seqs),
                      (100. / tot_seqs) * float(tot_seqs - nseqs_left) ))
    except ZeroDivisionError:
        print("didn't traverse directories properly")
    nhmmsleft = ndirs - done_hmm_count - done_misplaced_count
    print("Number of hmms still to build:\t%s" % str(nhmmsleft))
    print('Total number of directories:  \t%s' % str(ndirs))
    print('Number of hmms done right:    \t%s' % str(done_hmm_count))
    print('number misplaced hmms:        \t%s' % str(misplaced_hmm_count))
    print('number done in wrong place, but not right place\t%s'
          % str(done_misplaced_count))
    print("Number of leaves:             \t%s" % str(nleaves))
    return nseqs_left, nhmmsleft


def getSequences(ArbIOobj, accessions, out_pipe):
    """SeqIOIndex - An Index created by IndexDB / SeqIO.index
    accessions - a list of accessions to be returned in fasta
    format"""
    send = out_pipe.send
    for accession in accessions:
        if len(accession) > 0:
            seq = ArbIOobj[accession].format('fasta')
            send( seq )
    out_pipe.close()
    return

def splitTaxa(SeqFile):
    """Split SeqFile into one fasta file per top-level taxon under CONFIG.top
    (e.g. Bacteria.fas), grouping sequences via the pickled taxonomy index.

    :param SeqFile: path to the full ARB sequence file.
    """
    childPipe, parentPipe = multiprocessing.Pipe()
    tdict = taxonomic_dictionary()
    # Map each top-level taxon name to its member accessions.
    accDict = {}
    for key in tdict.keys():
        accDict.update({ key : get_accessions(tdict[key], accessions=[])})
    print('keys', accDict.keys())
    # NOTE: ArbIO opens the file by name itself; the redundant extra
    # file handle the original opened (and never read) has been removed.
    ArbDB = ArbIO.ArbIO(inHandle=SeqFile, index=True)
    for kingdom, accessions in accDict.items():
        print('getting {0} sequences from {1}'.format(len(accessions), kingdom))
        seqproc = multiprocessing.Process(target=ArbDB.pipeSequences,
                                          args=(accessions, childPipe))
        seqproc.start()
        with open(os.path.join(CONFIG.top, kingdom+'.fas'), 'w') as kingfile:
            sys.stdout.write('Saving {0} sequences to {1}.\n'
                             .format(kingdom, kingfile.name ) )
            for i in xrange(len(accessions)):
                seqRec = parentPipe.recv()
                try:
                    kingfile.write( '>'+seqRec.id+'\n' )
                    seq = seqRec.seq.tostring()
                # BUG FIX: `except AttributeError, e` is Python-2-only syntax
                # and `e` was unused.
                except AttributeError:
                    sys.stderr.write('Problem with ArbIO.ArbIO\n')
                    break
                # Wrap the sequence at 80 columns.
                # BUG FIX: the range originally ran to len+80, which wrote a
                # spurious blank line after every record.
                for ind in xrange(0, len(seqRec), 80):
                    kingfile.write(seq[ind:80+ind]+'\n')
        seqproc.join()

def deleteNonUniques(tdict, tDB):
    """Delete OTU nodes whose names appear in the TaxDB NonUniques table.

    :param tdict: nested taxonomy dictionary (pruned in place).
    :param tDB: TaxDB instance; its cursor/connection are consumed and closed.
    :returns: ``(deletedAccessions, tdict)`` -- the accessions removed and the
              pruned dictionary.
    """
    tDB.cur.execute('SELECT * FROM NonUniques;')
    NonUniqueRows = tDB.cur.fetchall()
    tDB.cnx.close()
    NonUniques = set()
    names = set()
    for row in NonUniqueRows:
        root, name, parentName = row[:3]
        print('Looking for {0} with type: {1}'.format(name, type(name)))
        for path, dirs in my_walk('', tdict):
            if name in path:
                print(path)
        NonUniques.add( (root, name, parentName) )
        names.add(name)
        print((root, name, parentName))
    # BUG FIX: a leftover debugging `exit()` here aborted the whole program,
    # making everything below (and the return value) unreachable.
    print("We have {0} non-unique nodes to start with".format(len(NonUniques)))
    print('names:', names)
    count = 0
    deletedAccessions = []
    counter = 0
    tot = 0
    for path, dirs in my_walk( '', tdict ):
        tot += 1
        pathList = path.rstrip().split( os.path.sep )
        root = pathList[0]
        Name = pathList[-1]
        if root == Name:
            parentName = 'root'
        else:
            parentName = pathList[-2]
        if Name in names:
            print('found {0} with parentName: {1}'.format(Name, parentName))
            if (root, Name, parentName) in NonUniques:
                print('Found: {0}'.format((root, Name, parentName, )))
            else:
                print("Didn't find", root, Name, parentName)
            count += 1
            # Walk down to the parent node so the child can be deleted.
            node = tdict
            for Dir in pathList[:-1]:
                node = node[Dir]
            # BUG FIX: pass a fresh accumulator -- every other call site in
            # this module uses get_accessions(..., accessions=[]), implying a
            # mutable default that would otherwise accumulate across calls.
            deletedAccessions += get_accessions(node[pathList[-1]], accessions=[])
            del(node[pathList[-1]])
        else:
            for row in NonUniques:
                if Name == row[1]:
                    print('found: {0}'.format((root, Name, parentName, )))
        if counter > 100 and counter < 150:
            print('counter-range: {0}'.format((root, Name, parentName)))
        counter += 1
    print("Deleted {0} accession numbers from {1} OTUs"
          .format(len(deletedAccessions), count))
    print("Traversed {0} OTUs".format(tot))
    return (deletedAccessions, tdict)

def gapbgone(files, threshold='100'):
    """Run gapbgone.pl over each sequence file to strip gap columns, then
    rebuild the file's byte index.

    :param files: iterable of file names or taxon names ('.fas' is appended
                  when the name itself does not exist on disk).
    :param threshold: percentage gap threshold handed to gapbgone.pl -p.
    :raises AssertionError: if a derived '.fas' file does not exist
                            (exception type kept from the original so
                            existing callers still catch it).
    """
    for node in files:
        if os.path.exists(node):
            fileName = node
        else:
            fileName = node+'.fas'
            # IDIOM FIX: was assert-then-re-raise; a plain check also keeps
            # working under `python -O` (asserts are stripped there).
            if not os.path.exists(fileName):
                msg = "Can't find {0}\n".format(fileName)
                msg += "Make sure you've run "
                msg += "'python dictify.py --splitTaxa <filename>' first."
                raise AssertionError(msg)
        backupFile = fileName + '.bak'
        print("Backing up {0} to {1}".format(fileName, backupFile))
        os.rename( fileName , backupFile )
        print("Running gapbgone.pl on {0}. Resaving to {1}"
              .format(backupFile, fileName))
        with open(fileName, 'w') as outHandle:
            cmd = [os.path.join(CONFIG.top, 'bin', 'gapbgone.pl'), '-p',
                    str(threshold), backupFile]
            GapGo = subprocess.Popen(cmd, shell=False, stdout=outHandle,
                                     stderr=subprocess.PIPE)
            retCode = GapGo.wait()
            if retCode != 0:
                print(GapGo.stderr.read())
        # The old byte index is stale once gaps are removed: rebuild it.
        prefix = fileName[:fileName.rfind('.')]
        if os.path.exists( prefix + '.pklindex' ):
            os.remove( prefix + '.pklindex' )
        IO = ArbIO.ArbIO( fileName, index=True)
        IO.index()
        IO.close()
def IIndexTaxa(options):
    """Command handler: build the taxonomy index from the fasta file named by
    the '-in' option.

    :param options: mapping of command-line options.
    :raises IndexError: if no input file name was supplied.
    """
    try:
        in_file_name = options['-in']
    # BUG FIX: a missing dict key raises KeyError, not IndexError, so the
    # friendly message was never shown; catch both in case `options` is a
    # custom mapping type.
    except (KeyError, IndexError):
        raise IndexError("Provide a fasta sequence file with taxonomies in the headers please!")
    if in_file_name.strip()=='':
        raise IndexError("Provide a fasta sequence file with taxonomies in the headers please!")
    with open(in_file_name, 'r') as in_file:
        fasta_to_index_file( in_file )
    print("Taxonomy Index file '{0}' has been created.".format(CONFIG.taxIndex))
    return 0

def IRewrite(options):
    """This rewrites all ARB sequences to a file in a specified format.
    This'll also dump a byte-index of the output sequences to the same
    name as the output file. This causes problems if writing to standard
    out, as won't have write permission to save to /dev/stdout.pklindex,
    so will then save byte index to the name of the input file(.pklindex).
    """
    inSeqFile = options['-in']
    outSeqFile = options['-out']
    # Work out the index/output prefix from whatever '-out' turned out to be.
    if isinstance(outSeqFile, file) and outSeqFile != sys.stdout:
        prefix = outSeqFile.name.rsplit('.', 1)[0]
    elif outSeqFile == sys.stdout:
        # Can't write /dev/stdout.pklindex; derive prefix from the input.
        prefix = options['-in'].rsplit('.', 1)[0]
    else:
        # Assume a pathname; strip the extension ('.fas' is re-added below).
        prefix = outSeqFile[:outSeqFile.rfind('.')]
    if not os.path.exists(inSeqFile):
        print("{0} does not exist!".format(inSeqFile))
        print("Make sure you enter either a fasta sequence or an ARB sequence file")
        print("Exiting...")
        exit(1)
    fasta_name = prefix + '.fas'
    writer = ArbIO.ArbIO(inHandle=inSeqFile, out=fasta_name, index=True)
    writer.dumpAndIndex(writer.outFile)
    print('Saved an index of the arb sequence file: {0}.'
          .format(prefix+'.pklindex'))
    print('Please keep in the same directory as the sequence file')
    writer.close()
    return 0

def IIndexSeqs(options):
    """Command handler: (re)build the byte index for the sequence file named
    by '-in' and pickle it next to the file as <stem>.pklindex."""
    seq_path = options['-in']
    stem = seq_path[:seq_path.rfind('.')]
    index_path = stem + '.pklindex'
    # Drop any stale index so ArbIO builds a fresh one.
    if os.path.exists(index_path):
        os.remove(index_path)
    reader = ArbIO.ArbIO(seq_path, index=True)
    byte_index = reader.index()
    with open(index_path, 'wb') as out_handle:
        pickle.dump(byte_index, out_handle, -1)
    return 0

def IDeleteNonUniques(options):
    """Command handler: prune non-uniquely-named OTUs from the taxonomy and
    save their sequences to NonUniqueOTUNameSeqs.fas under CONFIG.top."""
    join = os.path.join
    with open(join(CONFIG.top, CONFIG.taxIndex), 'rb') as index_handle:
        taxonomy = pickle.load(index_handle)
    print('Deleting NonUnique OTU names from the taxonomy database')
    tax_db = TaxDB()
    removed_accessions, taxonomy = deleteNonUniques(taxonomy, tax_db)
    seq_handle = open(options['-in'], 'r')
    arb_reader = ArbIO.ArbIO(seq_handle, index=True)
    child_end, parent_end = multiprocessing.Pipe()
    pump = multiprocessing.Process(target=arb_reader.pipeSequences,
                                   args=(removed_accessions, child_end))
    pump.start()
    with open(join(CONFIG.top, 'NonUniqueOTUNameSeqs.fas'), 'w') as out_handle:
        print('Saving sequences assigned to Non Unique OTU names to NonUniqueOTUNameSeqs.fas')
        # One pipe message per removed accession.
        for _ in range(len(removed_accessions)):
            out_handle.write(parent_end.recv())
    pump.join()
    return 0

def ISplitTaxa(options):
    """Command handler: split the '-in' sequence file into one fasta file per
    kingdom (see splitTaxa)."""
    seq_file = options['-in']
    if options['-out'] != sys.stdout:
        print("No single output file here. Creating Bacteria.fas, Archaea.fas, Eukaryota.fas and associated .fasindex files")
    splitTaxa(seq_file)
    return 0

def IGapBGone(options):
    """Command handler: strip gap columns from either the file named by '-in'
    or, when '-in' is empty, from every top-level taxon in the index."""
    if options['-in'] == '':
        # No explicit input: target every top-level taxon name in the index.
        with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as handle:
            targets = list(pickle.load(handle).keys())
    else:
        targets = [options['-in']]
    gapbgone(targets, options['-gapThreshold'])
    return 0

def IBuildHmms(options):
    """Command handler: start the sequence server and a pool of hmmbuild
    workers, then build HMMs across the taxonomy from the chosen start node.

    NOTE(review): HmmbuildProcess is not defined in this module chunk --
    presumably provided elsewhere; confirm the import.
    """
    ncpus = options['-ncpus']
    seq_db = SeqDB()
    seq_db.start()
    sem = multiprocessing.Semaphore(ncpus)
    print("Submitting hmmbuild jobs across {0} processors".format(ncpus))
    workers = [HmmbuildProcess(sem) for _ in xrange(ncpus)]
    for worker in workers:
        worker.start()
    with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as taxIndFile:
        tdict = taxonomic_dictionary(taxIndFile, 'dir_index')
    tax_db = TaxDB()
    node, startDir = find_start(tdict, options)
    buildhmms(startDir, options['-in'], node, tax_db, seq_db, threads=workers)
    seq_db.join()
    return 0

def ICheck(options):
    """Report how much of the HMM database remains to be built.

    Bug fix: the original unpacked hmm_checker's result into ``seqs, hmms``
    but then printed the undefined names ``seqs_left``/``hmms_left``,
    raising NameError on every invocation. The unpacked names now match.

    Returns 0. (Assumes hmm_checker returns the pair
    (remaining sequence count, remaining hmm count) — TODO confirm.)
    """
    tax_index = os.path.join(CONFIG.top, CONFIG.taxIndex)
    seqs_left, hmms_left = hmm_checker(tax_index, options = options)
    print("{0} sequences left to do".format(seqs_left))
    print("{0} hmms left to build".format(hmms_left))
    return 0

def IPressHmms(options):
    """Prune the taxonomy index down to the three kingdom roots and run
    presshmms over the ARB database directory.

    Bug fix: the original opened the tax index as ``input_file`` but passed
    the unrelated path string ``options['-in']`` to taxonomic_dictionary;
    the opened handle is now actually used. Also deletes keys from a
    snapshot of the key list, since removing entries while iterating a
    live keys() view raises RuntimeError on Python 3.

    Returns 0.
    """
    # open() instead of the Python-2-only file() builtin.
    with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as input_file:
        tdict = taxonomic_dictionary(input_file)
    # Keep only the kingdom-level roots.
    for key in list(tdict.keys()):
        if key not in ('Bacteria', 'Archaea', 'Eukaryota'):
            del tdict[key]
    presshmms(CONFIG.arbDBdir, tdict)
    return 0

def IReduceToGenus(options):
    """Reduce the pickled taxonomy to genus level and save the result to a
    new ``*_toGenus.pkl`` index next to the original.

    Prints node counts before and after so the user can see how many
    species/subspecies nodes were removed. Returns 0.
    """
    # open() instead of the Python-2-only file() builtin.
    with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as inFile:
        tdict = pickle.load(inFile)
    taxDBObj = TaxDB()
    startCounter = sum(1 for _ in dict_walk('', tdict))
    print('Starting with {0} nodes'.format(startCounter))
    outDict = reduceToGenus(tdict, taxDBObj)
    # Count the *returned* dictionary: the original re-walked tdict, which
    # reports zero deletions whenever reduceToGenus returns a new object
    # instead of mutating in place.
    endCounter = sum(1 for _ in dict_walk('', outDict))
    print('Deleted {0} nodes to end with {1} nodes'
          .format(startCounter - endCounter, endCounter))
    outIndex = CONFIG.taxIndex[:CONFIG.taxIndex.rfind('.')] + '_toGenus.pkl'
    with open(os.path.join(CONFIG.top, outIndex), 'wb') as outFile:
        pickle.dump(outDict, outFile, -1)
    return 0

def IRemoveAccessions(options):
    """Strip the 'accessions' lists from every node of the taxonomy index
    and pickle the slimmed structure to a version-bumped filename
    (e.g. index.pkl -> index1.pkl -> index2.pkl ...).

    Bug fix: the output file was opened in mode 'rb', so pickle.dump
    failed on an unwritable handle; it is now opened 'wb'. Returns 0.
    """
    # open() instead of the Python-2-only file() builtin.
    with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as inFile:
        tdict = pickle.load(inFile)
    for path, node in dict_walk('', tdict):
        if 'accessions' in node:
            del node['accessions']
    # Bump (or start) a trailing version number on the filename stem.
    prefix, suffix = CONFIG.taxIndex.rsplit('.', 1)
    digit = re.search(r'^(.*)(\d+)$', prefix)
    if digit:
        stem, number = digit.groups()
        prefix = stem + str(int(number) + 1)
    else:
        prefix = prefix + '1'
    save = '.'.join([prefix, suffix])
    with open(os.path.join(CONFIG.top, save), 'wb') as outFile:
        pickle.dump(tdict, outFile, -1)
    print('Saved Bare taxonomic structure to {0}'.format(outFile.name))
    return 0

def IProcessOptions(options):
    """Dispatch each requested command flag to its handler, in pipeline order.

    Consistency fix: '--removeAccessions' is declared in MyOptions and
    implemented by IRemoveAccessions but was never dispatched here; it is
    now wired into the chain. Returns None.
    """
    ######     First step. Build the taxonomy from sequence headers.
    if options['--indexTaxa']:
        IIndexTaxa(options)
    ###### Make sure you do this at least once... From then on, refer to the
    ###### the outfile you specified.
    if options['--rewrite']: # Resave ARB alignment to <out>
        IRewrite(options)
    ########### Next. Index Sequence locations in the arb file.
    if options['--indexSeqs']:
        IIndexSeqs(options)
    ########## Optional, but recommended. Delete Clade names which are presented in the
    ########## ARB taxonomy multiple times.
    if options['--deleteNonUniques']:
        IDeleteNonUniques(options)
    ########## Split the ARB sequence database into separate files, one per kingdom.
    elif options['--splitTaxa']:
        ISplitTaxa(options)
    if options['--gapbgone']:
        IGapBGone(options)
    ########## Third step #########
    elif options['--buildhmms']:
        IBuildHmms(options)
    ############# Anytime ##############
    elif options['--check']:
        ICheck(options)
    elif options['--reduceToGenus']:
        IReduceToGenus(options)
    elif options['--removeAccessions']:
        IRemoveAccessions(options)
    ############# Further development stopped due to being slower ########
    elif options['--presshmms']:
        IPressHmms(options)
    else:
        pass

class MyOptions(Options):
    """Command-line options for dictify.py.

    Declares defaults, single-argument options, boolean command switches
    and their help text, and renders a help screen that lists the commands
    in the order they should be run.

    Bug fixes: `_OrderedList` referenced the undefined name ``left_buf``
    (the attribute is ``leftBuf``), raising NameError whenever an ordered
    help entry exceeded 80 characters; slicing bounds now use integer
    division (``//``) and ``range`` so the generators also work on
    Python 3.
    """

    def __init__(self, *args):
        Options.__init__(self, *args)
        self.options = {  # defaults.
            '-in' : '',
            '-out' : sys.stdout,
            '-gapThreshold' : 100,
            '-ncpus' : multiprocessing.cpu_count() - 1,
            '-start' : CONFIG.arbDBdir
           }
        # Options that consume exactly one argument from the command line.
        self.singleargs = ['-in', '-out', '-gapThreshold', '-ncpus', '-start']
        # Boolean command switches, all off by default.
        self.commands = {
            '--indexTaxa' :     False,
            '--deleteNonUniques': False,
            '--indexSeqs':      False,
            '--splitTaxa':      False,
            '--blast' :         False,
            '--buildhmms' :     False,
            '--check' :         False,
            '--presshmms':      False,
            '--rewrite' :       False,
            '--gapbgone':       False,
            '--reduceToGenus':  False,
            '--removeAccessions': False,
           }
        self.options.update( self.commands )
        # Help text for each command switch.
        self.switches = {
            '--indexTaxa' :     'Creates a binary (pickled) index file for fast indexing of the latest ARB taxononmy, as parsed from their `_tax_` sequence file.',
            '--deleteNonUniques': 'Deletes taxa in the database that are NonUnique.',
            '--indexSeqs':      'Indexes the byte location of every sequence in the ARB sequence / alignment file, to speed searching with a reduced memory footprint.',
            '--splitTaxa':      'Splits the downloaded arbDB sequence alignment file according to taxa found in the pickled index file. Must be run after `--indexTaxa` and before `--removeAccessions`.',
            '--blast' :         'Deprecated, so probably won\'t work, but should be fairly easy to bring back to life.',
            '--buildhmms' :     'Builds all HMMs from directory `arbDB`, specified in CONFIG.py',
            '--check' :         'Checks the status and integrity of HMM database.',
            '--presshmms':      'Further development stopped due to being slower.',
            '--rewrite' :       'Rewrites an ARB sequence file, also dumping a byte index of each sequence location. Rewriting involves changing `.` chars for `-` at the start & end of each sequence, and also discarding sequences with gaps (`.`) in the middle.',
            '--gapbgone':       'Removes columns which are gaps in every sequence. Thanks to Bill Hartmann & his lab for the gapbgone perl script.',
            '--reduceToGenus':  'Reduces the taxonomic dictionary to genus specificity (i.e. deletes species, subspecies etc.).',
            '--removeAccessions':   'Removes accessions from the database index (referenced in CONFIG.py as taxIndex), saving minimised object to new file.',
            }
        # Pipeline order shown at the top of the help screen.
        self.order = ['--indexTaxa', '--rewrite', '--indexSeqs',
                      '--splitTaxa', '--gapbgone', '--buildhmms']
        # NOTE: attribute spelling ('useage') kept for compatibility with
        # existing callers.
        self.useage = 'python dictify.py [COMMAND] [OPTIONS]'

    def print_help(self):
        """Write the full usage screen (ordered commands first, then the
        remaining options alphabetically) to stderr."""
        sys.stderr.write('Useage:-\n{0}'.format(self.useage))
        sys.stderr.write('\n\nTHESE COMMANDS SHOULD BE CALLED IN THE FOLLOWING ORDER:\n\n')
        for line in self._OrderedList(self.order):
            sys.stderr.write( line )
        otherOptions = []
        ## Get the rest of the options.
        opts = self.switches.copy()
        opts.update( self.help_text )
        for opt in opts:
            if opt not in self.order:
                otherOptions.append( opt )
        for line in self._UnorderedList(otherOptions):
            sys.stderr.write( line )

    def _OrderedList(self, order):
        """Yield numbered, left-padded help lines for `order`, wrapping
        help text at 80 characters."""
        leftBuf = max([ len(key) for key in order ]) + 5
        for i, item in enumerate(order):
            helpText = self.switches[item]
            multiline = len(helpText) > 80
            if multiline:
                # First 80 chars go on the numbered line; continuations are
                # yielded indented to the same column.
                sys.stderr.write('{0}. {1}'.format(i + 1, item).ljust(leftBuf)
                                 + helpText[:80] + '\n')
                for text_ind in range(1, 1 + (len(helpText) // 80)):
                    yield '{0}{1}\n'.format(' '.ljust(leftBuf),
                                            helpText[text_ind * 80 :
                                            (1 + text_ind) * 80])
            else:
                yield '{0}. {1}'.format(i + 1, item).ljust(leftBuf) + \
                      helpText + '\n'
        yield '\n\n{0}\n\nOTHER OPTIONS\n\n'.format('-' * 80)

    def _UnorderedList(self, keys):
        """Yield left-padded help lines for `keys` in sorted order,
        wrapping help text at 80 characters."""
        leftBuf = max([len(opt) for opt in keys]) + 4
        for key in sorted(keys):
            if key in self.switches:
                helpText = self.switches[key]
            else:
                helpText = self.help_text[key]
            multiline = len(helpText) > 80
            if multiline:
                sys.stderr.write(key.ljust(leftBuf) + helpText[:80] + '\n')
                for text_ind in range(1, 1 + (len(helpText) // 80)):
                    yield '{0}{1}\n'.format(' '.ljust(leftBuf),
                                            helpText[text_ind * 80 :
                                                     (1 + text_ind) * 80])
            else:
                yield '{0}{1}\n'.format(key.ljust(leftBuf), helpText)


if __name__ == '__main__':
    # Time the whole run.
    t0 = time.time()

    # Parse the command line (defaults are overridden in place).
    options = MyOptions()
    options.parse_args(sys.argv[1:])

    # Run whichever commands were requested.
    IProcessOptions(options)

    # Report elapsed wall-clock time as hours + fractional minutes.
    elapsed_mins = (time.time() - t0) / 60
    hours = int(elapsed_mins / 60)
    mins_left = elapsed_mins - (hours * 60)
    print("Took a total of %shrs%.2fmins to complete" % (str(hours), mins_left))
    print("Exiting...")