#!/usr/bin/env python
"""
General library functions commonly needed for SSUMMO modules.
Not to be called from command line.

"""

from ssummo.cmd_options import Options
from ssummo.traverse import dict_walk, get_accessions
from ssummo.taxonomy import TaxDB
import ssummo.CONFIG as CONFIG

import os
import re
import sys
import cPickle as pickle
import multiprocessing

try:
    sys.path.append( os.path.join( CONFIG.top, 'bin') )
    from itol import Itol, Dataset
except ImportError:
    sys.stderr.write("\nCannot find the python Itol library in path. Will stop at creating phyloxml text file\nrather than exporting the tree image\n" )

# Guard against direct execution: this module is a library and must be
# imported, never run as a script (a second, equivalent guard exists at
# the bottom of the file).
if __name__ == '__main__':
        sys.stderr.write( 'Do not call directly!' )
        raise IOError

class ITOLCGI():
    """Wrapper around the iTOL (Interactive Tree Of Life) CGI interface.

    Uploads a phyloxml tree (optionally with a colour-definition file and
    extra datasets) and exports rendered tree images.
    """

    # Export parameters applied to every download (class-level shared default).
    downloadOptions = {
                    'fontSize' : 28,
                    'lineWidth' : 1,
                    'showInternalLabels' : 1,
                    'showInternalIDs' : 0,
                    'rangesCover' : 'leaves',
                    'alignLabels' : 1
                    }

    def __init__(self, out_file = None, colour_file = None ):
        """Build the upload option set.

        :param out_file: open file object holding the phyloxml tree; only
                         its ``.name`` attribute is read.
        :param colour_file: path to an iTOL colour-definition file.
        """
        self.itol_uploader = Itol.Itol()
        self.ITOLopts = {
                'treeFormat' : 'phyloxml',
                'uploadID' : CONFIG.itolID,
                'projectName' : CONFIG.itolProject,
                'showInternalIDs' : '1',
                }
        if out_file is not None:
            self.ITOLopts.update({'treeFile' : out_file.name,
                'treeDescription' : out_file.name[:out_file.name.rfind('.')], })
        if colour_file is not None:
            self.ITOLopts.update( { 'colorDefinitionFile' : colour_file } )
        # Defaults for add_dataset().  NB '\%' is not a Python escape, so the
        # label contains a literal backslash -- presumably intended as an
        # escaped percent sign; confirm against the iTOL dataset spec.
        self.dataset_options = {
                'Label' : '\%seqs per OTU',
                'Type' : 'multibar',
                'Separator' : 'comma',
                'MultiBarAlign' : '1'
                }

    def add_dataset( self, dataset_file , options='dataset_options' ):
        """Attach a dataset file to the pending upload.

        :param dataset_file: open file object; only ``.name`` is read.
        :param options: the string 'dataset_options' to use the instance
                        defaults, otherwise a parameter dict for the dataset.
        """
        dataset = Dataset.Dataset()
        dataset.set_dataset_location( dataset_file.name )
        if options == 'dataset_options':
            dataset.add_param_dict( self.dataset_options )
        else:
            dataset.add_param_dict( options )
        self.itol_uploader.add_dataset( dataset )

    def ITOLupload( self ):
        """Upload the tree to iTOL and print the resulting web-page URL.

        Side effect: pops 'treeFile' out of ``self.ITOLopts`` and stores it
        as ``self.XMLname`` (required later by :meth:`ITOLdownload`).

        :returns: the underlying Itol uploader object.
        """
        self.XMLname = self.ITOLopts.pop( 'treeFile' )
        self.itol_uploader.set_tree_location( self.XMLname )
        self.itol_uploader.add_upload_param_dict( self.ITOLopts )
        good_upload = self.itol_uploader.upload_tree()
        if not good_upload:
            # BUGFIX: a two-argument print() printed a tuple under Python 2;
            # format into a single string instead.
            print('Error!\n{0}'.format(self.itol_uploader.upload_output()))
        webpage = self.itol_uploader.get_webpage()
        print('Tree Web Page URL:    {0}'.format(webpage))
        return self.itol_uploader

    def ITOLdownload( self, formats=('pdf',) ):
        """Export the uploaded tree in each of *formats*.

        Must be called after :meth:`ITOLupload` (uses ``self.XMLname``).

        :param formats: iterable of export suffixes (e.g. 'pdf', 'svg').
                        BUGFIX: the default is now an immutable tuple --
                        the original mutable list default was shared
                        between calls.
        """
        itol_exporter = self.itol_uploader.get_itol_export()
        graph_name = self.XMLname
        prefix = graph_name[:graph_name.rfind('.')]
        for suffix in formats:
            exportLocation = '{0}.{1}'.format(prefix, suffix)
            itol_exporter.add_export_param_dict( self.downloadOptions )
            itol_exporter.set_export_param_value('format', suffix)
            itol_exporter.set_export_param_value('datasetList', 'dataset1')
            itol_exporter.export(exportLocation)
            print('exported {0} formatted tree to {1}.'
                  .format(suffix, exportLocation ))

class seqDB(multiprocessing.Process):
    """Sequence-database server process.

    Serves batches of sequences from an indexed ARB-format sequence file.
    Protocol on ``prefetchQ``: a list of accessions (prefetch those
    sequences), an int (send the last prefetched batch down pipe number
    *int*), or the string 'END' (shut down).
    """

    def __init__(self, seqfile, prefetchQ, outQ, pipes, format='fasta'):
        """
        :param seqfile: path of the ARB sequence file to open and index.
        :param prefetchQ: queue of incoming requests (see class docstring).
        :param outQ: queue on which the full accession list is announced.
        :param pipes: sequence of pipe pairs; element [i][0] is the sending
                      end for worker *i*.
        :param format: input sequence format (accepted but unused here).
        """
        from ArbIO import ArbIO
        multiprocessing.Process.__init__(self)
        self.prefetchQ = prefetchQ
        self.outQ = outQ
        # BUGFIX: open() rather than the Python-2-only file() builtin.
        self.seqfile = open(seqfile, 'r')
        self.db = ArbIO(inHandle=self.seqfile, index=True)
        self.outpipes = pipes

    def run(self):
        """Serve prefetch/send requests until the 'END' sentinel arrives."""
        inval = self.db.indexes.keys()   # All accessions
        self.outQ.put( inval )
        # Guard: ensures a well-defined (empty) batch even if an int request
        # somehow arrives before any prefetch list.
        sequences = []
        while inval != 'END':
            if isinstance(inval, list):
                sequences = [ seq for seq in self.db.fetch( inval ) ]
            if isinstance(inval, int):
                curpipe = self.outpipes[inval][0]
                for sequence in sequences:
                    curpipe.send(sequence)
            inval = self.prefetchQ.get()
        self.prefetchQ.close()

    def __del__(self):
        # Best effort: close the sequence file when the object is reclaimed.
        self.seqfile.close()

class SsummoOptions(Options):
    """Command-line option handling for SSUMMO (extends :class:`Options`)."""

    def __init__(self, args=None):
        """Define defaults, help text and switches; parse *args* if given."""
        self.options = {
                    '-start'     : CONFIG.arbDBdir,
                    '-in'        : [],
                    '-out'       : None,
                    '-ncpus'     : multiprocessing.cpu_count() - 1,
                    '-Eval'      : str(10),
                    '-format'    : 'fasta',
                    '-score'     : str(1),
                    # NOTE(review): ('localhost') is a plain string, not a
                    # 1-tuple -- probably intended ('localhost',).  Left
                    # unchanged since -servers is documented as unimplemented.
                    '-servers'   : ('localhost'),
                    '--createXML': False,
                    '--verbose'  : False,
                    '--max'      : False,
                    '--annotate' : False,
                    '--separate' : False,
                    '--debug'    : False,
                }
        self.help_text = {
                    '-start'  : 'Start node for SSUMMO. Can be a domain, or deeper, but must start at least with the domain',
                    '-in'     : 'Query sequence file.',
                    '-out'    : 'Output results file names. Suffix will change. [Optional - prefix inferred from -in]',
                    '-ncpus'  :'Number of worker processes to initiate. [CPU count - 1]',
                    '-format' :'Input file sequence format [fasta].',
                    '-servers': 'Server names. [Not implemented yet].',
                    '-Eval'   : 'hmmsearch Evalue threshhold [10]',
                    '-score'  :'hmmsearch score threshold [1]',
                }
        self.switches = {
                    '--createXML' : 'Create phyloxml output?? [No]',
                    '--verbose' : 'Print HMMer results [No!]',
                    '--max' : 'Use hmmsearch --max flag (bypass filters that remove sequences from full scoring set). [no]',
                    '--separate' : 'Separate out annotated 16S rRNA sequences. [False]',
                    '--annotate' : 'Annotate 16S rRNA sequences with species name. [False]',
                    '--debug' : 'Turn on debug output to help with threading issues',
            }
        self.useage = "python {0} [option [arg]] ...".format( sys.argv[0] )
        self.example = self.useage
        self.singleargs = [ '-ncpus', '-servers', '--createXML', '-start',
                            '-format', '--verbose', '--max', '-score', '-Eval']
        self.multiargs = [ '-in', '-out' ]
        if args is not None:
            self.local_parse_args( args )

    def __iter__( self ):
        """Iterate over the option names."""
        for i in self.options:
            yield i

    def local_parse_args(self, args):
        """Parse *args*, inferring '-out' prefixes from '-in' when absent.

        :raises IOError: when the number of -out names differs from -in.
        """
        self.options = self.parse_args( args )
        if self['-out'] is None:
            # Derive one output prefix per input file (input name minus its
            # final extension, with a trailing dot).
            self.options.update( { '-out' : [] } )
            for option in self.options['-in']:
                if '.' in option:
                    prefix, suffix = option.rsplit('.', 1)
                else:
                    prefix = option
                self.options['-out'].append( prefix + '.' )
        if len(self['-out']) != len(self['-in']):
            msg = 'Must supply the same number of options to -in as to -out\n'
            # BUGFIX: was self['out'] (no such key), which raised KeyError
            # instead of reporting the count mismatch.
            msg+= '\n.Got {0} and {1}, respectively.\n'\
                  .format(len( self['-in']), len(self['-out']))
            raise IOError(msg)  # -out specified, but have different number of -in files.


def combine_dicts(results_dicts):
    """Give a list of SSUMMO results dictionaries. This shall return a
    dictionary containing each & every node from all of those dictionaries.

    Where accessions are found assigned to a node, this will combine the
    accessions from all results_dicts into a list of lists; one list of
    accessions per results dictionary, in the same order as are passed
    to this function."""
    combined_results = {}
    for ind, results_dict in enumerate(results_dicts):
        for full_path, results_node in dict_walk('', results_dict):
            if full_path == '':
                continue
            # Descend (creating as needed) to the node mirroring this path.
            combined_node = combined_results
            for node_name in full_path.split(os.path.sep):
                combined_node = combined_node.setdefault(node_name, {})
            if 'accessions' in results_node:
                if 'accessions' not in combined_node:
                    # One (initially empty) slot per input results dict.
                    # range() rather than the Python-2-only xrange().
                    combined_node['accessions'] = [[] for i in
                                                   range(len(results_dicts))]
                combined_node['accessions'][ind] = results_node['accessions']
    return combined_results

def reduceToGenus(tdict, TaxDB):
    """Collapse a taxonomy/results dictionary down to genus level.

    Walks each domain in *tdict*, looks up each node's rank via the
    taxonomy database, and replaces every genus-level subtree with a single
    ``{'accessions': [...]}`` node.  Domains other than Eukaryota /
    Bacteria / Archaea are removed.

    :param tdict: nested results dictionary keyed by taxon name;
                  modified in place.
    :param TaxDB: object providing ``fetch_tax_id(table, name, parent)``
                  (NB this parameter shadows the imported TaxDB class).
    :returns: the modified *tdict*.
    """
    tables = {  'Eukaryota' : 'Eukaryotes',
                'Bacteria' : 'Prokaryotes',
                'Archaea' : 'Prokaryotes' }
    genusList = []
    # list(): we delete keys while iterating (unsafe on a Py3 keys view).
    for domain in list(tdict.keys()):
        if domain not in tables:
            del tdict[domain]
            continue
        for path, node in dict_walk(domain, tdict[domain]):
            taxList = path.split( os.path.sep )
            if re.search(r'(Bacteria)|(Eukaryota)|(Archaea)', taxList[-1]):
                parentName = 'root'
            else:
                parentName = taxList[-2]
            taxID, rank = TaxDB.fetch_tax_id(tables[domain], taxList[-1], parentName)
            # list() so the comparison also works against a Py3 keys view.
            if re.search(r'genus', rank, re.I ) and list(node.keys()) != ['accessions']:
                genusList.append(path)
    # NOTE(review): the [-1:0:-1] slice walks genusList in reverse but skips
    # element 0 -- looks like an off-by-one; confirm whether the first genus
    # found really should be left uncollapsed before changing it.
    for path in genusList[-1:0:-1]:
        node = tdict
        for OTU in path.split( os.path.sep ):
            node = node[OTU]
        accessions = get_accessions(node, accessions=[])
        for key in list(node.keys()):  # list(): mutating while iterating
            del node[key]
        node['accessions'] = accessions
    return tdict

def get_combined_accessions(startNode, accessions):
    """Recursively gather per-dataset accessions below *startNode*.

    *accessions* is a list with one list of accessions per dataset (the
    layout produced by :func:`combine_dicts`); every 'accessions' node in
    the subtree is merged element-wise into it.

    :param startNode: nested (combined) results dictionary.
    :param accessions: accumulator; its inner lists are extended in place.
    :returns: the accumulator.
    """
    for key, child in startNode.items():
        if key == 'accessions':
            # range()/enumerate rather than the Python-2-only xrange().
            for i, dataset_accs in enumerate(child):
                accessions[i] += dataset_accs
        else:
            accessions = get_combined_accessions(child, accessions)
    return accessions

def find_start_node(resultsDict, taxDict):
    """Locates the directory where to enter the SSUMMO loop.
    Default is to start in arbDBdir, which is configured in
    CONFIG.py.
    To change, give the command option '-start /some/path/to/dir'

    :returns: (tax_node, path_relative_to_arbDBdir) on success, or None
              (after printing diagnostics) when no matching node is found.
    """
    # list(): .keys() is a view in Py3 and cannot be mutated directly.
    result_keys = list(resultsDict.keys())
    if 'accessions' in result_keys:
        result_keys.remove('accessions')
    if not result_keys:
        # Guard: the original indexed result_keys[0] and crashed on an
        # empty results dictionary.
        sys.stderr.write("\nCan't find the start directory!!\n")
        return None
    for path, node in dict_walk( CONFIG.arbDBdir, taxDict):
        if result_keys[0] in node:
            node_keys = node.keys()
            matched = sum(1 for key in result_keys if key in node_keys)
            if matched == len(result_keys):
                # Strip the arbDBdir prefix plus its trailing separator.
                return node, path[len(CONFIG.arbDBdir)+1:]
            else:
                print('mismatch between resultsKeys:-')
                print('{0}\n\n and tax keys:-\n{1}'.format(
                      ', '.join(sorted(result_keys)),
                      ', '.join(sorted(node_keys)) ) )
    sys.stderr.write("\nCan't find the start directory!!\n")
    print(result_keys)
    # BUGFIX: a two-argument print() printed a tuple under Python 2.
    print('\n{0}'.format(taxDict.keys()))

def load_index(silent=False):
    """Unpickle and return the full taxonomy index dictionary.

    The index location is CONFIG.top/CONFIG.taxIndex.

    :param silent: suppress the progress messages when True.
    :returns: the taxonomy dictionary.
    """
    if not silent:
        sys.stdout.write( 'Loading whole taxonomy index.')
        sys.stdout.flush()
    # BUGFIX: open() rather than the Python-2-only file() builtin; binary
    # mode as required for pickled data.
    with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as inFile:
        taxDict = pickle.load(inFile)
    if not silent:
        sys.stdout.write(' Done.\n')
    return taxDict

def collapse_at_rank(results_dict, collapse_at_rank, TaxDbObj=None,
                     tax_dict=None, combined=False, in_files=None):
    """Collapses a results dictionary at desired rank.

    :param results_dict: load (using :func:`pickle.load`) a SSUMMO results
                        file (.pkl file).
    :param collapse_at_rank: The desired rank to collapse as the value.
    :param TaxDbObj: Instance of :class:`.TaxDB`. Created if None.
    :param tax_dict: The full taxonomic dictionary index (usually referred to
                     by taxIndex in CONFIG.py).
    :param combined: Have results files already been combined?
    :param in_files: If combined, should be a list. Used to count number of
                     datasets.
    :returns: *results_dict*, modified in place.
    """
    if TaxDbObj is None:
        TaxDbObj = TaxDB()
    Eukaryota = re.compile( r'Eukaryota' )
    to_collapse = {}   # mirror tree marking which paths to snip
    collapseCount = 0
    # Anchored, case-insensitive match on the requested rank name.
    collapseRank = re.compile(r'^{0}$'.format(collapse_at_rank.strip()), re.I)
    if tax_dict is not None:
        # NOTE(review): find_start_node may return None on failure, which
        # would raise TypeError here -- confirm callers guarantee a match.
        fullnode, full_path = find_start_node(results_dict, tax_dict)
    else:
        full_path = os.path.join( CONFIG.arbDBdir , CONFIG.taxIndex )
    sys.stdout.write('Collapsing clades at {1} level.'
                     .format(collapseCount, collapse_at_rank ) )
    sys.stdout.flush()
    ## First Iteration. Note all the nodes with desired rank.
    for path, node in dict_walk(full_path, results_dict):
        pathList = path.split(os.path.sep)
        # Parent is the previous path component, or the node itself at the top.
        parent = pathList[-2] if len(pathList) > 1 else pathList[-1]
        table = 'Eukaryotes' if Eukaryota.search(path) else 'Prokaryotes'
        OTU = pathList[-1]
        taxid, nodeRank = TaxDbObj.fetch_tax_id(table, OTU, parent)
        if collapseRank.search(nodeRank):
            collapseCount += 1
            n = to_collapse
            for p in pathList:
                n = n.setdefault(p, {})
            n['snip'] = {}  # mark places to cut.
    sys.stdout.write('\nFound {0} clades at {1} level.\nCollapsing... '
                     .format(collapseCount, collapse_at_rank ) )
    sys.stdout.flush()
    n_collapsed = 0
    ## Second iteration. Delete any nodes beyond the desired node.
    if in_files is None:
        # NOTE(review): len(sys.stdin) below fails when combined=True and
        # in_files was omitted -- confirm intended usage.
        in_files = sys.stdin
    if combined:
        def get_accs(node):
            # BUGFIX: build fresh accumulators per call.  The original
            # shallow-copied the outer list (accs[:]) so the inner lists
            # were shared -- accessions leaked between collapsed clades.
            return get_combined_accessions(
                node, accessions=[[] for i in range(len(in_files))])
    else:
        def get_accs(node):
            return get_accessions(node, accessions=[])

    for path, node in dict_walk(full_path, to_collapse, topdown=False):
        if 'snip' not in node:
            continue
        # Follow the marked path into the real results tree.
        real_node = results_dict
        for p in path.split( os.path.sep ):
            real_node = real_node[p]
        accessions = get_accs(real_node)
        for key in list(real_node.keys()):  # list(): mutating while iterating
            del real_node[key]
        n_collapsed += 1
        real_node['accessions'] = accessions
    sys.stdout.write(' Collapsed {0} nodes.\n'.format( n_collapsed) )
    return results_dict

if __name__ == '__main__':
    # Library module: refuse execution as a script.
    sys.stderr.write('Not to be called directly from command line.\nExiting.\n')
    # sys.exit() rather than the site-module exit() helper, which is meant
    # only for interactive sessions and may be absent (e.g. under -S).
    sys.exit()
