#!/usr/bin/env python
import cPickle as pickle
import re
from ssummolib import dict_walk, TaxDB, get_accessions, getRanks, Options, fetchRank, collapse_at_rank, find_node_name, load_index, ITOLCGI
from dict_to_phyloxml import write_xml
from dict_to_html import initiate_html, write_html, close_html
import ArbIO
from Bio import SeqIO
import subprocess
import CONFIG
import os,sys
from colours import generate_HEX_colours

def get_name_at_rank( TaxDBObj, table, taxonName, rank = 'genus', parent=None ):
    """Searches the NCBI taxonomy database for similar names
    to the given taxonName, at the specified rank [genus].
    If we can find a unique taxon similar to that name, return
    the name as annotated in the NCBI, otherwise return None.

    TaxDBObj  -- TaxDB instance exposing an open DB cursor (TaxDBObj.cur).
    table     -- table to query ('Prokaryotes' or 'Eukaryotes').
    taxonName -- taxon name parsed from a sequence header or results path.
    rank      -- requested rank; matched case-insensitively as a prefix,
                 so e.g. 'genus' also matches 'genus subgroup'-style ranks.
    parent    -- optional parent taxon name used to disambiguate the lookup.
    """
    queryRank,queryName = fetchRank( TaxDBObj, table, taxonName, parent_name=parent )
    if not queryRank:
        return None
    rankSearch = re.compile( '^'+re.escape( rank ),re.I )
    if rankSearch.search( queryRank ):
        return queryName
    # The name resolved at a different rank -- walk up the tree towards the
    # requested rank.
    # NOTE(review): taxon names are interpolated straight into the SQL text.
    # Acceptable for trusted taxonomy names, but names containing a double
    # quote would break the statement -- prefer driver parameter binding.
    TaxDBObj.cur.execute( 'SELECT ParentName from {0} where Name="{1}";'.format(table,taxonName) )
    parents = set( r[0] for r in TaxDBObj.cur.fetchall() )
    if len(parents) == 1:
        found_parent = parents.pop()
        parent_rank,parent_name = fetchRank( TaxDBObj, table, found_parent )
        if not parent_rank:
            return None
        if rankSearch.search( parent_rank ):
            return parent_name
        return get_name_at_rank( TaxDBObj, table , parent_name, rank=rank )
    if len(parents) == 0:
        # No exact match -- retry with a prefix (LIKE) search.
        TaxDBObj.cur.execute('SELECT ParentName from {0} WHERE Name LIKE "{1}%";'.format( table, taxonName ) )
        parents = set( r[0] for r in TaxDBObj.cur.fetchall() )
        if len(parents) == 1:
            found_parent = parents.pop()
            queryRank,queryName = fetchRank( TaxDBObj, table, found_parent )
            if not queryRank:
                return None
            if rankSearch.search( queryRank ):
                return queryName
            return get_name_at_rank( TaxDBObj, table, found_parent, rank=rank )
        # Still ambiguous or empty: drop the last word of the name (often
        # strain / isolate info) and try again.
        next_taxon = taxonName.rsplit(' ',1)
        if len( next_taxon ) == 2:
            return get_name_at_rank( TaxDBObj, table, next_taxon[0] , rank=rank, parent=parent )
        return None
    # Several candidate parents: return the first whose rank matches.
    # (Guard against fetchRank returning a falsy rank, which previously
    # crashed rankSearch.search(None).)
    for candidate in parents:
        candidate_rank, candidate_name = fetchRank( TaxDBObj, table, candidate )
        if candidate_rank and rankSearch.search( candidate_rank ):
            return candidate_name
    return None

class Tally( object ):
    def __init__(self,*args,**kwargs):
        """Parses sequence headers from sequence file specified by kwargs['-in'].
        File format can be specified with kwargs['-format'].
        Optional argument defaults:-
          { '-kingdom' : 'Prokaryotes',   # Either Prokaryotes or Eukaryotes, for searching the taxonomy database.
            '-informat': 'fasta',         # Input sequence file format
            '-collapse-at-rank' : False,  # Collapse the SSUMMO tree at any specified rank
            '-rank'    : 'genus',         # Prints mismatches at specified rank.
          }
        """
        self.args = args
        for arg in args:
            self.arg = True
        self.options = kwargs
        required_kwargs = ['-in']
        opt_kwargs = { '-kingdom' : 'Prokyarotes' ,
                       '-informat' : 'fasta',
                       '-rank'     : 'genus'}
        for kwarg in required_kwargs:
            if kwarg not in self.options:
                raise ValueError( "Need to provide the kwarg '{0}' to Tally".format(kwarg) )
        for kwarg in opt_kwargs:
            if kwarg not in kwargs:
                self.options.update( { kwarg : opt_kwargs[kwarg] } )
        opt_kwargs['-rank'] = opt_kwargs['-rank'].lower()
        self._compile_regs()
        self.rank_matches = {}
    def _load_results( self ):
        results_file = self.options['-in'].rsplit('.',1)[0] + '.pkl'
        with file( results_file , 'rb' ) as result_handle:
            results = pickle.load( result_handle )
        return results            
    def _compile_regs( self ):
        self.unknowns = re.compile(r'(unidentified)|(incertae)|(unknown)|(uncultured)',re.I)
        self.candidates = re.compile( r'\s*(Candidate Division)\s*',re.I)
        self.genusReg = re.compile( r'genus',re.I )
        self.speciesReg = re.compile( r'(?<=,)species',re.I ) # need the lookbehind to get around subspecies
        self.spaceSub = re.compile( r'\s+' )  # In case typo's with multiple spaces.
        self.quoteSub = re.compile( r'[\'\"]' )
    def _parse_seq_headers( self ):
        if not os.path.exists(self.options['-in']):
            raise IOError("Must define a file name (Tally.options['-in'], or provide in **kwargs)")
        acc_species = {}  ;  n_seqs = 0
        desc_reg = re.compile( r'(^[^\s]+)\s+([^\s]*.*)$' )
        ##    First group is the first word in the sequence description (accession)
        ##    Second group is everything thereafter (hopefully, species info)
        with file( self.options['-in'] , 'r' ) as infile:
            for seq in SeqIO.parse( infile , self.options['-informat'] ):
                accession, info = desc_reg.search( seq.description ).groups()
                acc_species.update( { accession : info } )
                n_seqs += 1
        print '# Parsed {0} sequences from {1}'.format( n_seqs, self.options['-in'])
        self.n_seqs = n_seqs
        return acc_species

    def tally( self , tax_dict ):
        self.tb = TaxDB()
        self.acc_species = self._parse_seq_headers()
        results = self._parse_results( tax_dict )
        return results

    def _parse_results( self , tax_dict ):
        ranks = self.tb.get_ranks() 
        for rank in ranks:
            self.rank_matches.update( { rank : { 'found' :0 , 'matched' :0 , 'reg' : re.compile( rank, re.I ) } } )
        results_dict = self._load_results()
        if self.options['-collapse-at-rank'] is not False:
            results_dict = collapse_at_rank( results_dict , self.options, self.tb, tax_dict )
        matches = self._compare( results_dict , tax_dict )
        return matches

    def _compare( self , results , tax_index ):
        """ For each node in the results tree, we'll figure out its list of ranks up until
        that taxon, and also look for the pre-annotated name in our taxonomy database. We
        get as much information as possible, and for each accession, pass it to 
        _print_mismatches() to check."""

        result_count = 0
        print '{0} {1}'.format( '# Header species'.ljust(50),'SSUMMO species'.ljust(50) )
        for accessions,depth,tax_path in fetch_matches(results):   # target = SSUMMO annotation, query = sequence annotation
            tax_path = tax_path.split( os.path.sep )
            ranks, table= getRanks( tax_path, self.tb )
            for accession in accessions:
                # get the ranks of the last nodes ssummo assigned to.
                result_count += 1
                species = self.acc_species[accession] # Species parsed from query sequence header.
                queryName = self.quoteSub.sub( '', ' '.join( self.spaceSub.split(species,4)[:4] ))  # Split by spaces, only accepting species names up to 4 words long.
                try:
                    parent = tax_path[-2]
                except IndexError:
                    # Then the accession never got assigned anywhere
                    if tax_path == ['']:
                        continue
                    else:
                        parent = 'root'
                queryRank, name = fetchRank( self.tb,table, queryName, parent_name = parent )
                if not queryRank:   # If we don't find a rank for the name in the sequence header, try the other table.
                    if table == 'Eukaryotes':
                        queryRank,name = fetchRank( self.tb, 'Prokaryotes',queryName, parent_name = parent )
                        newTable = 'Prokaryotes'
                    elif table == 'Prokaryotes':
                        queryRank,name = fetchRank( self.tb, 'Eukaryotes',queryName, parent_name = parent )
                        newTable = 'Eukaryotes'
                    if not queryRank:
                        sys.stderr.write( "Can't find a rank for {0} / {1} in either table.\n".format(' '.join( self.spaceSub.split(species,4)[:4]),queryName ) )
                        queryRank = 'unknown'
                    else:
                        table = newTable
                        queryName = name
                else:
                    queryName = name
                self._print_mismatches( ranks, tax_path, table, queryName )
                continue
            continue
        return self.rank_matches

    def _print_mismatches( self , ranks, tax_path , table, queryName ):
        """
        """
        assigned_ranks = {} ; matched = {}
        for i, rank in enumerate( ranks ):
            assigned_ranks.update( { rank : tax_path[i] } )
            matched.update( { rank : False } )
        rank_names = {}
        for i,rank in enumerate( ranks[::-1] ):
            real_name = get_name_at_rank( self.tb , table , queryName , rank=rank )
            if real_name is None:
                rank_names.update( { rank : "unknown" } )
                continue
            elif real_name == assigned_ranks[rank]:
                self.rank_matches[rank]['matched'] += 1
                matched[rank] = True
            rank_names.update( { rank : real_name } )
            self.rank_matches[rank]['found'] += 1
        if self.options['-rank'] not in matched.keys() or rank_names[ self.options['-rank'] ] == 'unknown':
            if self._match_first_word( tax_path, queryName ):
                self.rank_matches[ self.options['-rank'] ]['found'] += 1
                print "{0} {1} # First word matched but found {2} didn't.".format( queryName.ljust(50) , ' / '.join(tax_path[-2:]).ljust(50) , self.options['-rank']),
            else:
                print "{0} {1} #".format( queryName.ljust(50)   ,   ' / '.join(tax_path[-2:]).ljust(50)   ),
            print  " No {0} in tax_path: {1}".format( self.options['-rank'] , '/'.join(tax_path) )
        elif not matched[self.options['-rank']]:
            if self._match_first_word( tax_path, queryName ):
                print "{0} {1} # First word matched but found {2} didn't".format( queryName.ljust(50) , ' / '.join(tax_path[-2:]).ljust(50) , self.options['-rank']),
        return

    def _substitute( self, tax_path ):
        """Deal with unknown // Candidate name matches. 
        If the leading text in a taxa's name is one of:-
          unknown
          unidentified
          Incertae sedis
          incertae
          uncultured
        then the last two taxa in the ARB taxonomy path
        are combined and the matched text is stripped, leaving
        only the remaining taxonomic information from those
        last two taxa.

        If "Candidate Division " is matched in the taxa's
        name, then this is stripped out.
        """
        weak = False
        try:
            if self.unknowns.search(tax_path[-1]):
                targetName = str(' '.join( tax_path[-2:] ) )
                weak = True
                if self.candidates.search(targetName):
                    targetName = self.candidates.sub('',targetName).strip()
            elif self.candidates.search(tax_path[-1]):
                weak = True
                targetName = self.candidates.sub('',tax_path[-1]).strip()
        except IndexError:
            print tax_path
            raise
        if not weak:
            targetName = tax_path[-1]
        return targetName
    
    def _match_first_word( self, tax_path, queryName ): 
        targetName = self._substitute( tax_path )
        inferred_query_genus = self.spaceSub.split(targetName)[0]  # just taking the first word
        if inferred_query_genus == self.spaceSub.split( queryName )[0]:
            #self.rank_matches[ self.options['-rank'] ]['matched'] += 1
            return True


def tallyDict(infile,options, taxDict):
    """Give the name of the original results file, and this will check
    the pickled dictionary results from SSUMMO against the sequence
    headers in that file. Tallies are printed to screen.

    infile  -- sequence file used for the SSUMMO run; the pickled results
               are expected at the same path with a '.pkl' extension.
    options -- parsed options dict; uses '-informat' and '-collapse-at-rank'.
    taxDict -- the nested SSUMMO taxonomy dictionary.

    Returns a dict of mismatches:
      accession -> [queryName, queryRank, taxPath (below root), ranks]
    """
    TaxDBObj = TaxDB( )
    resultFile = infile[:infile.rfind('.')] + '.pkl'
    inHandle = file(infile,'r')
    accSpecies = {}
    # Group 1 = accession (first word of the header); group 2 = everything
    # thereafter, which hopefully carries the species annotation.
    desc_reg = re.compile( r'^(\S+)\s+(\S*.*)[\n\r]?$' )
    spaceSub = re.compile( r'\s+' )  # In case typo's with multiple spaces.
    try:
        for seq in SeqIO.parse( inHandle, options['-informat'] ):
            accession, info = desc_reg.search( seq.description ).groups()
            accSpecies.update( { accession : spaceSub.sub(' ', info) } )
    except AttributeError:
        # desc_reg failed to match this description; show it before re-raising.
        print seq.description
        raise
    finally:
        inHandle.close()
    nSeqs = len( accSpecies )
    print "# {0} sequences read from {1}".format( nSeqs, inHandle.name )
    with file(resultFile,'rb') as resultHandle:
        resultDict = pickle.load(resultHandle)

    if options['-collapse-at-rank'] != False:
        resultDict = collapse_at_rank( resultDict , options,TaxDBObj, taxDict )

    seqCount = 0
    nAgreeGenus = 0
    nAgreeSpecies = 0
    unknowns = re.compile(r'(unidentified)|(incertae)|(unknown)|(uncultured)',re.I)
    candidates = re.compile( r'\s*(Candidate Division)\s*',re.I)
    genusReg = re.compile( r'genus',re.I )
    speciesReg = re.compile( r'(?<=,)species',re.I ) # need the lookbehind to get around subspecies
    mismatches = {}
    print '# Header species'.ljust(45),' SSUMMO species'.rjust(45)
    for accessions,depth,taxPath in fetch_matches(resultDict):   # target = SSUMMO, query = novel sequences
        taxPath = taxPath.split( os.path.sep )
        #taxPath = [levels[i] for i in sorted(levels.keys())]
        for accession in accessions:
            ranks, table= getRanks( taxPath[:depth+2], TaxDBObj )
            seqCount += 1
            species = accSpecies[accession] # Species parsed from query sequence header.
            weak = False
            # Uninformative node names ("uncultured" etc.) are merged with the
            # following taxa; "Candidate Division" prefixes are stripped. Such
            # a match is marked as a "weak" target name.
            if unknowns.search(taxPath[depth]):
                #targetName = str(' '.join( taxPath[depth:depth+1] ) )
                targetName = str(' '.join( taxPath[depth:] ) )
                weak = True
                if candidates.search(targetName):
                    targetName = candidates.sub('',targetName).strip()
            elif candidates.search(taxPath[depth]):
                weak = True
                targetName = candidates.sub('',taxPath[depth]).strip()
            if not weak:
                targetName = taxPath[depth]
            queryName = ' '.join( spaceSub.split(species,2)[:3])  # Split by spaces
            try:
                queryRank,name = fetchRank( TaxDBObj,table, queryName, parent_name = taxPath[-2] )
            except Exception:
                # Retry the lookup with quote characters stripped out.
                queryRank,name = fetchRank( TaxDBObj,table, re.sub( r'[\'\"]','',queryName), parent_name = taxPath[-2] )
            if not queryRank:
                # Fall back to just the first word of the name, then to the
                # other taxonomy table.
                queryName = spaceSub.split(queryName,1)[0]
                queryRank,name = fetchRank( TaxDBObj,table,queryName)
                if not queryRank:  ## Double check the other table...
                    if table == 'Eukaryotes':
                        queryRank,name = fetchRank( TaxDBObj, 'Prokaryotes',queryName, parent_name = taxPath[-2] )
                        newTable = 'Prokaryotes'
                    elif table == 'Prokaryotes':
                        queryRank,name = fetchRank( TaxDBObj, 'Eukaryotes',queryName, parent_name = taxPath[-2] )
                        newTable = 'Eukaryotes'
                    if not queryRank:
                        sys.stderr.write( "Can't find a rank for {0} / {1} in either table.\n".format(' '.join(re.split(r'\s+',species,2)[:2]),queryName ) )
                        queryRank = ''
                    else:
                        table = newTable
                        queryName = name
            else:
                queryName = name
            # Resolve the query's genus name: prefer the database-annotated
            # genus; otherwise fall back to the first word of the name.
            if not queryRank or not genusReg.search( queryRank ):
                RealGenus = get_name_at_rank( TaxDBObj,table,queryName,rank='genus' )
                if RealGenus != None:
                    queryGenus = RealGenus
                else:
                    queryGenus = spaceSub.split(queryName,1)[0]
            else:
                queryGenus = spaceSub.split(queryName,1)[0]
            realFamily = get_name_at_rank( TaxDBObj,table,queryName,rank='family')
            targetInfo = spaceSub.split(targetName)  # Turn ssummo taxon. into list, separated by spaces
            targetGenus = targetInfo[0]
            if len(targetInfo) > 1:   # If more than one word in ARB annotation
                targetSpecies = targetInfo[1]
            else:
                targetSpecies = ''
            ##### MATCHING QUERY NAME TO SSUMMO NODE NAME SECTION ####
            if targetGenus == queryGenus:  ## If text searching worked
                nAgreeGenus += 1
                if speciesReg.search(queryRank):
                    if targetSpecies == queryName:
                        nAgreeSpecies += 1
            else:               ## Else do some cunning genus finding and try to match them up
                if ranks != None and len(taxPath[1:depth+1]) > 0 :
                    pass
                else:
                    # No rank information available: just report the pair.
                    print queryName.ljust(45),' '.join([targetGenus,targetSpecies]).rjust(45)
                    continue
                genusMatch = genusReg.search( ','.join(ranks) )    # create an index to find the taxon name with rank genus
                speciesMatch = speciesReg.search(','.join(ranks) ) # as above but for species
                if genusMatch and speciesMatch:
                    ## actual genus & species. Both found from mysql DB.
                    genus = taxPath[1:depth+1][ ranks.index( genusMatch.group() ) ]
                    speciesInd = ','.join(ranks)[ : speciesMatch.start() ].count(',')
                    try:
                        species = taxPath[1:depth+1][ ranks.index( speciesMatch.group() ) ]
                    except ValueError:
                        print speciesMatch.group()
                        print ranks
                        raise
                    if genus == queryGenus:
                        nAgreeGenus += 1
                    else:
                        print queryName.ljust(45),' '.join([targetGenus,targetSpecies]).ljust(45)
                        mismatches.update( {accession:[queryName,queryRank,taxPath[1:],ranks]} )
                    if species.startswith( ' '.join(queryName.split(' ')[:2])):
                        nAgreeSpecies += 1
                elif genusMatch:
                    # Only a genus-ranked taxon was found along the path.
                    genus = taxPath[1:depth+1][ranks.index(','.join(ranks)[genusMatch.start() : genusMatch.end() ] )]
                    if genus == queryGenus:
                        nAgreeGenus += 1
                    else:
                        print queryName.ljust(45),', '.join(taxPath[depth-1:depth+1]).ljust(45), '** Only found matching taxon names up to genus specificity. **'
                        mismatches.update( {accession:[queryName,queryRank,taxPath[1:],ranks]} )
                else:
                    # No genus-ranked taxon at all on the assigned path.
                    rankDicts = []
                    for i in range(len(ranks)):
                        rankDicts.append( { ranks[i] : taxPath[1:depth+1][i] } )
                    mismatches.update( {accession:[queryName,queryRank,taxPath[1:],ranks]} )
                    print queryName.ljust(40) , ' '.join([targetGenus,targetSpecies]).ljust(45), 'No genus in:',repr( ', '.join([rankDict.values()[0] for rankDict in rankDicts]) )[1:-1].strip()
    print 'Found {0} accessions assigned to a node'.format(seqCount)
    print '{0} agree at the genus level'.format(nAgreeGenus)
    print '{0} agree at species level'.format(nAgreeSpecies)
    return mismatches


def fetch_matches(results_dict, cur_key='root',parent_key='',depth=0,path=None):
    """Recursively walks a SSUMMO results dictionary, yielding a tuple of
    (accessions, depth, path) for every node carrying an 'accessions' list.

    accessions -- the list stored under the node's 'accessions' key.
    depth      -- how many levels below the top dictionary the node sits.
    path       -- os.sep-joined taxon names from the top down to the node,
                  relative to the optional starting `path`.

    N.B. Ambiguous matches may not reside at the bottom level, so use depth
    to index the bottom node where those accessions were retrieved!
    """
    if path is None:
        path = ''
    for child_name in results_dict:
        if child_name == 'accessions':
            yield results_dict['accessions'], depth, path
        else:
            child_path = os.path.join( path, child_name )
            for match in fetch_matches( results_dict[child_name],
                                        cur_key=child_name,
                                        parent_key=cur_key,
                                        depth=depth + 1,
                                        path=child_path ):
                yield match

def SSUMMO_to_dict(SSUMMO_output):
    """Provide the file name for the SSuMMo version 0.1 output, and this will return
    a python dictionary representation.
    Designed to be used with dict_to_html.py and dict_to_phyloxml.py

    The input is tab-delimited with one header line; column 0 holds the
    sequence name (accession) and column 1 the assigned taxonomy path,
    which must contain an 'arbDB' component marking the tree root
    (taxonomy.index('arbDB') raises ValueError otherwise).

    Returns a nested dict keyed by taxon name, with an 'accessions' list
    at each node that had sequences assigned.
    """
    assert type(SSUMMO_output) == str
    phylodict = {}
    line_count = 0
    with open(SSUMMO_output, 'r') as in_handle:
        for line in in_handle:
            line_count += 1
            if line_count == 1:   # skip the header line
                continue
            cells = line.strip().split('\t')
            name = cells[0]
            taxonomy = cells[1].split('/')
            start = taxonomy.index('arbDB')
            # Descend (creating as needed) to the node for this path.
            node = phylodict
            for OTU in taxonomy[start+1:]:
                node = node.setdefault(OTU, {})
            node.setdefault('accessions', []).append(name)
    # The previous count included the header line, over-reporting by one.
    print("{0} genus' parsed from file".format(max(line_count - 1, 0)))
    return phylodict

def make_ITOL_graphs(input_file_name,SSUMMO_dict):
    """Writes an ITOL dataset file ('<input base>_ITOL_graphs.txt') giving,
    for each node of the SSUMMO results tree that has sequences assigned,
    the percentage of all sequences assigned at that node.

    input_file_name -- original sequence file name; only used to derive
                       the output file name.
    SSUMMO_dict     -- nested results dictionary (e.g. from SSUMMO_to_dict),
                       with 'accessions' lists at assigned nodes.
    """
    new_colours = [ colour for colour in generate_HEX_colours(2) ]
    dataset_name = input_file_name[:input_file_name.rfind('.')] + '_ITOL_graphs.txt'
    uncertains = re.compile( r'(uncultured)|(unknown)|(incertae)',re.I )
    found = set()   # node labels already written; used to keep labels unique
    with open( dataset_name , 'w') as ITOL_handle:
        ITOL_handle.write('LABELS,% Sequences assigned to this taxa\n')
        ITOL_handle.write('COLORS,{0}\n'.format(new_colours[0] ))
        total = float( len( get_accessions( SSUMMO_dict , accessions = [] ) ) )
        for tax_path, node in dict_walk( '', SSUMMO_dict,random=False ):
            if 'accessions' not in node:
                continue
            tax_nodes = tax_path.split( os.path.sep )
            # Uninformative leaf names ("uncultured", "unknown", ...) are
            # prefixed with their parent's name so the label stays
            # meaningful. NOTE(review): assumes tax_path always has at
            # least two components here -- confirm against dict_walk.
            if uncertains.search( tax_nodes[-1] ) and not uncertains.search( tax_nodes[-2]):
                last_node = re.compile( '|'.join( tax_nodes[-1].split(' ') ) )
                if not last_node.search( tax_nodes[-2] ):
                    tax_node = ' '.join( tax_nodes[-2:] )
                else:
                    tax_node = tax_nodes[-1]
            else:
                tax_node = tax_nodes[-1]
            # Make each node label unique by appending _1, _2, ...
            # (The previous implementation removed the old suffix with
            # str.rstrip('_<n>'), which strips a *character set* and could
            # eat trailing digits/underscores belonging to the real name.)
            base_name = tax_node
            n = 0
            while tax_node in found:
                n += 1
                tax_node = '{0}_{1}'.format( base_name, n )
            found.add( tax_node )
            n_accessions_at_node = len( node['accessions'] )
            if len( node ) > 1:   # node also has child taxa below it
                output = tax_node + ',R50,{0}\n'.format( 100 * n_accessions_at_node / total )
            else:
                output = tax_node + ',{0}\n'.format( 100 * n_accessions_at_node / total )
            ITOL_handle.write( output )
    return


def find_path( FINDPath,tdict ):
    """Locates the node described by FINDPath (a list of taxon names)
    inside the nested dictionary tdict.

    Walks every path in tdict (bottom-up via dict_walk) until one is found
    containing FINDPath[0] whose tail matches every name in FINDPath.
    Returns (path, node): the full os.sep-joined path from dict_walk, and
    tdict descended to just above the first matched component.

    If no path matches, a warning is written to stderr. (Previously the
    warning was unreachable dead code placed after the return statement.)
    """
    nToMatch = len(FINDPath)
    topNode = tdict
    for path,node in dict_walk( '',topNode,topdown=False ):
        taxPath = path.split( os.path.sep )
        if FINDPath[0] in taxPath:
            nMatched = 0
            taxIndex = taxPath.index(FINDPath[0])
            for key in taxPath[taxIndex:]:
                if key in FINDPath:
                    nMatched += 1
            if nMatched == nToMatch:
                break
    else:
        # Loop exhausted without a full match.
        sys.stderr.write("Can't find the start directory!!\n")
    node = tdict
    for startPlace in taxPath[:taxIndex]:
        node = node[startPlace]
    return path,node


def send_to_phyloxml(SSUMMO_output,phylodict,taxdict = None):
    """Writes a phyloxml representation of the results dictionary to
    '<base of SSUMMO_output>.xml' and reports how many sequences were
    assigned to an OTU.

    SSUMMO_output -- results file name; only used to derive the xml name.
    phylodict     -- nested results dictionary (e.g. from SSUMMO_to_dict).
    taxdict       -- optional pre-loaded taxonomy index; loaded via
                     load_index() when omitted.
    """
    if taxdict is None:
        taxdict = load_index()
    with open(SSUMMO_output[:SSUMMO_output.rfind('.')]+'.xml','w') as xml_out:
        top = os.getcwd()
        write_xml(top,xml_out,phylodict,taxdict)
    print("Written phyloxml output to {0}".format(xml_out.name))
    queries = get_accessions(phylodict,accessions=[])
    print("{0} sequences assigned to an OTU".format(len(queries)))
    return




def compare_to_training( options, mismatches,tdict, TaxDBObj ):
    """Runs HMMcompare.py for every mis-assigned HMM node: once against the
    node where the annotation says the sequences belong, and once against
    the node SSUMMO actually assigned them to, feeding in the training
    sequences plus the mismatched test sequences on stdin. Output files are
    written under 'hmmcompare_results/<node>/'.

    options    -- parsed options; requires '-training-data' and '-in', and
                  honours '-collapse-at-rank'.
    mismatches -- accession -> [queryName, queryRank, taxPath, ranks], as
                  returned by tallyDict().
    tdict      -- nested SSUMMO taxonomy dictionary.
    TaxDBObj   -- taxonomy database wrapper (not referenced in this body).
    """
    if options['-training-data'] == None:
        raise KeyError( "You must specify the sequence file used to train the models with '-training-data'!!")
    trainingDB = ArbIO.ArbIO( inHandle = file( options['-training-data'],'r') ,index=True )
    testDB = ArbIO.ArbIO( inHandle = file( options['-in'], 'r'), index=False )
    HMMPaths = []
    for val in mismatches.values():
        if val[2] not in HMMPaths:
            HMMPaths.append( val[2])   ## appending path lists to the problematic HMMs.
    if not os.path.exists( 'hmmcompare_results'):
        os.makedirs( 'hmmcompare_results')
    if options['-collapse-at-rank'] != False:
        collapseReg = re.compile( re.escape( options['-collapse-at-rank']),re.I)
    for hmm in HMMPaths:  # Where hmm is a list representing the path to the directory containing the HMM.
        testSeqs = []
        targetRanks = []
        testNames = []
        # Collect the mis-assigned test sequences (and their annotated
        # names / ranks) that belong to this HMM node.
        for acc in mismatches.keys():
            assignedPath = mismatches[acc][2]
            if assignedPath == hmm[-len(assignedPath):]:
                testSeqs.append( acc )  # Get the test sequences allocated to this node.
                targetRanks =  mismatches[acc][3]
                testNames.append( mismatches[acc][0] )
        if options['-collapse-at-rank'] != False:
            # Truncate both the rank list and the HMM path below the
            # collapse rank.
            for rank in targetRanks:
                if collapseReg.search( rank ):
                    delInd = targetRanks.index(rank) + 1
                    del( targetRanks[delInd:])
                    del( hmm[delInd:])
        targetRanks = mismatches[acc][3]
        ## Go to the bottom node.
        divergedPath = []
        if hmm[0] not in tdict.keys():
            try:
                fullPath,topNode = find_path( hmm,tdict )  ## fullPath is where the sequence was assigned. topNode just takes into account the "-start" option in SSUMMO.
            except Exception:
                print hmm, tdict.keys()
                raise
            full_path_list = fullPath.split( os.path.sep )
            # Record the path components above the HMM path's first element.
            for path_component in full_path_list:
                if path_component != hmm[0]:
                    divergedPath.append( path_component )
                else:
                    break
        else:
            fullPath = os.path.sep.join( hmm )
            topNode = tdict
        #### Find where they diverge!!
        # Walk back up the assigned path until one of the annotated names
        # can be found somewhere beneath it.
        found = False
        for i in xrange(len(hmm),0,-1):
            node = topNode
            node_names = hmm[:i]
            for node_name in node_names:
                node = node[node_name]
            for queryName in testNames:
                rel_path = find_node_name( queryName, node )
                if rel_path != None:
                    found = True
                    break
            if found:
                divergedPath +=  node_names 
                break
        diverged_path = os.path.sep.join( divergedPath )
        real_path = os.path.join( diverged_path, rel_path )             ## Where it should have been assigned, according to annotation
        different_dirs =  fullPath[len(diverged_path):].strip( os.path.sep )        ## Difference between where SSUMMO's assignment diverged, and where it should be.
        different_dir_list = different_dirs.split( os.path.sep )
        assigned_path = os.path.join( diverged_path, different_dir_list[0] ) ## One directory from the divergent directory, towards where it was assigned.
        real_hmm = os.path.join( CONFIG.arbDBdir,diverged_path,rel_path, rel_path + '.hmm' )
        assigned_hmm = os.path.join( CONFIG.arbDBdir,assigned_path , different_dir_list[0] + '.hmm' )
        ## diverged_path is where they diverged.
        ## different_dir_list[0] is the next directory on the path to where it was assigned.
        ## rel_path is the next directory on the path to where it should have gone (according to probably incorrect annotation).
        if not os.path.exists( os.path.join( 'hmmcompare_results',hmm[-1] )):
            os.makedirs( os.path.join( 'hmmcompare_results',hmm[-1]  ))
        ### Run HMMcompare on the node where the annotation says it should have gone.
        accessions = get_accessions( node[rel_path], accessions=[] )
        HMMCompare = subprocess.Popen( [os.path.join(CONFIG.top,'bin','HMMcompare.py'),'--consensus','-out',os.path.join('hmmcompare_results',hmm[-1],rel_path+'.txt'),'-hmm',real_hmm,'-in','/dev/stdin'],shell=False,stdin=subprocess.PIPE ) ## Could specify -ncpus.
        for accession in accessions:  ## Write in the training sequences
            seqRecord = trainingDB[accession]
            HMMCompare.stdin.write( '>{0}\n{1}\n'.format( seqRecord.id , trainingDB.arbSeqToStr( seqRecord.seq.tostring() ) ) )
        for accession in testSeqs:   ## Write in the test sequences
            HMMCompare.stdin.write( testDB[accession].format('fasta') )
        HMMCompare.stdin.close()
        retCode = HMMCompare.wait()
        ### Run HMMcompare on the node where SSUMMO (incorrectly) assigned the test sequence..
        accessions = get_accessions( node[different_dir_list[0]], accessions=[] )
        HMMCompare = subprocess.Popen( [os.path.join(CONFIG.top,'bin','HMMcompare.py'),'--consensus','-out',os.path.join('hmmcompare_results',hmm[-1],different_dir_list[0]+'.txt'),'-hmm',assigned_hmm,'-in','/dev/stdin'],shell=False,stdin=subprocess.PIPE )
        for accession in accessions:
            seqRecord = trainingDB[accession]
            HMMCompare.stdin.write( '>{0}\n{1}\n'.format( seqRecord.id , trainingDB.arbSeqToStr( seqRecord.seq.tostring() ) ) )
        for accession in testSeqs:   ## Write in the test sequences
            HMMCompare.stdin.write( testDB[accession].format('fasta') )
        HMMCompare.stdin.close()
        retCode = HMMCompare.wait()

def parseArgs(args):
    """Build and populate the global CONFIG.options object.

    Registers every recognised option with its default value, help
    text and argument arity, then hands the raw argument list over to
    Options.parseArgs.  Stores the Options instance on CONFIG.options
    (module-level side effect) and also returns it.
    """
    opts = Options()
    CONFIG.options = opts
    # Default values: value-taking options plus boolean switches.
    opts.options = {
        '-collapse-at-rank': False,
        '-in': sys.stdin,
        '-informat': 'fasta',
        '-rank': 'genus',
        '--compare-to-training': False,
        '-training-data': None,
        '--tally': False,
        '--buildxml': False,
        '--upload': False,
        '--download': False,
        '--buildhtml': False,
    }
    # Help text for the options that take an argument.
    opts.helpTxt = {
        '-collapse-at-rank': 'Choose a taxonomic rank, and any output will only go down to that rank.',
        '-in': 'Sequence file used in the SSUMMO analysis. (The program will find the results it needs).',
        '-training-data': 'If using --compare-to-training, you must specify the sequence file used for training the models.',
        '-informat': 'Format of original sequence file',
        '-rank': 'Rank to print out mismatch information for',
    }
    # Help text for the boolean switches.
    opts.switches = {
        '--tally': 'Tally inconsistencies between annotated data and the SSUMMO allocations.',
        '--buildxml': 'Build a phyloxml tree of the results. Saves to [inName].xml.',
        '--buildhtml': 'Build html output of the results. Saves to [inName].html. Make sure the js and css files are linked\n\tcorrectly.',
        '--compare-to-training': 'This flag can be used to get the sequence scores that the original training sequences are\n\trun with HMMcompare.py',
        '--upload': 'Upload phyloxml with graph & colour files to ITOL [False].',
        '--download': 'Download image file of phyloxml tree [False].',
    }
    opts.multiargs = []
    opts.singleargs = ['-collapse-at-rank', '-in', '-training-data', '-rank']
    opts.parseArgs(args)
    return opts

def tally(results):
    """Compare a tab-delimited SSUMMO results text file against the
    genus annotated in each original sequence name.

    Deprecated in favour of comparing names in the dictionary saved
    in the .pkl results file.

    results -- path to a tab-delimited text file whose first line is
               a header and whose data lines are
               "<original name><TAB><SSUMMO path>".

    Prints each mismatching line plus an overall accuracy summary to
    stdout.  Fixed to close the file even on error (with-statement)
    and to avoid ZeroDivisionError when the file has no data rows.
    """
    total = 0
    wins = 0
    with open(results, 'r') as results_file:
        results_file.readline()  # skip the header line
        for line in results_file:
            fields = line.split('\t')
            if len(fields) < 2:
                # Stop at the first line with no tab (e.g. a trailing
                # summary or blank line), as the original did.
                break
            orig_name = fields[0]
            # The annotated genus is the first space-delimited word.
            orig_genus = orig_name[:orig_name.find(' ')].strip()
            SSUMO_genus = fields[1]
            # Last path component of the SSUMMO allocation path.
            _genus = SSUMO_genus[SSUMO_genus.rfind('/', 0, -2) + 1:].strip()
            total += 1
            if _genus == orig_genus:
                wins += 1
            else:
                print('{0}\t{1}'.format(orig_name, SSUMO_genus))
    if total == 0:
        # Guard against ZeroDivisionError on an empty results file.
        print('No results to tally.')
        return
    print('%.2f%% accurate' % (wins * 100.0 / total))
    print('Got %i out of %i right' % (wins, total))


if __name__ == '__main__':
    args = sys.argv[1:]
    options = parseArgs(args)

    # Every mode below needs the full taxonomy index.
    sys.stdout.write('Loading whole taxonomy index.')
    sys.stdout.flush()
    with open(os.path.join(CONFIG.top, CONFIG.taxIndex), 'rb') as inFile:
        taxDict = pickle.load(inFile)
    sys.stdout.write(' Done.\n')

    if options['--tally']:
        # Compare SSUMMO allocations against the annotated taxonomy and
        # print a per-rank accuracy table, then stop.
        tal = Tally(**options.options)
        results = tal.tally(taxDict)
        print('\n\n{0}{1}'.format('RANK'.ljust(50), 'N Matched'.ljust(50)))
        for rank in results:
            result = results[rank]
            if result['found'] > 0:
                print('{0}{1}'.format(rank.ljust(50), '{0} / {1} = {2}'.format(result['matched'], result['found'], 100. * (result['matched'] / float(result['found']))).ljust(50)))
        sys.exit()

    elif args and args[0] == 'tallytext':  # Deprecated.
        # BUG FIX: previously read sys.argv[1] unguarded, so running with
        # no arguments raised IndexError here.
        tally('SSUMMO_results.txt')

    if options['--buildxml']:   # Deprecated.
        inName = options['-in']
        pickledName = inName[:inName.rfind('.')] + '.pkl'
        with open(pickledName, 'rb') as resultsHandle:
            resultsDict = pickle.load(resultsHandle)
        if options['-collapse-at-rank'] != False:
            TaxDBObj = TaxDB()
            resultsDict = collapse_at_rank(resultsDict, options, TaxDBObj, taxDict)
            del TaxDBObj
        send_to_phyloxml(inName, resultsDict, taxdict=taxDict)
        make_ITOL_graphs(inName, resultsDict)
        if options['--upload']:
            suffix = options['-in'][:options['-in'].rfind('.')]
            colour_file_name = suffix + '_ITOL_colours.txt'
            graph_file = suffix + '_ITOL_graphs.txt'
            out_file = open(suffix + '.xml', 'r')
            try:
                with open(colour_file_name, 'r') as colourFile:
                    ITOL = ITOLCGI(out_file=out_file, colour_file=colourFile)
                    ITOL.dataset_options['MultiBarAlign'] = '0'
                    ITOL.add_dataset(graph_file)
                    uploader = ITOL.ITOLupload()
                if options['--download']:
                    # BUG FIX: the emptiness check used '-out-format' while the
                    # append and the download used '-out-formats'; use the one
                    # key consistently.  NOTE(review): '-out-formats' is not
                    # registered in parseArgs — presumably Options supplies it;
                    # verify before relying on --download.
                    if len(options['-out-formats']) == 0:
                        options['-out-formats'].append('pdf')
                    ITOL.ITOLdownload(formats=options['-out-formats'])
            finally:
                # Ensure the phyloxml handle is closed even if upload fails.
                out_file.close()
        #ITOLupload(inName)

    if options['--buildhtml']:  # Proper deprecated. Instead, use 'python dict_to_html.py <results>.pkl'
        try:
            in_name = sys.argv[2]
        except IndexError:
            print("Please provide an output text file from SSUMMOv1")
            raise IOError
        phylodict = SSUMMO_to_dict(in_name)
        if options['-collapse-at-rank'] != False:
            TaxDBObj = TaxDB()
            # BUG FIX: previously collapsed the undefined name 'resultsDict',
            # which raised NameError in this branch; collapse phylodict.
            phylodict = collapse_at_rank(phylodict, options, TaxDBObj, taxDict)
            del TaxDBObj
        output_name = in_name[:in_name.rfind('.')] + '.html'
        with open(output_name, 'w') as write_handle:
            initiate_html(CONFIG.top, write_handle)
            write_html(CONFIG.top, write_handle, phylodict)
            close_html(CONFIG.top, write_handle)
        # BUG FIX: chmod the file we actually wrote (previously chmodded a
        # hard-coded 'html_from_walk.htm'); os.chmod avoids a shell call.
        os.chmod(output_name, 0o755)

    if options['--compare-to-training']:
        if not options['--tally']:
            tal = Tally(**options.options)
            results = tal.tally(taxDict)
            #mismatches = tallyDict( options['-in'] , options , taxDict)
        with open(CONFIG.taxIndex, 'rb') as taxFile:
            tdict = pickle.load(taxFile)
        TaxDBObj = TaxDB()
        #compare_to_training(options,mismatches,tdict,TaxDBObj)
        compare_to_training(options, results, tdict, TaxDBObj)