#!/usr/local/bin/python2.7
# encoding: utf-8
'''
scripts.heatmap -- shortdesc

scripts.heatmap is a description

It defines classes_and_methods

@author:     user_name
        
@copyright:  2013 organization_name. All rights reserved.
        
@license:    license

@contact:    user_email
@deffield    updated: Updated
'''

import sys
import os
import collections
import operator
import logging

from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter

# third-party imports
import pymongo
import numpy as np

__all__ = []
__version__ = 0.1
__date__ = '2013-12-04'
__updated__ = '2013-12-04'

# Template switches consumed by the __main__ guard at the bottom of the file:
# DEBUG appends "-v" to sys.argv, TESTRUN runs doctests, PROFILE runs main()
# under cProfile.
DEBUG = 1
TESTRUN = 0
PROFILE = 0

# MongoDB collection names used by the query helpers below.
ROW_META = 'metadata'        # per-transcript (row) metadata documents
COL_META = 'samples'         # not referenced in this file; presumably column metadata
CONFIG = 'config'            # not referenced in this file
SAMPLE_SETS = 'sample_sets'  # sample-set name -> _id lookup
RESULTS = 'reports'          # per (t_id, ss_id) NES results for the heatmap
MERGED = 'merged'            # merged results; used for sorting (see SORTCOLLECTION)

# constants that change based on database schema version
SORTCOLLECTION = MERGED      # collection queried for top transcripts
SORTBY = 'ss_nes'            # sort key within SORTCOLLECTION
    
class CLIError(Exception):
    '''Generic exception to raise and log different fatal errors.

    The message is prefixed with "E: " and is returned by both str() and
    unicode() (the latter for Python 2 callers).
    '''
    def __init__(self, msg):
        # Prefix the message once and hand it to Exception so that args,
        # repr() and pickling behave normally.  The original template code
        # called super(CLIError).__init__(type(self)), which builds an
        # unbound super object and never initializes Exception at all.
        self.msg = "E: %s" % msg
        super(CLIError, self).__init__(self.msg)
    def __str__(self):
        return self.msg
    def __unicode__(self):
        return self.msg

def get_sample_set_id(db, sample_set_name):
    '''Look up the MongoDB _id of the sample set named *sample_set_name*.

    Queries the SAMPLE_SETS collection by name and returns the matching
    document's _id (raises TypeError if no document matches, since
    find_one then returns None).
    '''
    doc = db[SAMPLE_SETS].find_one({'name': sample_set_name}, ['_id'])
    return doc['_id']

def get_transcript_metadata(db, t_id, fields=None):
    '''Fetch the metadata document for transcript *t_id* from ROW_META.

    fields -- optional projection passed straight to find_one; None
              returns the full document.
    Returns the document, or None when no document has that _id.
    '''
    return db[ROW_META].find_one({'_id': t_id}, fields)

def get_top_transcripts(db, ss_id, fdr_threshold, topn, sortorder):
    '''Return the top transcripts for one sample set as (t_id, nes) pairs.

    Queries SORTCOLLECTION for documents with the given ss_id, sorts them
    by SORTBY in *sortorder*, truncates to the first *topn*, and keeps only
    those whose ss_fdr_q_value is <= fdr_threshold.  Note the FDR filter is
    applied after the limit, so fewer than topn pairs may be returned.
    '''
    cursor = db[SORTCOLLECTION].find({'ss_id': ss_id},
                                     ['t_id', 'nes', 'ss_fdr_q_value'])
    cursor.sort(SORTBY, sortorder)
    cursor.limit(topn)
    return [(doc['t_id'], doc['nes'])
            for doc in cursor
            if doc['ss_fdr_q_value'] <= fdr_threshold]

def get_heatmap_data(db, t_ids, ss_ids):
    '''Build the (len(t_ids) x len(ss_ids)) NES matrix for the heatmap.

    For each transcript row, queries the RESULTS collection for that
    transcript's NES in each requested sample set and places it at
    [row, column] using the ss_ids ordering.  Cells with no matching
    result document stay 0.0.
    '''
    coll = db[RESULTS]
    # np.float was an alias for the builtin float; it was deprecated in
    # NumPy 1.20 and removed in 1.24, so use float (-> float64) directly.
    data = np.zeros((len(t_ids), len(ss_ids)), dtype=float)
    # map sample-set id -> column index
    ss_ind_map = dict((ss_id, i) for i, ss_id in enumerate(ss_ids))
    for i, t_id in enumerate(t_ids):
        logging.debug("Row %d / %d" % (i, len(t_ids)))
        # one query per row, restricted to the requested sample sets
        for r in coll.find({'t_id': t_id, 'ss_id': {'$in': ss_ids}},
                           {'ss_id': 1, 'nes': 1}):
            data[i, ss_ind_map[r['ss_id']]] = r['nes']
    return data
   

def get_heatmap_table(database, study, sample_set_names, fdr_threshold, topn, topfrac):
    client = pymongo.MongoClient(database)
    db = client[study]
    # count number of transcripts in the study
    coll = db[ROW_META]
    nrows = coll.count()
    # find number of transcripts to report per sample set
    if topn is not None:
        topn = min(nrows, topn)
    else:
        topfrac = max(0.0, topfrac)
        topfrac = min(1.0, topfrac)
        topn = int(round(nrows * topfrac))
    # get list of top transcripts
    logging.debug("Querying %d sample sets for top %d transcripts up/down "
                  "with FDR threshold <= %f" % 
                  (len(sample_set_names), topn, fdr_threshold))
    t_id_dict = {}
    ss_ids = []
    for sample_set_name in sample_set_names:
        logging.debug("\tSample Set: %s" % (sample_set_name))
        # convert sample set names to id
        ss_id = get_sample_set_id(db, sample_set_name)
        ss_ids.append(ss_id)
        top_transcripts = get_top_transcripts(db, ss_id, fdr_threshold, topn, sortorder=-1)
        logging.debug("\tFound %d transcripts" % (len(top_transcripts)))
        # merge sample set t_ids
        for t_id,nes in top_transcripts:
            if t_id in t_id_dict:
                cur_nes = t_id_dict[t_id][1]
            else:
                cur_nes = 0.0 
            if abs(nes) > abs(cur_nes):
                logging.debug("\tReplaced transcript %d NES=%f with NES=%f" % (t_id, cur_nes, nes))
                t_id_dict[t_id] = (ss_id, nes)
    # sort t_ids by ss_id
    logging.info("Found %d total transcripts" % (len(t_id_dict)))
    logging.debug("Ordering transcripts by sample set and NES")
    ss_id_dict = collections.defaultdict(lambda: [])
    for t_id in t_id_dict:
        ss_id, nes = t_id_dict[t_id]
        ss_id_dict[ss_id].append((nes, t_id))
    # order transcripts by sample set and sort by nes
    t_ids = []
    for ss_id in ss_ids:
        ss_t_ids = [tup[1] for tup in sorted(ss_id_dict[ss_id], 
                                             key=operator.itemgetter(0), 
                                             reverse=True)]
        t_ids.extend(ss_t_ids)
        #logging.debug('ss id ' + str(ss_id) + ' t ids ' + str(len(t_ids)))
        #t_ids = [tup[1] for tup in sorted(ss_id_dict_down[ss_id], key=operator.itemgetter(0))]
        #t_ids_down.extend(t_ids)
    # get nes data for entire heatmap
    logging.debug("Querying for heatmap data")
    data_up = get_heatmap_data(db, t_ids, ss_ids)
    #data_down = get_heatmap_data(db, t_ids_down, ss_ids)
    # output data
    logging.debug("Writing output")
    header_fields = ['name', 'enrichment', 'gene_id', 'locus', 'category', 'nearest_gene_names']
    header_fields.extend(sample_set_names)
    print '\t'.join(header_fields)
    for i,t_id in enumerate(t_ids):
        r = get_transcript_metadata(db, t_id)
        fields = [r['name'], 
                  'up',
                  r['gene_id'],
                  '%s[%s]' % (r['locus'], r['strand']),
                  '%s' % (r['category']),
                  '%s' % (r['nearest_gene_names'])]
        fields.extend(map(str,data_up[i,:]))
        print '\t'.join(fields)
#    for i,t_id in enumerate(t_ids_down):
#        r = get_transcript_metadata(db, t_id)
#        fields = [r['name'], 
#                  'down',
#                  r['gene_id'],
#                  '%s[%s]' % (r['locus'], r['strand']),
#                  '%s' % (r['category']),
#                  '%s' % (r['nearest_gene_names'])]
#        fields.extend(map(str,data_down[i,:]))
#        print '\t'.join(fields)


def main(argv=None): # IGNORE:C0111
    '''Command line entry point: parse options and emit the heatmap table.

    argv -- optional extra arguments appended to sys.argv (legacy template
            behavior); when None, sys.argv is used unchanged.
    Returns a process exit code: 0 on success or keyboard interrupt.
    '''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    program_version = "v%s" % __version__
    program_build_date = str(__updated__)
    program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
    # second line of the module docstring serves as the short description
    program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
    program_license = '''%s

  Created by user_name on %s.
  Copyright 2013 organization_name. All rights reserved.
  
  Licensed under the Apache License 2.0
  http://www.apache.org/licenses/LICENSE-2.0
  
  Distributed on an "AS IS" basis without warranties
  or conditions of any kind, either express or implied.

USAGE
''' % (program_shortdesc, str(__date__))

    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
        # default=0 so that "verbose > 0" below is well-defined even when
        # -v is absent (count actions otherwise default to None)
        parser.add_argument("-v", "--verbose", dest="verbose", action="count", default=0, help="set verbosity level [default: %(default)s]")
        parser.add_argument('-V', '--version', action='version', version=program_version_message)
        # -n and -f are alternative ways to size the per-sample-set cut
        grp = parser.add_mutually_exclusive_group()
        grp.add_argument('-n', '--top-n', dest='topn', type=int, default=None,
                         metavar='N', 
                         help='limit to top/bottom N transcripts')
        grp.add_argument('-f', '--top-fraction', dest='topfrac', type=float, default=0.01,
                         metavar='FRAC',
                         help='limit to top/bottom FRAC fraction of transcripts')
        parser.add_argument('--fdr-threshold', type=float, default=0.05)
        parser.add_argument('database')
        parser.add_argument('study')
        parser.add_argument(dest='sample_sets_file',
                            help='file with list of sample sets to include ' 
                            '(one per line)')
        # Process arguments
        args = parser.parse_args()
        verbose = args.verbose
        database = args.database
        study = args.study
        sample_sets_file = args.sample_sets_file
        fdr_threshold = args.fdr_threshold
        topn = args.topn
        topfrac = args.topfrac
        if verbose > 0:
            level = logging.DEBUG
        else:
            level = logging.INFO
        logging.basicConfig(level=level,
                            format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        logging.info("Top N transcripts: %s" % (str(topn)))
        logging.info("Top FRAC transcripts: %s" % (str(topfrac)))
        logging.info("FDR threshold: %f" % (fdr_threshold))
        # read sample set names, one per line; close the file promptly
        with open(sample_sets_file) as f:
            sample_set_names = [x.strip() for x in f]
        get_heatmap_table(database, study, sample_set_names, fdr_threshold, topn, topfrac)
        return 0
    except KeyboardInterrupt:
        ### handle keyboard interrupt ###
        return 0

if __name__ == "__main__":
    # DEBUG forces verbose logging by injecting -v into the argument list
    if DEBUG:
        sys.argv.append("-v")
    # TESTRUN executes module doctests (and then still falls through to main)
    if TESTRUN:
        import doctest
        doctest.testmod()
    # PROFILE runs main() under cProfile, dumps cumulative-time stats to
    # profile_stats.txt, and exits without calling main() a second time
    if PROFILE:
        import cProfile
        import pstats
        profile_filename = 'scripts.heatmap_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    sys.exit(main())