'''
Created on Aug 24, 2009

@author: mkiyer
'''

from bx.intervals.cluster import ClusterTree
from veggie.coverage.covdb import get_datasets_from_sample_group2
from veggie.genome.chrom import get_chrom_names, get_chrom_length
from veggie.sample.samplegroup import parse_samplegroups_xml
from optparse import OptionParser
import collections
import glob
import h5py
import logging
import numpy as np
import os
import slidingwindow
import sys

def clustertree(intervals, cluster_distance, min_size=1):
    """Merge (chrom, start, end) intervals that lie within cluster_distance
    bp of each other on the same chromosome.

    Parameters:
      intervals        - iterable of (chrom, start, end) tuples
      cluster_distance - distance in basepairs for two intervals to be in
                         the same cluster; e.g. 20 groups all intervals
                         within 20bp of each other
      min_size         - number of intervals necessary for a group to be
                         reported as a cluster; 2 returns all groups with
                         2 or more overlapping intervals

    Returns a list of merged (chrom, start, end) tuples (one per cluster,
    per chromosome; chromosome order follows dict iteration order).
    """
    # ClusterTree is imported at module level (bx.intervals.cluster);
    # the redundant function-local import was removed.
    cluster_trees = collections.defaultdict(
        lambda: ClusterTree(cluster_distance, min_size))
    # enumerate replaces a manual counter that shadowed the builtin id()
    for interval_id, (chrom, start, end) in enumerate(intervals):
        cluster_trees[chrom].insert(start, end, interval_id)
    result = []
    for chrom, cluster_tree in cluster_trees.items():
        for start, end, ids in cluster_tree.getregions():
            result.append((chrom, start, end))
    return result

def median_foldchange(t_windows, c_windows):
    """Per-window fold change: median(treatment) / median(control), with
    the median taken along axis 1 of each 2-D window array.

    NOTE(review): a zero control median produces inf/nan under numpy's
    division semantics — presumably the caller's threshold comparison is
    expected to discard those; confirm.
    """
    return np.median(t_windows, axis=1) / np.median(c_windows, axis=1)

def ttest(t_windows, c_windows):
    """Independent two-sample t-test of treatment vs. control windows.

    Returns a (t-statistics, p-values) pair, one value per window
    (statistics computed along axis 1).
    """
    from scipy.stats import ttest_ind
    result = ttest_ind(t_windows, c_windows, axis=1)
    return result[0], result[1]

def cluster(indexes, wsize):
    """Collapse runs of consecutive window start indexes into intervals.

    indexes - sorted window start positions (consecutive integers form a run)
    wsize   - window size; each interval ends at its last index + wsize

    Returns a list of (run_start, run_end + wsize) tuples; empty input
    yields an empty list.
    """
    intervals = []
    n = len(indexes)
    i = 0
    while i < n:
        run_start = i
        # advance i to the last index of this consecutive run
        while i + 1 < n and indexes[i] + 1 == indexes[i + 1]:
            i += 1
        intervals.append((indexes[run_start], indexes[i] + wsize))
        i += 1
    return intervals

def find_diff_expr_chrom(chrom, t_chrom_dsets, c_chrom_dsets, 
                         bedfhd,
                         wsize=100, 
                         coverage_threshold=0.025, 
                         foldchange_threshold=3.0,
                         pvalue_threshold=0.05):
    """Scan one chromosome for sliding windows where treatment coverage is
    significantly above control, and write the merged hits as BED lines.

    Parameters:
      chrom                - chromosome name
      t_chrom_dsets        - dict: sample name -> list of per-chromosome
                             coverage datasets (treatment); datasets are
                             sliceable arrays (HDF5-backed)
      c_chrom_dsets        - same structure, for control
      bedfhd               - open, writable file handle for BED output
      wsize                - sliding window size in bp
      coverage_threshold   - per-base floor; a window passes when its
                             treatment median > coverage_threshold * wsize
      foldchange_threshold - required treatment/control median fold change
      pvalue_threshold     - one-direction t-test p-value cutoff

    Side effects: writes one BED line per significant interval to bedfhd
    and flushes it.  Returns None.
    """
    # chrom length
    chrom_length = get_chrom_length(chrom)
    # input param checking: clamp the window to the chromosome if needed
    if wsize > chrom_length:
        logging.error("window size %d > chromosome length %d" % (wsize, chrom_length))
        wsize = chrom_length
    # obtain "chunks" of data to process because reading one big chunk
    # from the hdf5 file is faster than many small chunks
    default_bin_size = 1000000
    # results: chromosome-wide window start indexes that pass all filters
    indexes = []
    
    for bin_start in xrange(0, chrom_length - wsize, default_bin_size):
        # adjust bin size at last interval at end of chromosome
        bin_size = min(default_bin_size, chrom_length - wsize - bin_start)
        # store window values: [0] = treatment, [1] = control
        windows = [None, None]

        logging.debug("    processing %d-%d" % (bin_start, bin_start + bin_size))        
        for k, chrom_dsets in enumerate([t_chrom_dsets, c_chrom_dsets]):
            # read the bin data from the hdf5 file; the extra wsize bases
            # let every window starting inside the bin be fully covered
            bins = np.zeros((bin_size + wsize, len(chrom_dsets)), dtype=np.float)
            # need to somehow pool all the individual datasets that comprise each sample
            for j, dsets in enumerate(chrom_dsets.itervalues()):
                # get average covarray for these dsets
                # TODO: do something else here instead of just average
                for dset in dsets:
                    bins[:,j] += dset[bin_start:bin_start + bin_size + wsize]
                bins[:,j] /= len(dsets)
#            for j, dset in enumerate(chrom_dsets):
#                bins[:,j] = dset[bin_start:bin_start + bin_size + wsize]
            # compute sliding window values
            windows[k] = slidingwindow.sliding_window2(bins, wsize)            

        logging.debug("        computing significance")
        # determine window significance
        # apply a filter to ensure intervals have sufficient coverage
        t_median_coverages = np.median(windows[0], axis=1)
        passed_coverage = t_median_coverages > (coverage_threshold * wsize)
        # do a t-test; tstats > 0 restricts hits to treatment > control
        tstats, pvals = ttest(windows[0], windows[1])
        passed_ttest = np.logical_and(tstats > 0, pvals < pvalue_threshold)
        # fold change filter
        foldchanges = median_foldchange(windows[0], windows[1])
        passed_foldchange = foldchanges > foldchange_threshold
        # windows must pass all three filters
        bin_indexes = np.nonzero(np.logical_and(passed_foldchange, 
                                                np.logical_and(passed_coverage, passed_ttest)))[0]
        #for idx in bin_indexes:
        #    print 'chrom', chrom, 'index', bin_start + idx, 't', windows[0][idx], 'c', windows[1][idx], 'pval', pvals[idx]
        # convert bin-relative indexes to chromosome coordinates
        indexes.extend(bin_start + bin_indexes)

    # cluster adjacent indexes together
    logging.debug("        clustering")
    intervals = cluster(indexes, wsize)
    # write intervals to file (name='gumby', score=0.0 are placeholders)
    for interval in intervals:
        bedfhd.write('%s\t%d\t%d\t%s\t%.2f\n' % (chrom, interval[0], interval[1], 'gumby', 0.0))
    bedfhd.flush()

def find_diff_expr(t_dsets, c_dsets, outfhd):
    """Run the differential-expression window scan for every chromosome.

    t_dsets / c_dsets map sample names to lists of whole-genome dataset
    groups; for each chromosome the per-chromosome datasets are extracted
    and handed to find_diff_expr_chrom, which writes results to outfhd.
    """
    for chrom in sorted(get_chrom_names()):
        t_chrom_dsets = dict((name, [grp[chrom] for grp in dsets])
                             for name, dsets in t_dsets.iteritems())
        c_chrom_dsets = dict((name, [grp[chrom] for grp in dsets])
                             for name, dsets in c_dsets.iteritems())
        logging.debug('diff expr for %s' % chrom)
        find_diff_expr_chrom(chrom, t_chrom_dsets, c_chrom_dsets, outfhd)

if __name__ == '__main__':
    optionparser = OptionParser("usage: %prog [options] <coveragedb.hdf5>")    
    optionparser.add_option("-t", "--treatment", dest="treatment",
                            help="sample group name of treatment")
    optionparser.add_option("-c", "--control", dest="control",
                            help="sample group name of control")
    optionparser.add_option("-s", "--samples", dest="samples",
                            help="sample groups XML file")
    optionparser.add_option("-o", "--output", dest="outfile",
                            default="diffexpression.bed",
                            help="output file [default: %default]")
    (options, args) = optionparser.parse_args()
    # validate positional arguments BEFORE opening the output file so a
    # usage error does not leave an empty output file behind (bug fix:
    # the check previously ran after open())
    if len(args) == 0:
        optionparser.error("no HDF5 coverage file specified")
    # NOTE(review): because --output has a default, outfile is never None
    # unless the default is removed; the stdout branch is kept for that case
    if options.outfile is None:
        outfhd = sys.stdout
    else:
        outfhd = open(options.outfile, 'w')

    # configure logging
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename="rnaseek.log")

    # read sample groups
    sample_groups = {}
    for sgroup in parse_samplegroups_xml(options.samples):
        sample_groups[sgroup.name] = sgroup
    # get treatment and control groups
    if options.treatment not in sample_groups:
        raise KeyError('%s not in sample groups xml file' % 
                       options.treatment)
    if options.control not in sample_groups:
        raise KeyError('%s not in sample groups xml file' %
                       options.control)
    t = sample_groups[options.treatment]
    c = sample_groups[options.control]
    logging.debug('treatment sample group: %s' % options.treatment)
    logging.debug('control sample group: %s' % options.control)

    # open the coverage database file
    h5file = h5py.File(args[0], 'r')
    # extract the individual flowcell/lane datasets from sample groups
    logging.debug('getting treatment samples')
    t_dsets = get_datasets_from_sample_group2(t, h5file)
    logging.debug('getting control samples')
    c_dsets = get_datasets_from_sample_group2(c, h5file)

    # TODO: create combined datasets for each sample?
    logging.debug('starting search...')
    # kickoff the expression search
    find_diff_expr(t_dsets, c_dsets, outfhd)    
    # done: close resources (bug fix: the output handle was never closed)
    if outfhd is not sys.stdout:
        outfhd.close()
    h5file.close()
    logging.shutdown()