'''
Created on Oct 21, 2009

@author: mkiyer
'''
import os
import sys
import collections
import glob
import logging
from optparse import OptionParser
from bx.intervals.intersection import Interval, IntervalTree
from veggie.io.bed import bed_reader
from veggie.sample.sampledb2 import get_sampledb
from veggie.sample.samplegroup import parse_samplegroups_xml
import numpy as np
import operator

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import math
    
def histogram(data, xlabel='x', ylabel='y', outfile=None):
    data = np.array(data)
    print 'average', np.average(data)

    fig = plt.figure()
    ax = fig.add_subplot(111)
    nbins=100
    # the histogram of the data
    n, bins, patches = ax.hist(data, bins=nbins, facecolor='green', alpha=0.5, log=False, normed=0)
    plt.axvline(x=np.average(data), linewidth=4, color='r')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    #n, bins, patches = ax.hist(x2, bins=bins, facecolor='red', alpha=0.25)
    #l, = plt.plot(bins, mlab.normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3)
    #legend([l, patches[0]], ['fit', 'hist'])
    ax.grid(True)
    if outfile is None:
        plt.show()
    else:
        plt.savefig(outfile)
        
    plt.close()
    return

def plot_scatter(data, 
                 outfile=None,
                 xlabel='Control Group',
                 ylabel='Experimental Group',
                 title='Coverage scatter plot'):
    """Scatter-plot paired values (e.g. coverage in two sample groups).

    Args:
        data: sequence of (x, y) pairs.
        outfile: path to save the figure; when None the plot is shown
            interactively instead.
        xlabel, ylabel, title: axis and figure labels.

    The figure is always closed before returning.

    NOTE: the original implementation contained a large stretch of
    unreachable code after its 'return' (mean axlines, best-fit line,
    r^2 annotation, axis limits); that dead code has been removed
    without changing the live behavior.
    """
    fig = plt.figure()
    fig.add_subplot(111)

    xdata = [d[0] for d in data]
    ydata = [d[1] for d in data]
    # fixed size/color markers; cmap/norm/vmin/vmax left at their defaults
    plt.scatter(xdata, ydata,
                s=30, c='b', marker='o', alpha=1.0)
    plt.grid()
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)

    if outfile is None:
        plt.show()
    else:
        plt.savefig(outfile)
    plt.close()

# Library ids ('<flowcell>_<lane>') excluded from the analysis; these are
# skipped when loading BED results in build_interval_trees().
# Presumably flagged by an upstream QC step -- TODO confirm source of list.
qc_fail_libraries = set(['30WU2AAXX_2',
                         '30WU2AAXX_3',
                         '42P6UAAXX_8'])

def build_interval_trees(samples, results_path,
                         results_suffix="unknown_candidates.bed"):
    """Load per-library candidate BED records into per-chromosome interval trees.

    Args:
        samples: iterable of sample names known to the sample database.
        results_path: directory containing per-library BED files named
            '<flowcell>_<lane>.<results_suffix>'.
        results_suffix: file name suffix of the per-library BED files.

    Returns:
        (trees, annots, libraries) where 'trees' maps chromosome name to
        an IntervalTree of BED records, 'annots' is the flat list of all
        loaded records (each tagged with a '.sample' attribute naming its
        originating sample), and 'libraries' is the list of library ids
        that were actually loaded.
    """
    # IntervalTree() takes no args, so the class itself is the factory
    trees = collections.defaultdict(IntervalTree)
    annots = []
    libraries = []
    for sample in samples:
        for flowcell, lane in get_sampledb().params[sample]['lanes']:
            library_id = '%s_%d' % (flowcell, lane)
            library_bedfile = '%s.%s' % (library_id, results_suffix)
            library_path = os.path.join(results_path, library_bedfile)
            # BUGFIX: previously this only logged the missing file and then
            # crashed on open(); skip the library instead.
            if not os.path.exists(library_path):
                logging.critical('File not found: %s' % library_path)
                continue
            if library_id in qc_fail_libraries:
                logging.debug('Skipping poor quality library %s' % (library_id))
                continue
            logging.debug('Adding library %s to sample %s' % (library_id, sample))
            libraries.append(library_id)
            # BUGFIX: close the BED file when done (handle was leaked before)
            fileh = open(library_path)
            try:
                for annot in bed_reader(fileh):
                    # tag each record with the sample it came from
                    annot.sample = sample
                    trees[annot.chrom].insert_interval(
                        Interval(annot.start, annot.end, value=annot))
                    annots.append(annot)
            finally:
                fileh.close()
    return trees, annots, libraries

if __name__ == '__main__':
    # Compare transcript coverage between two samples: load each sample's
    # candidate BED intervals, match overlapping intervals across libraries,
    # print intervals with large coverage differences, and scatter-plot
    # the matched coverages.
    logging.basicConfig(level=logging.DEBUG)
    # NOTE(review): outfhd, percent_overlap, and mycmap are never used below.
    outfhd = sys.stdout
    percent_overlap = 25.0
    mycmap = ['blue', 'red', 'green']
    
    # parse command line: two sample names plus the directory of BED results
    optionparser = OptionParser("usage: %prog [options] <sample1> <sample2> <bed_dir>")
    (options, args) = optionparser.parse_args()
    sample1, sample2 = args[0], args[1]
    samples = [args[0], args[1]]
    results_path = args[2]

    # per-chromosome interval trees over all libraries of both samples
    trees, annots, libraries = build_interval_trees(samples, results_path)
    
    matched_coverage = []    
    # intersect each interval against all intervals from other libraries
    for a1 in annots:
        # library id is the BED record name up to the first '.'
        id1 = a1.name.split('.')[0] 
        hits = trees[a1.chrom].find(a1.start, a1.end)
        # NOTE(review): true_hits is never populated; appears vestigial
        true_hits = []
        library_best_hits = {}
        library_best_overlap = collections.defaultdict(lambda: 0.0)
                
        for hit in hits:
            a2 = hit.value
            id2 = a2.name.split('.')[0]                
            # ignore hits from same library (isoforms)
            if id1 == id2:
                continue
            # find overlapping interval: middle two of the four sorted endpoints
            ostart, oend = sorted((a1.start, a1.end, a2.start, a2.end))[1:3]
            # overlap fraction relative to the shorter interval (max of ratios)
            overlap_percent = max(float(oend - ostart)/(a1.end - a1.start),
                                  float(oend - ostart)/(a2.end - a2.start))
            # TODO: set an overlap threshold?
            #library_hits[id2].append((a2, overlap_percent))
            # choose the isoform with best overlap for each library
            if overlap_percent > library_best_overlap[id2]:
                library_best_overlap[id2] = overlap_percent
                library_best_hits[id2] = a2
        # organize hits by sample: sample name -> list of BED scores (coverage)
        sample_coverages = collections.defaultdict(lambda: [])
        for transcript in library_best_hits.values():
            sample_coverages[transcript.sample].append(transcript.score)
        # TODO: how to find average?  with or without "zeros"?
        # for now, do this without the zeros included, assuming the lanes
        # are poor quality and depth of coverage is poor
        # NOTE(review): np.average of an empty list yields nan, so samples
        # with no matched hits produce (nan, nan) pairs -- confirm intended.
        avg1 = np.average(sample_coverages[sample1])
        avg2 = np.average(sample_coverages[sample2])
        matched_coverage.append((avg1, avg2))        
        # floor zero averages so the log-ratio below is defined
        if avg1 == 0: avg1 = 0.01
        if avg2 == 0: avg2 = 0.01
        # report intervals with decent coverage and >= e-fold difference
        if max(avg1, avg2) > 10.0 and abs(math.log(avg1/avg2)) >= 1.0:
            print '\t'.join([a1.chrom, str(a1.start), str(a1.end), str(avg1), str(avg2)])
        #    print a1.chrom, a1.start, a1.end, a1.name, a1.score, a2.chrom, a2.start, a2.end, a2.name, a2.score        

    #histogram(recurrences)
    #histogram(avg_percent_overlaps)
    # interactive scatter plot of sample1 vs. sample2 coverage
    plot_scatter(matched_coverage, 
                 outfile=None,
                 xlabel='%s' % (sample1),
                 ylabel='%s' % (sample2),
                 title='%s vs. %s' % (sample1, sample2))