'''
Created on Nov 7, 2009

@author: mkiyer
'''

from bx.intervals.intersection import Interval, IntervalTree
from veggie.io.bed import bed_reader
from veggie.genome.chrom import get_chrom_names

import collections
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import math
import copy

class Transcript(object):
    """A genomic interval with per-library coverage values.

    Each instance gets a unique sequential id ('transcript0', 'transcript1',
    ...) from a class-level counter.  `library_ids` and `covs` are parallel
    lists: covs[i] is the coverage observed in library_ids[i].
    """
    # class-level state used to hand out unique sequential ids
    id_prefix = 'transcript'
    id_count = 0

    @staticmethod
    def __get_id():
        # renamed local so it no longer shadows the builtin 'id'
        new_id = Transcript.id_prefix + str(Transcript.id_count)
        Transcript.id_count += 1
        return new_id

    def __init__(self, chrom, start, end, strand):
        self.id = self.__get_id()
        self.chrom = chrom
        self.start = start
        self.end = end
        # strand may be '+', '-', or None (rendered as '.' in __str__)
        self.strand = strand
        self.library_ids = []
        self.covs = []
        # default p-value of 1.0 means "not yet scored"
        self.p_value = 1.0

    def __str__(self):
        """Render as a tab-delimited BED-like line; score is -log(p_value)."""
        return '\t'.join([self.chrom,
                          str(self.start),
                          str(self.end),
                          self.id,
                          '%.5f' % (-np.log(self.p_value)),
                          '.' if self.strand is None else self.strand,
                          str(len(self.library_ids)),
                          ','.join(self.library_ids),
                          ','.join(map(str, self.covs))])

def create_transcripts_from_bed(bedfhd):
    """Build one Transcript per BED record read from file handle *bedfhd*.

    Records on chromosomes not reported by get_chrom_names() are skipped.
    The BED 'name' field is recorded as the library id and 'score' as the
    coverage value.
    """
    transcripts = []
    # build a set once so the per-record membership test is O(1)
    # instead of scanning a list for every BED line
    chrom_names = set(get_chrom_names())

    for annot in bed_reader(bedfhd):
        if annot.chrom not in chrom_names:
            continue
        t = Transcript(annot.chrom, annot.start, annot.end, annot.strand)
        t.library_ids.append(annot.name)
        t.covs.append(annot.score)
        transcripts.append(t)
    return transcripts

def create_transcript_trees(transcripts):
    """Index transcripts by chromosome into per-chromosome interval trees.

    Also resets the per-transcript 'merged' flag, which the merge procedure
    uses to mark transcripts that have been consumed.
    """
    trees = collections.defaultdict(IntervalTree)
    for tx in transcripts:
        tx.merged = False
        trees[tx.chrom].insert_interval(Interval(tx.start, tx.end, value=tx))
    return trees

def get_overlap_percent(x1, x2, y1, y2):
    """Return the overlap fraction of intervals (x1,x2) and (y1,y2).

    The fraction is measured relative to the shorter of the two intervals
    (the smaller denominator wins via min()).

    BUG FIX: the previous middle-of-sorted-endpoints trick reported a
    positive "overlap" for disjoint intervals (e.g. (0,10) vs (20,30) gave
    1.0); the overlap length is now clamped at zero.
    """
    overlap = min(x2, y2) - max(x1, y1)
    if overlap < 0:
        overlap = 0
    return min(float(overlap) / (x2 - x1),
               float(overlap) / (y2 - y1))

def compare_strands(strand1, strand2):
    """Return False only when the two strands are explicitly opposite.

    Any other combination (same strand, unknown/None, '.') is considered
    compatible and returns True.
    """
    return (strand1, strand2) not in (('+', '-'), ('-', '+'))

def merge_transcripts(transcripts):
    """Collapse overlapping transcripts into merged union transcripts.

    Transcripts on the same chromosome with compatible strands that overlap
    by more than `overlap_threshold` are merged: the merged transcript spans
    the union of the coordinates and accumulates all library ids and
    coverage values.  Returns the list of merged + independent transcripts.

    BUG FIXES:
    - copy.copy() is shallow, so the merged transcript previously SHARED the
      library_ids/covs lists with tx; extending them corrupted the input
      transcript.  The lists are now copied before extension.
    - The merged start/end were taken over the accepted hits only, excluding
      tx itself; the union now includes tx's own coordinates.
    """
    trees = create_transcript_trees(transcripts)
    overlap_threshold = 0.00
    new_transcripts = []

    for tx in transcripts:
        if tx.merged:
            # already consumed by an earlier merge
            continue

        # overlapping transcripts accepted for merging with tx
        accepted_hits = []

        # intersect this transcript with all others on the same chromosome
        for hit in trees[tx.chrom].find(tx.start, tx.end):
            hit_tx = hit.value
            # every transcript intersects itself; skip those events
            if tx.id == hit_tx.id:
                continue
            # skip transcripts already merged into new transcripts; they are
            # no longer valid but cannot be removed from the tree
            if hit_tx.merged:
                continue
            # ensure the strand information does not conflict
            if not compare_strands(tx.strand, hit_tx.strand):
                print('mismatched strands', tx, hit_tx)
                continue
            # shared interval between the two transcripts
            ostart, oend = sorted((tx.start, tx.end, hit_tx.start, hit_tx.end))[1:3]
            # overlap fraction = maximum percent overlap relative to
            # either transcript
            overlap_fraction = max(float(oend - ostart) / (tx.end - tx.start),
                                   float(oend - ostart) / (hit_tx.end - hit_tx.start))
            if overlap_fraction > overlap_threshold:
                # mark consumed: a new union transcript will replace it
                hit_tx.merged = True
                accepted_hits.append(hit_tx)

        if not accepted_hits:
            # no overlaps: tx is independent, keep it as-is
            new_transcripts.append(tx)
        else:
            new_tx = copy.copy(tx)
            # give the merged transcript its own lists (shallow copy shares
            # them with tx)
            new_tx.library_ids = list(tx.library_ids)
            new_tx.covs = list(tx.covs)
            # the new interval is the union of tx and all accepted hits
            new_tx.start = min([tx.start] + [x.start for x in accepted_hits])
            new_tx.end = max([tx.end] + [x.end for x in accepted_hits])
            for hit_tx in accepted_hits:
                new_tx.library_ids.extend(hit_tx.library_ids)
                new_tx.covs.extend(hit_tx.covs)
            new_transcripts.append(new_tx)
            # index the merged transcript so later transcripts can hit it
            trees[new_tx.chrom].insert_interval(Interval(new_tx.start, new_tx.end, value=new_tx))
        # tx is always consumed after its intersections are examined
        tx.merged = True
    return new_transcripts

def plot_histogram(data, xlabel='x', ylabel='y', title='amazingness', outfile=None):
    """Plot a normalized histogram of *data* with summary stats in the title.

    If *outfile* is None the figure is shown interactively, otherwise it is
    saved to *outfile*.  The figure is closed afterwards either way.
    """
    data = np.array(data)
    mu = np.average(data)
    sigma = np.std(data)
    median = np.median(data)

    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    # cap the bin count so small data sets don't get more bins than points
    nbins = min(len(data), 1000)
    # the histogram of the data; 'density=True' replaces the long-removed
    # 'normed=1' keyword (same normalization behavior)
    n, bins, patches = ax.hist(data, bins=nbins, facecolor='green', alpha=1.0, log=False, density=True)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(r'$\mathrm{Histogram\ of\ %s}\ \mu=%f,\ \sigma=%f\ median=%d$' % (title, mu, sigma, median))
    ax.grid(True)
    if outfile is None:
        plt.show()
    else:
        plt.savefig(outfile)
    plt.close()

    print('n', n)
    print('bins', bins)
    print('patches', patches)
    return


def profile_transcript_lengths(transcripts):
    """Plot a histogram of transcript lengths (end - start)."""
    lengths = [tx.end - tx.start for tx in transcripts]
    plot_histogram(lengths, xlabel='Transcript length', ylabel='Count')

def profile_transcript_coverage(transcripts, min_length=200):
    """Plot a histogram of all per-library coverage values.

    NOTE(review): *min_length* is currently unused; kept for interface
    compatibility with existing callers.
    """
    covs = [c for tx in transcripts for c in tx.covs]
    plot_histogram(covs, xlabel='RPKM', ylabel='Count')

def profile_transcripts(transcripts, min_length=300, lib='42B48AAXX_4'):
    """Scatter-plot transcript length versus log10 coverage for one library.

    *lib* was previously hard-coded inside the function; it is now a
    parameter defaulting to the old value, so existing callers are
    unaffected.  Library ids are expected as '<flowcell>_<lane>' and the
    transcript's library_ids entries as '<lib>.<suffix>'.  *min_length* is
    currently unused (kept for interface compatibility).
    """
    covs = []
    lengths = []
    for tx in transcripts:
        # profiling only makes sense before merging, when each transcript
        # carries exactly one library/coverage pair
        assert len(tx.covs) == 1
        assert len(tx.library_ids) == 1
        library_id = tx.library_ids[0].split('.')[0]
        if library_id == lib:
            lengths.append(tx.end - tx.start)
            covs.append(tx.covs[0])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(lengths, np.log10(covs), s=10, c='b', marker='o')
    plt.title('Scatter plot of transcript length versus coverage for %s lane %s' % (lib.split('_')[0], lib.split('_')[1]))
    plt.ylabel('log10(coverage) (RPKM)')
    plt.xlabel('length')
    plt.show()
    plt.close()

def get_joint_dist(transcripts, cov_bin_size=1, length_bin_size=10):
    """Empirical joint density of (length, coverage) over *transcripts*.

    Each transcript must carry exactly one coverage value.  Returns a tuple
    (h, length_bin_size, cov_bin_size) where h is the normalized 2-D
    histogram with length on axis 0 and coverage on axis 1; h integrates to
    1 over the bin areas.
    """
    covs = []
    lengths = []
    for tx in transcripts:
        lengths.append(tx.end - tx.start)
        assert len(tx.covs) == 1
        covs.append(tx.covs[0])
    lengths = np.array(lengths)
    covs = np.array(covs)
    # equal-width bin edges from 0 to just past the array's maximum value
    abin = lambda a, bin_size: np.arange(0, bin_size * (1 + np.ceil(np.max(a) / float(bin_size))), bin_size)
    mybins = (abin(lengths, length_bin_size),
              abin(covs, cov_bin_size))
    # 'density=True' replaces the 'normed' keyword removed from NumPy >= 1.24
    h, xedges, yedges = np.histogram2d(lengths, covs, bins=mybins, density=True)
    return h, length_bin_size, cov_bin_size

def get_p_value(empirical_dist, length, cov):
    """Upper-tail probability of seeing (length, cov) under the empirical
    joint distribution returned by get_joint_dist().

    BUG FIX: the histogram was previously sliced with the raw length and
    coverage VALUES (h[0:length, 0:cov]) rather than bin indices, which
    overcounted the accumulated mass and crashes on float coverage under
    modern numpy; convert to bin indices before slicing.
    """
    h, length_bin_size, cov_bin_size = empirical_dist
    length_bin = int(length // length_bin_size)
    cov_bin = int(cov // cov_bin_size)
    # CDF mass below (length, cov): density summed over bins times bin area
    cdf = np.sum(h[:length_bin, :cov_bin]) * length_bin_size * cov_bin_size
    return 1.0 - cdf

def filter_transcripts(transcripts, min_length=300, p_value=0.05):
    """Keep transcripts that are improbable under their own library's
    empirical (length, coverage) distribution.

    Each transcript must carry exactly one library id and coverage value.
    Every transcript has its p_value attribute set as a side effect; those
    with p < *p_value* are returned.

    NOTE(review): *min_length* is currently unused; kept for interface
    compatibility with existing callers.
    """
    # organize transcripts by library id (prefix before the first '.')
    library_transcripts = collections.defaultdict(list)
    for tx in transcripts:
        assert len(tx.library_ids) == 1
        library_id = tx.library_ids[0].split('.')[0]
        library_transcripts[library_id].append(tx)

    # empirical joint (length, coverage) distribution per library
    library_dists = {}
    for lib_id, tx_list in library_transcripts.items():
        library_dists[lib_id] = get_joint_dist(tx_list)

    # accept transcripts whose probability is below the p-value cutoff
    accepted_transcripts = []
    for lib_id, tx_list in library_transcripts.items():
        for tx in tx_list:
            assert len(tx.covs) == 1
            length = tx.end - tx.start
            cov = tx.covs[0]
            p = get_p_value(library_dists[lib_id], length, cov)
            tx.p_value = p
            if p < p_value:
                accepted_transcripts.append(tx)
    return accepted_transcripts

if __name__ == '__main__':
    # Pipeline: load BED -> filter by empirical p-value -> merge overlaps.
    # Use a context manager so the input file is always closed.
    with open(sys.argv[1]) as bedfhd:
        transcripts = create_transcripts_from_bed(bedfhd)
    print('transcripts:', len(transcripts))
    filtered_transcripts = filter_transcripts(transcripts)
    print('filtered transcripts:', len(filtered_transcripts))
    merged_transcripts = merge_transcripts(filtered_transcripts)
    print('merged transcripts:', len(merged_transcripts))

    

    