'''
Created on Dec 1, 2009

@author: mkiyer
'''

import veggie.db.sample as sdb
from veggie.db.sample.samplegroup import parse_experiment_xml
from veggie.gene.gene import parse_refseq_genes
from bx.intervals.intersection import Interval, IntervalTree
import sys
import os
import glob
import re
import collections
import logging

def build_interval_trees(genes):
    """Index genes by chromosome for fast overlap queries.

    Returns a defaultdict mapping chromosome name -> IntervalTree, where
    each interval spans the gene's transcript coordinates (tx_start to
    tx_end) and carries the gene object as its ``value`` attribute.
    """
    # defaultdict can take the class itself as the factory; no lambda needed
    gene_trees = collections.defaultdict(IntervalTree)
    for g in genes:
        # add gene to the tree for its chromosome
        gene_trees[g.chrom].insert_interval(Interval(g.tx_start, g.tx_end, value=g))
    return gene_trees

def get_prognosis(sample):
    """Return True if the sample's diagnosis indicates cancer.

    The sample is considered cancerous when its 'diagnosis' parameter is
    one of the localized/metastatic tissue or cell line categories;
    anything else (e.g. 'Benign Cell Line', 'Benign Tissue') is benign.
    """
    cancer_params = set(['Localized Cell Line', 'Metastatic Tissue',
                         'Localized Tissue', 'Metastatic Cell Line'])
    # membership test already yields the boolean we want
    return sample.params['diagnosis'] in cancer_params

def fetch_junctions_from_name(sample_name, sdbi, data_path, qc_filter=True):
    """Yield ids of this sample's libraries that have a junction BED file.

    Looks up the 'best' libraries for `sample_name` in the sample db,
    optionally skips libraries marked QC-FAIL (when `qc_filter` is True),
    and yields only library ids whose '<id>_junctions.bed' file exists
    under `data_path`.
    """
    logger = logging.getLogger(__name__)
    # get the libraries associated with this sample
    libraries = sdbi.get_libraries_by_sample_name(sample_name, best=True)
    for library in libraries:
        # deliberately '== False' (not truthiness): only an explicit False
        # QC status counts as a failure; None/unset passes through
        if library.qc_status == False:
            # lazy %-args: logging formats only if the record is emitted
            logger.warning('library %s: marked QC-FAIL', library.id)
            if qc_filter:
                logger.warning('library %s: excluded from query', library.id)
                continue
        # check for the junction file on disk
        bedfile = library.id + '_junctions.bed'
        if os.path.exists(os.path.join(data_path, bedfile)):
            yield library.id
        else:
            logger.warning('library %s: junction file %s not found', library.id, bedfile)

def fetch_experiment_libraries(experiment, sdbi, data_path):
    """Populate `experiment.exper.libraries` and `experiment.control.libraries`.

    For each group (experimental and control), collects the set of library
    ids with available junction data for every sample name in the group.
    Mutates `experiment` in place; returns None.
    """
    for group in (experiment.exper, experiment.control):
        group.libraries = set()
        for sample_name in group.samples:
            # set.update consumes the generator directly; no need to
            # materialize a list first
            group.libraries.update(fetch_junctions_from_name(sample_name, sdbi, data_path))

class ExperimentResults(object):
    """Per-experiment read counts for one junction.

    `exper` and `control` map sample name -> {library id -> read count}
    for the experimental and control groups respectively.  `outcome` is
    None until set to FAIL or SUCCESS by the caller.
    """

    FAIL = 'fail'
    SUCCESS = 'success'

    def __init__(self):
        # sample name -> {library id -> read count}, auto-vivifying zeros
        self.exper = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
        self.control = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
        self.outcome = None

    def exper_sample_count(self):
        """Number of experimental-group samples with reads."""
        return len(self.exper)

    def control_sample_count(self):
        """Number of control-group samples with reads."""
        return len(self.control)

    def exper_library_count(self):
        """Total experimental-group libraries with reads.

        BUG FIX: must iterate the per-sample library dicts (values); the
        original iterated the dict's keys and summed sample-name string
        lengths.
        """
        return sum(len(libs) for libs in self.exper.values())

    def control_library_count(self):
        """Total control-group libraries with reads (same fix as above)."""
        return sum(len(libs) for libs in self.control.values())

    def exper_score(self):
        """Total experimental-group reads across all samples/libraries."""
        return sum(sum(libs.values()) for libs in self.exper.values())

    def control_score(self):
        """Total control-group reads across all samples/libraries."""
        return sum(sum(libs.values()) for libs in self.control.values())

    def __str__(self):
        # '(outcome)[sample|N reads|M libraries|(lib=score)...]...'
        s = ['(' + str(self.outcome) + ')']
        for name, libs in self.exper.items():
            header = '%s|%d reads|%d libraries' % (name,
                                                   sum(libs.values()),
                                                   len(libs))
            libstr = []
            for lib, score in libs.items():
                libstr.append('(%s=%d)' % (lib, score))
            s.append('[%s|%s]' % (header, ''.join(libstr)))
        return ''.join(s)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    # experiment definition XML file is the only command line argument
    exper_file = sys.argv[1]

    # reference gene BED file and directory of tophat junction output
    ref_bed_file = 'validated_genes_ref.bed'
    juncs_path = '/archive10/bigwig/tophat_results'

    # check parameters
    assert os.path.exists(ref_bed_file)
    assert os.path.exists(exper_file)
    assert os.path.exists(juncs_path)

    # default experiment parameters: minimum experimental-group samples
    # and reads required for a junction outcome of SUCCESS
    min_exper_samples = 1
    min_exper_reads = 2

    # load reference genes and build per-chromosome interval trees
    logging.debug("Loading reference genes...")
    refseq_genes = parse_refseq_genes()
    gene_trees = build_interval_trees(refseq_genes)

    # load sampledb snapshot
    sdbi = sdb.get_sampledb_instance()
    # parse experiments
    experiments = list(parse_experiment_xml(exper_file))
    # assign the available libraries to the experiments; a library may
    # belong to at most one experiment
    library2experiment = {}
    total_exper_samples = 0
    total_control_samples = 0
    total_exper_libs = 0
    total_control_libs = 0
    for e in experiments:
        fetch_experiment_libraries(e, sdbi, juncs_path)
        for library_id in e.exper.libraries:
            if library_id in library2experiment:
                # library ids are strings -> %s (the original %d would
                # raise TypeError when this branch fired)
                logging.error("library %s cannot be part of 2 experiments" % (library_id))
            library2experiment[library_id] = e
            total_exper_libs += 1
        for library_id in e.control.libraries:
            if library_id in library2experiment:
                logging.error("library %s cannot be part of 2 experiments" % (library_id))
            library2experiment[library_id] = e
            total_control_libs += 1
        total_exper_samples += len(e.exper.samples)
        total_control_samples += len(e.control.samples)
        logging.debug("Experiment %s: exper samples (%s) libraries (%s)" % (e.name, e.exper.samples, e.exper.libraries))
        logging.debug("Experiment %s: control samples (%s) libraries (%s)" % (e.name, e.control.samples, e.control.libraries))
    logging.debug("All libraries: %s" % (list(library2experiment)))
    logging.debug("Total Samples: %d" % (total_exper_samples + total_control_samples))
    logging.debug("Total Libraries: %d" % (total_exper_libs + total_control_libs))
    logging.debug("Total Experimental group samples: %d" % (total_exper_samples))
    logging.debug("Total Experimental group libraries: %d" % (total_exper_libs))
    logging.debug("Total Control group samples: %d" % (total_control_samples))
    logging.debug("Total Control group libraries: %d" % (total_control_libs))

    # (chrom, start, end, strand) -> {library id -> summed read count}
    juncdb = collections.defaultdict(lambda: collections.defaultdict(lambda: 0))
    library_sample_names = {}
    # load junctions from each library's BED file
    for library_id in library2experiment:
        junc_file = os.path.join(juncs_path, library_id + "_junctions.bed")
        library_samples = sdbi.get_library_samples(library_id)
        # can currently only handle libraries that correspond to a single sample
        assert len(library_samples) == 1
        library_sample_names[library_id] = library_samples[0].name
        logging.debug('Processing %s' % junc_file)
        for line in open(junc_file):
            chrom, start, end, name, score, strand = line.strip().split('\t')
            start = int(start)
            end = int(end)
            score = int(score)
            junc = juncdb[(chrom, start, end, strand)]
            junc[library_id] += score

    # junction classification: an end within 'padding' bp of a reference
    # gene interval is considered to hit that gene
    padding = 5
    SPLICE = 0
    INTERGENIC = 1
    EXTENSION = 2
    READ_THROUGH = 3
    junc_type_names = ['splice', 'intergenic', 'extension', 'readthrough']

    logging.debug('Examining junctions')
    for junc_id, junc_item in enumerate(juncdb.items()):
        junc, junc_libraries = junc_item
        chrom, start, end, strand = junc
        # skip junctions on chromosomes with no reference genes
        if chrom not in gene_trees:
            continue

        # gene symbols overlapping each end of the junction
        start_symbols = set([x.value.symbol for x in gene_trees[chrom].find(start-padding, start+padding)])
        end_symbols = set([x.value.symbol for x in gene_trees[chrom].find(end-padding, end+padding)])
        shared_symbols = start_symbols.intersection(end_symbols)
        # classify: same gene both ends -> splice; no gene either end ->
        # intergenic; gene at one end only -> extension; different genes
        # at each end -> readthrough
        junc_type = SPLICE
        if len(start_symbols) == 0 and len(end_symbols) == 0:
            junc_type = INTERGENIC
        elif ((len(start_symbols) == 0 and len(end_symbols) != 0) or
            (len(start_symbols) != 0 and len(end_symbols) == 0)):
            junc_type = EXTENSION
        elif len(shared_symbols) == 0:
            junc_type = READ_THROUGH

        # aggregate this junction's reads into per-experiment results
        exper_results = collections.defaultdict(lambda: ExperimentResults())
        for junc_library, score in junc_libraries.items():
            sample_name = library_sample_names[junc_library]
            # get the associated experiment for this library
            e = library2experiment[junc_library]
            # get the experiment results data or create a new set of results
            er = exper_results[e.name]
            if junc_library in e.exper.libraries:
                er.exper[sample_name][junc_library] = score
            if junc_library in e.control.libraries:
                er.control[sample_name][junc_library] = score

        # gather experiment statistics
        exper_libs, control_libs = 0, 0
        exper_samples, control_samples = 0, 0
        exper_reads, control_reads = 0, 0
        for e in experiments:
            eresults = exper_results[e.name]
            # sample statistics
            exper_samples += eresults.exper_sample_count()
            control_samples += eresults.control_sample_count()
            # library statistics
            exper_libs += eresults.exper_library_count()
            control_libs += eresults.control_library_count()
            # read statistics
            exper_reads += eresults.exper_score()
            control_reads += eresults.control_score()
            # record outcome: any control reads so far -> FAIL; enough
            # experimental samples and reads -> SUCCESS; otherwise None
            if control_reads > 0:
                eresults.outcome = ExperimentResults.FAIL
            elif ((exper_samples >= min_exper_samples) and
                  (exper_reads >= min_exper_reads)):
                eresults.outcome = ExperimentResults.SUCCESS

        # restrict output to junctions that are mutually exclusive for the
        # experimental versus control groups
        if (exper_samples > 0) and (control_samples > 0):
            continue

        # calculate statistics for output
        # NOTE(review): these ratios raise ZeroDivisionError if a group's
        # grand total of samples/libraries is zero - confirm experiment
        # XML always defines non-empty groups
        total_samples = exper_samples + control_samples
        ratio_total_samples = int(100 * float(exper_samples + control_samples) /
                                  (total_exper_samples + total_control_samples))
        ratio_exper_samples = int(100 * float(exper_samples) / (total_exper_samples))
        ratio_control_samples = int(100 * float(control_samples) / (total_control_samples))
        total_libs = exper_libs + control_libs
        ratio_total_libs = int(100 * float(exper_libs + control_libs) /
                               (total_exper_libs + total_control_libs))
        ratio_exper_libs = int(100 * float(exper_libs) / (total_exper_libs))
        ratio_control_libs = int(100 * float(control_libs) / (total_control_libs))
        total_reads = exper_reads + control_reads

        # output generic junction information
        junc_str = ['JUNC%d' % (junc_id),
                    chrom,
                    str(start),
                    str(end),
                    strand,
                    junc_type_names[junc_type],
                    ','.join(start_symbols) if len(start_symbols) > 0 else 'None',
                    ','.join(end_symbols) if len(end_symbols) > 0 else 'None',
                    str(total_samples), str(ratio_total_samples) + '%',
                    str(exper_samples), str(ratio_exper_samples) + '%',
                    str(control_samples), str(ratio_control_samples) + '%',
                    str(total_libs), str(ratio_total_libs) + '%',
                    str(exper_libs), str(ratio_exper_libs) + '%',
                    str(control_libs), str(ratio_control_libs) + '%',
                    str(total_reads),
                    str(exper_reads),
                    str(control_reads)]
        # append information about which experiments passed
        estr = []
        for ename, eresults in exper_results.items():
            estr.append('{%s:%s}' % (ename, eresults))
        junc_str.append(''.join(estr))
        # write the final tab-delimited record to stdout
        sys.stdout.write('\t'.join(junc_str) + '\n')
    