'''
Created on Oct 18, 2009

@author: mkiyer
'''
import errno
import glob
import logging
import os
import re
import sys
from optparse import OptionParser

from veggie.app.rnaseq.cufflinks import CuffCompare, Cufflinks
from veggie.app.rnaseq.tophat import TopHat
from veggie.sample.sampledb2 import get_sampledb
from veggie.sample.samplegroup import parse_samplegroups_xml
from veggie.sequence.io import get_seq_length
import veggie.db.refdb as refdb

# Path to the combined UCSC gene annotation in GFF3 format, looked up once
# at import time; passed to TopHat via its -G option in run_tophat() below.
__junction_gff3_path = refdb.get_ucsc_table('genes_combined', 'gff3')

def find_pe_sequence_files(flowcell, lane, sequence_path,
                           prefix_fmt="%s_%s",
                           seq_suffix="best_sequence.txt",
                           bowtie_quals_suffix='best_bowtiequals.txt'):
    """Locate the paired-end sequence files for one flowcell/lane.

    Searches `sequence_path` for exactly one mate-1 and one mate-2 file
    matching "<flowcell>_<lane>*_{1,2}_<seq_suffix>", then reads the
    bowtie quality-score parameter (first line of the corresponding
    *<bowtie_quals_suffix> file) and the read length of each mate.

    Returns a tuple (fastq_files, quals_param, read_length), or None when
    the files cannot be found or the two mates disagree on quality-score
    encoding or read length.
    """
    analysis_id = prefix_fmt % (flowcell, lane)
    # build glob patterns used to search for the two mate files
    mate1_search_string = ''.join([analysis_id, '*_1_', seq_suffix])
    mate2_search_string = ''.join([analysis_id, '*_2_', seq_suffix])
    # glob to get the sequence file matches
    mate1_fastq_files = glob.glob(os.path.join(sequence_path, mate1_search_string))
    mate2_fastq_files = glob.glob(os.path.join(sequence_path, mate2_search_string))
    # each mate must match exactly one file; the original 'and' here let a
    # single missing/ambiguous mate slip through to an IndexError below
    if len(mate1_fastq_files) != 1 or len(mate2_fastq_files) != 1:
        logging.error('Error finding paired-end sequences for flowcell %s lane %s' % (flowcell, lane))
        return None
    fastq_files = [mate1_fastq_files[0], mate2_fastq_files[0]]
    # regexp capturing the beginning of the filename, reused as the prefix
    # in the search for the bowtie quals file
    prefix_re = re.compile(r'(.*)' + seq_suffix)
    quals_params = set()
    read_lengths = set()
    for fastq_file in fastq_files:
        m = prefix_re.match(os.path.basename(fastq_file))
        if not m:
            logging.critical("regexp match failed on file %s" % fastq_file)
            continue
        # get the quals param from the bowtie file
        fastq_prefix = m.group(1)
        quals_file = os.path.join(sequence_path, fastq_prefix + bowtie_quals_suffix)
        if not os.path.exists(quals_file):
            logging.error("quality score file %s not found... skipping %s" %
                          (quals_file, fastq_file))
            return None
        # first line of the quals file names the quality-score encoding
        with open(quals_file) as quals_fhd:
            quals_params.add(quals_fhd.readline().strip())
        # get the read length
        read_lengths.add(get_seq_length(fastq_file))
    # guard: if every filename failed the regexp match above, the sets are
    # empty and pop() at the end would raise KeyError
    if not quals_params or not read_lengths:
        logging.error('%s: could not determine quality encoding or read length' % analysis_id)
        return None
    # check that read length and quality scoring system are the same
    # for all the fastq files
    if len(quals_params) > 1:
        logging.error('quality scores not consistent across samples: %s' % quals_params)
        return None
    if len(read_lengths) > 1:
        logging.error('read lengths not consistent across samples: %s' % read_lengths)
        return None
    # return tuple with sequence files, quals param, and read length
    return fastq_files, quals_params.pop(), read_lengths.pop()


def run_tophat(analysis_id, fastq_files, quals_param, inner_dist, analysis_cwd,
               processors=6):
    """Align one paired-end lane with TopHat.

    fastq_files is a two-element sequence (mate 1, mate 2); quals_param is
    the bare TopHat quality-encoding flag name (passed as --<quals_param>);
    inner_dist is the mate inner distance.  Output goes to analysis_cwd.
    """
    logging.debug("%s: preparing to run TopHat on files: %s quals: %s inner_dist: %d" %
                  (analysis_id, fastq_files, quals_param, inner_dist))
    # assemble the TopHat command-line options
    tophat_opts = {}
    tophat_opts['-p'] = processors
    tophat_opts['--output-dir'] = analysis_cwd
    tophat_opts['--' + quals_param] = None  # flag-only option, no value
    tophat_opts['--mate-inner-dist'] = inner_dist
    tophat_opts['--mate-std-dev'] = 20
    tophat_opts['--min-intron-length'] = 60
    tophat_opts['-g'] = 1
    # NOTE: '--butterfly-search' deliberately left disabled
    tophat_opts['-G'] = __junction_gff3_path
    mate1, mate2 = fastq_files[0], fastq_files[1]
    # build the app controller and run the paired-end alignment
    aligner = TopHat(options=tophat_opts)
    aligner.run_paired_end(mate1, mate2,
                           output_dir=analysis_cwd,
                           genome="hg18")
    logging.debug("%s: TopHat finished" % (analysis_id))

def run_cufflinks(analysis_id, inner_dist, analysis_cwd,
                  processors=4):
    """Assemble transcripts with Cufflinks from the TopHat output.

    Skips the analysis (returning None) when no 'accepted_hits.sam' exists
    in analysis_cwd or when the cufflinks lock file is already present.
    """
    if not os.path.exists(os.path.join(analysis_cwd, 'accepted_hits.sam')):
        logging.debug("%s: no tophat results found at %s, skipping..." % (analysis_id, analysis_cwd))
        return
    # lock file indicates that cufflinks has been initiated on this
    # analysis; create it atomically (O_CREAT|O_EXCL) so two concurrent
    # pipeline runs cannot both pass an exists-then-create check and start
    # cufflinks twice on the same directory
    lockfile = os.path.join(analysis_cwd, "cufflinks.lock")
    try:
        os.close(os.open(lockfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL))
    except OSError as e:
        if e.errno == errno.EEXIST:
            logging.debug("%s: cufflinks lock file exists, skipping..." % analysis_id)
            return
        raise
    # setup and run cufflinks
    logging.debug("%s: preparing to run Cufflinks with inner_dist %d" % (analysis_id, inner_dist))
    cufflinks_app = Cufflinks(cwd=analysis_cwd)
    cufflinks_app.run('accepted_hits.sam',
                      inner_dist,
                      num_threads=processors,
                      label=analysis_id)
    logging.debug("%s: Cufflinks finished" % (analysis_id))
    return

def run_cuffcompare(analysis_id, analysis_cwd):
    """Run cuffcompare on the Cufflinks 'transcripts.gtf' of one analysis.

    Skips (returning None) when the cufflinks output is missing or the
    cuffcompare lock file already exists.
    """
    transcripts_gtf = "transcripts.gtf"
    app_name = "cuffcompare"
    # ensure required input file exists
    if not os.path.exists(os.path.join(analysis_cwd, transcripts_gtf)):
        logging.error("%s: no cufflinks results (%s) found at %s, skipping..." %
                      (analysis_id, transcripts_gtf, analysis_cwd))
        return
    # lock file indicates that cuffcompare has been initiated on this
    # analysis; create it atomically (O_CREAT|O_EXCL) so concurrent runs
    # cannot both pass an exists-then-create check
    lockfile = os.path.join(analysis_cwd, "%s.lock" % (app_name))
    try:
        os.close(os.open(lockfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL))
    except OSError as e:
        if e.errno == errno.EEXIST:
            # bug fix: args were (app_name, analysis_id), printing
            # "cuffcompare: <id> ..." instead of the "<id>: ..." form
            # used by every other log line in this module
            logging.debug("%s: %s lock file exists, skipping..." % (analysis_id, app_name))
            return
        raise
    # setup and run cuffcompare
    app = CuffCompare(cwd=analysis_cwd)
    retcode, result_paths = app.run(transcripts_gtf)
    logging.debug("%s: %s returned %s" % (analysis_id, app_name, str(retcode)))
    return

def run_pipeline(analyses, sequence_path, results_path):
    """Run the TopHat -> Cufflinks -> cuffcompare pipeline for each analysis.

    analyses is an iterable of (flowcell, lane, insert_size) tuples.
    Existing per-analysis result directories are never overwritten; those
    analyses are skipped with a debug message.
    """
    prefix_fmt = "%s_%s"
    for analysis in analyses:
        flowcell, lane, insert_size = analysis
        analysis_id = prefix_fmt % (flowcell, lane)
        analysis_cwd = os.path.join(results_path, analysis_id)

        res = find_pe_sequence_files(flowcell, lane, sequence_path)
        if res is None:
            # errors already logged inside find_pe_sequence_files
            continue
        # by default do not overwrite existing results
        if os.path.exists(analysis_cwd):
            logging.debug("%s: results path %s exists, skipping..." % (analysis_id, analysis_cwd))
            continue
        # make directory for results
        logging.debug('%s: creating analysis results path %s' % (analysis_id, analysis_cwd))
        os.makedirs(analysis_cwd)

        fastq_files, quals_param, read_length = res
        # TopHat wants the inner mate distance, not the full insert size
        inner_dist = insert_size - read_length
        if inner_dist <= 0:
            # bug fix: message previously said "(inner_dist < 0)" although
            # the condition tested is <= 0
            logging.error("%s: (inner_dist <= 0) insert_size: %d read_length: %d inner_dist: %d" % (analysis_id, insert_size, read_length, inner_dist))
            # fall back to a nominal 200bp insert size
            inner_dist = 200 - read_length
            logging.debug("%s: setting inner_dist to (200 - read_length)=%d" % (analysis_id, inner_dist))
        elif inner_dist < 100:
            logging.warning("%s: (inner_dist < 100) insert_size: %d read_length: %d inner_dist: %d" % (analysis_id, insert_size, read_length, inner_dist))
        run_tophat(analysis_id, fastq_files, quals_param, inner_dist, analysis_cwd)
        run_cufflinks(analysis_id, inner_dist, analysis_cwd)
        run_cuffcompare(analysis_id, analysis_cwd)


def get_all_analyses():
    """Query sampleDB for every paired-end transcriptome flowcell/lane.

    Returns the raw result of get_analyses_w_insert_size(); each entry
    carries the insert size alongside the flowcell/lane identifiers.
    """
    criteria = {'app_type': 'Transcriptome',
                'analysis': 'eland_pair'}
    return get_sampledb().get_analyses_w_insert_size(criteria)

def get_analyses_from_samples(sample_xmlfile):
    """Build (flowcell, lane, insert_size) tuples for the samples listed
    in `sample_xmlfile`.

    Bug fix: this function previously parsed the module-global
    `options.samples` instead of its own argument, so it only worked when
    called from this script's __main__ block (and raised NameError when
    imported and called from elsewhere).
    """
    analyses = []
    for sample_group in parse_samplegroups_xml(sample_xmlfile):
        for sample in sample_group.samples:
            insert_size = get_sampledb().params[sample]['insert_size']
            for fclane in get_sampledb().params[sample]['lanes']:
                logging.debug('    sample: %s lanes: %s' %
                              (sample, fclane))
                # fclane is (flowcell, lane, ...); keep insert size with it
                analyses.append((fclane[0], fclane[1], insert_size))
    return analyses

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)

    optionparser = OptionParser("usage: %prog [options] [-s <samples.xml>] <sequence_path> <results_path>")
    optionparser.add_option("-s", "--samples", dest="samples",
                            help="process only the samples defined in the sample XML file")
    (options, args) = optionparser.parse_args()
    # both positional arguments are required (previously an IndexError
    # when they were missing)
    if len(args) != 2:
        optionparser.error("expected <sequence_path> and <results_path> arguments")
    if options.samples is None:
        # -s is optional per the usage string: with no sample XML, fall
        # back to every paired-end transcriptome analysis in sampleDB
        # (get_all_analyses() was previously defined but never called,
        # and omitting -s incorrectly aborted with an error)
        analyses = get_all_analyses()
    else:
        analyses = get_analyses_from_samples(options.samples)
    # positional arguments
    sequence_path = args[0]
    results_path = args[1]

    # log the command-line arguments
    logging.debug('sequence_path: %s', sequence_path)
    logging.debug('results_path: %s', results_path)
    logging.debug('analyses: %s', analyses)

    # make directory for results
    if not os.path.exists(results_path):
        logging.debug('Creating results path %s' % results_path)
        os.makedirs(results_path)

    # run rnaseq pipeline
    run_pipeline(analyses, sequence_path, results_path)
