'''
Created on Nov 13, 2009 (based on an earlier module created Oct 18, 2009)

@author: mkiyer
'''
from optparse import OptionParser
from veggie.app.alignment.bowtie import Bowtie
from veggie.app.alignment.novoalign import Novoalign
from veggie.sample.sampledb2 import get_sampledb
from veggie.sample.samplegroup import parse_samplegroups_xml
from veggie.sequence.io import get_seq_length
import glob
import logging
import os
import re
import sys
import veggie.db.refdb as refdb

def find_pe_sequence_files(flowcell, lane, sequence_path,
                           prefix_fmt="%s_%s",
                           seq_suffix="best_sequence.txt",
                           bowtie_quals_suffix='best_bowtiequals.txt'):
    '''Locate the paired-end sequence files for one flowcell/lane.

    Globs `sequence_path` for exactly one mate-1 and one mate-2 sequence
    file, reads the quality-score encoding name from the first line of the
    matching bowtie quals file for each mate, and measures the read length.

    Returns a tuple (fastq_files, quals_param, read_length) on success, or
    None if files are missing, ambiguous, or inconsistent between mates.
    '''
    analysis_id = prefix_fmt % (flowcell, lane)
    # build a string to use to search for sequence files
    mate1_search_string = ''.join([analysis_id, '*_1_', seq_suffix])
    mate2_search_string = ''.join([analysis_id, '*_2_', seq_suffix])
    # glob to get the sequence file matches
    mate1_fastq_files = glob.glob(os.path.join(sequence_path, mate1_search_string))
    mate2_fastq_files = glob.glob(os.path.join(sequence_path, mate2_search_string))
    # BUG FIX: require exactly one match for *each* mate.  The original
    # used 'and', which only reported an error when BOTH globs failed and
    # crashed with IndexError below when only one mate was missing.
    if len(mate1_fastq_files) != 1 or len(mate2_fastq_files) != 1:
        logging.error('Error finding paired-end sequences for flowcell %s lane %s' % (flowcell, lane))
        return None
    fastq_files = [mate1_fastq_files[0], mate2_fastq_files[0]]
    # regexp for getting beginning of filename to use as prefix
    # in search for bowtie quals file
    prefix_re = re.compile(r'(.*)' + seq_suffix)
    quals_params = set()
    read_lengths = set()
    for fastq_file in fastq_files:
        m = prefix_re.match(os.path.basename(fastq_file))
        if not m:
            # cannot derive the quals-file prefix; bail out instead of
            # continuing with an empty set (the .pop() below would raise)
            logging.critical("regexp match failed on file %s" % fastq_file)
            return None
        # get the quals param from the bowtie file
        fastq_prefix = m.group(1)
        quals_file = os.path.join(sequence_path, fastq_prefix + bowtie_quals_suffix)
        if not os.path.exists(quals_file):
            logging.error("quality score file %s not found... skipping %s" %
                          (quals_file, fastq_file))
            return None
        # first line of the quals file names the quality-score encoding
        with open(quals_file) as quals_fhd:
            quals_params.add(quals_fhd.readline().strip())
        # get the read length
        read_lengths.add(get_seq_length(fastq_file))

    # check to see that read lengths and quality scoring system is the same
    # for all the fastq files
    if len(quals_params) > 1:
        logging.error('quality scores not consistent across samples: %s' % quals_params)
        return None
    if len(read_lengths) > 1:
        logging.error('read lengths not consistent across samples: %s' % read_lengths)
        return None
    # return tuple with sequence files, quals param, and read length
    return fastq_files, quals_params.pop(), read_lengths.pop()


def get_nonmapping_reads(analysis_id, fastq_files, quals_param,
                         read_length,
                         analysis_cwd,
                         processors=2):
    '''Run a paired-end Bowtie alignment against hg18 and return the names
    of the FASTQ files holding reads that failed to align.

    The Bowtie process writes its SAM output, unaligned reads (--un), and
    multi-mapping reads (--max) into `analysis_cwd`.  Returns the list of
    the two per-mate unaligned FASTQ filenames (relative to `analysis_cwd`);
    Bowtie derives them from the --un name by inserting _1/_2.
    '''
    # skip alignments that already exist
    logging.debug("%s: preparing to run Bowtie on files: %s quals: %s" %
                  (analysis_id, fastq_files, quals_param))
    unaligned_fq = '%s.unaligned.fq' % (analysis_id)
    unaligned_pe_fq = ['%s.unaligned_1.fq' % (analysis_id),
                       '%s.unaligned_2.fq' % (analysis_id)]

    max_fq = '%s.max.fq' % (analysis_id)
    # trim one 3' base before aligning (-3 below)
    trimming_3prime = 1
    trimming_5prime = 0
    trimmed_read_length = read_length - trimming_3prime - trimming_5prime
    # options
    app = Bowtie(cwd=analysis_cwd)
    app.setenv('BOWTIE_INDEXES', Bowtie.get_indexes_path())
    # options for paired-end alignment
    app.addOptions({'-q': None,
                    '--%s' % (quals_param): None,
                    '-p': processors,
                    '-n': 2,
                    '-l': trimmed_read_length,
                    '-3': trimming_3prime,
                    '-m': 1,
                    '-k': 1,
                    '--best': None,
                    '--sam': None,
                    '-I': 0,
                    '-X': 500000,
                    '--un': unaligned_fq,
                    '--max': max_fq})
    # output path, relative to the Bowtie working directory (analysis_cwd)
    # NOTE: was os.path.join() with a single argument -- a no-op
    output_file = '%s_bowtie.sam' % (analysis_id)
    app.args = [app.get_index('hg18'),
                "-1 %s" % fastq_files[0],
                "-2 %s" % fastq_files[1],
                output_file]
    # debug
    logging.debug(app.getCommandLine())
    # run the program
    retcode, resultpaths = app()
    logging.debug("%s: Bowtie finished" % (analysis_id))
    return unaligned_pe_fq


def run_novoalign(analysis_id, fastq_files, quals_param, insert_size, results_cwd):
    '''Align `fastq_files` with Novoalign, writing SAM output to
    <results_cwd>/<analysis_id>_novoalign.unaligned.sam.

    `insert_size` is passed to novoalign's -i option with a fixed
    standard deviation of 20.
    '''
    readgroup = '.'
    platformunit = '.'
    # skip alignments that already exist
    logging.debug("%s: preparing to run novoalign on files: %s quals: %s" %
                  (analysis_id, fastq_files, quals_param))
    # alignment options: plain SAM output, no report for repeat reads,
    # expected insert size +/- stdev 20
    novoalign_options = ({'-o': 'SAM',
                          '-r': 'None',
                          '-i': "%d %d" % (insert_size, 20)})
    # setup the output SAM file; `with` guarantees the handle is closed
    # even if novoalign fails (the original leaked it on error)
    output_file = os.path.join(results_cwd, analysis_id + '_novoalign.unaligned.sam')
    with open(output_file, 'w') as output_fhd:
        # create the app controller
        novoalign_app = Novoalign(options=novoalign_options, cwd=results_cwd,
                                  stdout=output_fhd)
        # run
        novoalign_app.run_default(fastq_files,
                                  quals_format=quals_param)


def run_pipeline(analyses, sequence_path, results_path, force=False):
    '''Run the alignment pipeline for each (flowcell, lane, insert_size)
    analysis: find the paired-end sequence files, run Bowtie to collect
    non-mapping reads, then realign those with novoalign.

    Existing per-analysis result directories are skipped unless `force`
    is True.
    '''
    prefix_fmt = "%s_%s"

    for flowcell, lane, insert_size in analyses:
        analysis_id = prefix_fmt % (flowcell, lane)
        analysis_cwd = os.path.join(results_path, analysis_id)

        res = find_pe_sequence_files(flowcell, lane, sequence_path)
        if res is None:
            # could not locate consistent sequence files; skip this lane
            continue

        # by default do not overwrite existing results
        if os.path.exists(analysis_cwd):
            if not force:
                logging.debug("%s: results path %s exists, skipping..." % (analysis_id, analysis_cwd))
                continue
            logging.debug("%s: results path %s exists, forcing run (possibly overwriting results)..." % (analysis_id, analysis_cwd))
        else:
            # make directory for results
            logging.debug('%s: creating analysis results path %s' % (analysis_id, analysis_cwd))
            os.makedirs(analysis_cwd)

        fastq_files, quals_param, read_length = res
        inner_dist = insert_size - read_length
        if inner_dist <= 0:
            logging.error("%s: (inner_dist < 0) insert_size: %d read_length: %d inner_dist: %d" % (analysis_id, insert_size, read_length, inner_dist))
            inner_dist = 200 - read_length
            logging.debug("%s: setting inner_dist to (200 - read_length)=%d" % (analysis_id, inner_dist))
        elif inner_dist < 100:
            logging.warning("%s: (inner_dist < 100) insert_size: %d read_length: %d inner_dist: %d" % (analysis_id, insert_size, read_length, inner_dist))
        unaligned_fastq_files = get_nonmapping_reads(analysis_id, fastq_files, quals_param, read_length, analysis_cwd)
        run_novoalign(analysis_id, unaligned_fastq_files, quals_param, insert_size, analysis_cwd)


def get_analyses_from_samples(sample_xmlfile):
    '''Parse a sample-group XML file and return a list of
    (flowcell, lane, insert_size) tuples for every lane of every sample.

    BUG FIX: the original read the global `options.samples` instead of
    the `sample_xmlfile` parameter, so it only worked when called from
    the __main__ block.
    '''
    analyses = []
    for sample_group in parse_samplegroups_xml(sample_xmlfile):
        for sample in sample_group.samples:
            insert_size = get_sampledb().params[sample]['insert_size']
            for fclane in get_sampledb().params[sample]['lanes']:
                logging.debug('    sample: %s lanes: %s' %
                              (sample, fclane))
                analyses.append((fclane[0], fclane[1], insert_size))
    return analyses

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)

    optionparser = OptionParser("usage: %prog [options] [-s <samples.xml>] <sequence_path> <results_path>")
    optionparser.add_option("-s", "--samples", dest="samples",
                            help="process only the samples defined in the sample XML file")
    (options, args) = optionparser.parse_args()
    if options.samples is None:
        optionparser.error("No samples specified")
    else:
        analyses = get_analyses_from_samples(options.samples)
    # positional arguments -- validate count before indexing (the original
    # crashed with IndexError when fewer than two were supplied)
    if len(args) < 2:
        optionparser.error("sequence_path and results_path are required")
    sequence_path = os.path.abspath(args[0])
    results_path = os.path.abspath(args[1])

    # log the command-line arguments
    logging.debug('sequence_path: %s', sequence_path)
    logging.debug('results_path: %s', results_path)
    logging.debug('analyses: %s', analyses)

    # make directory for results
    if not os.path.exists(results_path):
        logging.debug('Creating results path %s' % results_path)
        os.makedirs(results_path)

    # run rnaseq pipeline
    run_pipeline(analyses, sequence_path, results_path)