'''
Created on Feb 5, 2010

@author: mkiyer
'''
# built-in python imports
import collections
import os
import pickle
import logging
import subprocess
import shutil
import sys

# globally installed packages
import ruffus
from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, \
    Numeric, Date
from sqlalchemy.sql import distinct, select, outerjoin, exists, and_, or_, not_
import pysam

# project imports
import veggie.alignment.sam as sam
from veggie.genome.genome import get_genome

# local imports
import pipelineconfig

# global variables
# directory name (relative to the pipeline output dir) for per-sample-pool
# results, plus the matching regex used by the ruffus task decorators
sample_pools_dir = "sample_pools"
sample_pools_re = r"%s/(.*)" % sample_pools_dir
# directory name for per-analysis (peak calling) results, plus its regex
analysis_dir = "analysis"
analysis_re = r"%s/(.*)" % analysis_dir
# lazily-created module-level singleton database connection
_chipseq_db = None
def get_chipseq_db():
    '''
    return the module-level ChIPSeqDB singleton, creating it on first use

    NOTE(review): ChIPSeqDB.__init__ requires connection_string and
    schema_name, so this zero-argument construction will raise TypeError
    the first time it runs.  TODO: supply the connection details here
    (e.g. from the pipeline config) before using the analysis tasks.
    '''
    global _chipseq_db
    if _chipseq_db is None:
        _chipseq_db = ChIPSeqDB()
    return _chipseq_db

class ChIPSeqDB(object):
    '''
    class for connecting to and retrieving data from chipseq database

    Tables are reflected from the given schema via SQLAlchemy and exposed
    as attributes for the queries the pipeline runs.
    '''
    # constants
    CLASS_TYPE_EXPERIMENT = "Experiment"
    CLASS_TYPE_CONTROL = "Control"
    OUTPUT_TYPE_NORMALIZED = "normalized"
    OUTPUT_TYPE_RAW = "raw"

    def __init__(self, connection_string, schema_name, echo=False):
        '''
        creates an interface to chipseq db

        connection_string: SQLAlchemy database URL
        schema_name: database schema whose tables are reflected
        echo: when True, the engine logs all emitted SQL
        '''
        # connect to database
        self.engine = create_engine(connection_string, echo=echo)
        self.schema = schema_name
        # reflect tables
        self.meta = MetaData()
        self.meta.bind = self.engine
        self.meta.reflect(bind=self.engine, schema=self.schema)
        # create shortcuts to tables
        self.analysis_main = self._get_table('analysis_main')
        self.analysis_methods = self._get_table('analysis_methods')
        self.analysis_peak_detail = self._get_table('analysis_peak_detail')
        self.analysis_signal = self._get_table('analysis_signal')
        self.analysis_class_map = self._get_table('analysis_class_map')
        self.class_sample_map = self._get_table('class_sample_map')
        self.sample_pool_main = self._get_table('sample_pool_main')
        self.sample_pool_library_map = self._get_table('sample_pool_library_map')
        self.sample_pool_output = self._get_table('sample_pool_output')

    def _get_table(self, tablename):
        # reflected tables are keyed as '<schema>.<tablename>'
        return self.meta.tables['%s.%s' % (self.schema, tablename)]

    def get_sample_pool_libraries(self):
        '''yield (sample_pool_id, flowcell_id, lane_id) for each library mapping'''
        tbl = self.sample_pool_library_map
        stmt = select([tbl.c.sample_pool_id, tbl.c.flowcell_id, tbl.c.lane_id])
        for res in stmt.execute():
            yield res.sample_pool_id, res.flowcell_id, res.lane_id

    def get_sample_pool_output(self, sp_id):
        '''return dict mapping tag_name -> file_name for sample pool sp_id'''
        tbl = self.sample_pool_output
        # BUG FIX: sp_id was previously ignored, so every sample pool got a
        # map built from ALL rows of the table (last row per tag winning).
        # NOTE(review): assumes sample_pool_output has a sample_pool_id
        # column -- verify against the actual schema.
        stmt = select([tbl.c.tag_name, tbl.c.file_name],
                      tbl.c.sample_pool_id == sp_id)
        output_files = {}
        for res in stmt.execute():
            output_files[res.tag_name] = res.file_name
        return output_files

class SamplePoolTask(object):
    '''
    serializable record describing one sample-pool processing task

    The task generator attaches further attributes (genome_version,
    fragment_length, bigwig flags, ucsc paths) dynamically before
    pickling this object into the sample pool's output directory.
    '''
    def __init__(self):
        # identity and output locations -- unset until populated
        self.sample_pool_id = None
        self.output_dir = None
        self.output_file_map = None
        # per-library inputs, kept as parallel lists
        self.library_ids, self.alignment_files = [], []

class AnalysisTask(object):
    '''
    serializable record describing one peak-calling analysis task

    All fields start as None; the analysis task generator fills them in
    before pickling.
    '''
    # attribute names initialized to None by the constructor
    _FIELDS = ('analysis_id', 'cwd', 'method_id', 'method_name',
               'method_sw_version', 'method_command_options',
               't_alignment_file', 'c_alignment_file')

    def __init__(self):
        for field_name in self._FIELDS:
            setattr(self, field_name, None)


def convert_sam_to_bed(infile, outfile):
    '''
    convert mapped reads in BAM file *infile* to 6-column BED in *outfile*

    QC-failed and unmapped reads are skipped.  The BED name column is the
    input path truncated at the first '.', and the score is fixed at 0.0.
    Returns outfile.
    '''
    samfile = pysam.Samfile(infile, "rb")
    outfhd = open(outfile, 'w')
    try:
        # loop-invariant: the name column is the same for every record
        # NOTE(review): this keeps any leading directory components of
        # infile in the name -- confirm downstream tools accept that
        track_name = infile.split('.')[0]
        for read in samfile.fetch():
            if read.is_qcfail or read.is_unmapped:
                continue
            strand = '-' if read.is_reverse else '+'
            # chrom, start, end, name, score, strand
            fields = [samfile.getrname(read.rname),
                      str(read.pos),
                      str(read.pos + read.rlen),
                      track_name,
                      '0.0',
                      strand]
            outfhd.write('\t'.join(fields) + '\n')
    finally:
        # BUG FIX: close both handles even if pysam raises mid-iteration
        samfile.close()
        outfhd.close()
    return outfile

def convert_sam_to_hpeak_bed(infile, outfile):
    '''
    convert mapped reads in BAM file *infile* to HPeak's 4-column
    BED-like format (chrom, start, end, strand) in *outfile*

    QC-failed and unmapped reads are skipped.  Returns outfile.
    '''
    samfile = pysam.Samfile(infile, "rb")
    outfhd = open(outfile, 'w')
    try:
        for read in samfile.fetch():
            if read.is_qcfail or read.is_unmapped:
                continue
            strand = '-' if read.is_reverse else '+'
            # chrom, start, end, strand
            fields = [samfile.getrname(read.rname),
                      str(read.pos),
                      str(read.pos + read.rlen),
                      strand]
            outfhd.write('\t'.join(fields) + '\n')
    finally:
        # BUG FIX: close both handles even if pysam raises mid-iteration
        samfile.close()
        outfhd.close()
    return outfile

def convert_sam_to_raw_tags(infile, outfile):
    '''
    convert mapped reads in BAM file *infile* to raw tag format
    (chrom, position, strand -- tab separated) in *outfile*

    QC-failed and unmapped reads are skipped.  Returns outfile.
    '''
    samfile = pysam.Samfile(infile, "rb")
    outfhd = open(outfile, 'w')
    try:
        for read in samfile.fetch():
            if read.is_qcfail or read.is_unmapped:
                continue
            strand = '-' if read.is_reverse else '+'
            outfhd.write('%s\t%d\t%s\n' % (samfile.getrname(read.rname),
                                           read.pos,
                                           strand))
    finally:
        # BUG FIX: close both handles even if pysam raises mid-iteration
        samfile.close()
        outfhd.close()
    return outfile

def run_sicer(t_file, c_file, name, cwd=None, command_params=None, sw_version=None):
    '''
    run SICER peak calling on treatment/control BAM files

    t_file, c_file: BAM paths, converted to BED inside *cwd* first
    name: experiment name (currently only logged)
    cwd: working directory; defaults to the current directory
    command_params: whitespace-separated "<window_size> <gap_size> <fdr>"
    sw_version: accepted for interface parity with the other runners; unused
    '''
    logger = logging.getLogger(__name__)
    # BUG FIX: cwd defaulted to None but was passed straight to
    # os.path.join, which would raise; fall back to the current directory
    if cwd is None:
        cwd = os.getcwd()
    # convert t_file and c_file to BED format
    bed_t_file = 'treatment.bed'
    bed_c_file = 'control.bed'
    convert_sam_to_bed(t_file, os.path.join(cwd, bed_t_file))
    convert_sam_to_bed(c_file, os.path.join(cwd, bed_c_file))
    # parse args: window size, gap size, FDR (in that order)
    myargs = command_params.split()
    window_size, gap_size, fdr = myargs[0], myargs[1], myargs[2]
    # run sicer
    import veggie.app.chipseq.sicer as sicer
    sicer_app = sicer.SICER(cwd=cwd)
    sicer_app.run(bed_t_file, bed_c_file,
                  window_size=window_size,
                  gap_size=gap_size,
                  fdr=fdr)
    # process output
    logger.debug("Command line: %s" % (sicer_app.getCommandLine()))
    sicer_output_file = sicer_app.get_result_paths()[0]
    logger.debug("SICER output file: %s" % sicer_output_file)
    logger.debug("SICER experiment name: %s" % name)
    # TODO: convert SICER output to BED and copy it to a '<name>.peaks'
    # file so this runner produces output like the others

def run_hpeak(t_file, c_file, name, cwd, command_params=None, sw_version=None):
    '''
    run HPeak peak calling on treatment (and optional control) BAM files

    t_file: treatment BAM path, converted to HPeak's BED-like format in *cwd*
    c_file: optional control BAM path, or None for treatment-only calling
    name: experiment name passed through to HPeak
    command_params: whitespace-separated extra HPeak arguments
    sw_version: accepted for interface parity with the other runners; unused
    Returns the HPeak process return code.
    '''
    import veggie.app.chipseq.hpeak as hpeak
    # convert t_file (and c_file, when given) to HPeak's input format
    bed_t_file = 'treatment.hpeak_bed'
    convert_sam_to_hpeak_bed(t_file, os.path.join(cwd, bed_t_file))
    if c_file is not None:
        bed_c_file = 'control.hpeak_bed'
        convert_sam_to_hpeak_bed(c_file, os.path.join(cwd, bed_c_file))
    else:
        bed_c_file = None
    # add command line params
    hpeak_args = command_params.split()
    hpeak_app = hpeak.HPeak(cwd=cwd, args=hpeak_args)
    retcode = hpeak_app.run(bed_t_file, bed_c_file, format='BED', name=name)
    return retcode

def run_macs(t_file, c_file, name, cwd, command_params=None, sw_version=None):
    '''
    run MACS peak calling on treatment (and optional control) BAM files

    t_file: treatment BAM path (converted to SAM in *cwd* for MACS)
    c_file: optional control BAM path, or None
    name: experiment name passed through to MACS
    command_params: whitespace-separated extra MACS arguments
    sw_version: accepted for interface parity with the other runners; unused
    Returns the MACS process return code.
    Raises ValueError when the treatment BAM contains no reads.
    '''
    logger = logging.getLogger(__name__)
    import veggie.app.chipseq.macs as macs
    # macs requires SAM input so convert (skipped when already converted)
    # treatment
    sam_t_file = 'treatment.sam'
    sam_t_path = os.path.join(cwd, sam_t_file)
    if not os.path.exists(sam_t_path):
        logger.debug("Converting BAM file %s -> SAM for MACS" % (t_file))
        subprocess.call(["samtools", "view", t_file, "-o", sam_t_path])
    # control
    if c_file is not None:
        sam_c_file = 'control.sam'
        sam_c_path = os.path.join(cwd, sam_c_file)
        if not os.path.exists(sam_c_path):
            logger.debug("Converting BAM file %s -> SAM for MACS" % (c_file))
            subprocess.call(["samtools", "view", c_file, "-o", sam_c_path])
    else:
        sam_c_file = None
    # get tag size from the first aligned read
    read_length = None
    samfile = pysam.Samfile(t_file, "rb")
    for read in samfile.fetch():
        read_length = read.rlen
        break
    samfile.close()
    # BUG FIX: read_length was only bound inside the loop, so an empty
    # BAM previously raised a bare NameError here
    if read_length is None:
        raise ValueError("no reads in %s: cannot determine MACS tag size" % t_file)
    # default options
    macs_args = command_params.split()
    macs_options = {'--tsize': read_length}
    macs_app = macs.MACS(options=macs_options, args=macs_args, cwd=cwd)
    retcode = macs_app.run(sam_t_file, sam_c_file, format="SAM", name=name)
    # process output
    return retcode


def write_chipdiff_config(filename, maxIterationNum=500, minP=0.95,
                          maxTrainingSeqNum=10000, minFoldChange=3.0,
                          minRegionDist=1000):
    '''
    write a ChIPDiff parameter file to *filename* and return the path

    Default values implement the template previously documented here:

        maxIterationNum 500
        minP            0.95
        maxTrainingSeqNum       10000
        minFoldChange   3.0
        minRegionDist   1000
    '''
    params = [('maxIterationNum', maxIterationNum),
              ('minP', minP),
              ('maxTrainingSeqNum', maxTrainingSeqNum),
              ('minFoldChange', minFoldChange),
              ('minRegionDist', minRegionDist)]
    fh = open(filename, 'w')
    try:
        for key, value in params:
            fh.write('%s\t%s\n' % (key, value))
    finally:
        fh.close()
    return filename

def run_chipdiff(t_file, c_file, name, cwd, command_params=None, sw_version=None):
    '''
    run ChIPDiff on treatment/control BAM files

    Both inputs are converted to ChIPDiff's raw tag format inside *cwd*.
    command_params and sw_version are accepted for interface parity with
    the other runners but are not used yet.
    Returns the ChIPDiff process return code.
    '''
    logger = logging.getLogger(__name__)
    # convert to the input file format required by ChIPDiff
    tag_t_file = 'treatment.tags'
    tag_c_file = 'control.tags'
    convert_sam_to_raw_tags(t_file, os.path.join(cwd, tag_t_file))
    convert_sam_to_raw_tags(c_file, os.path.join(cwd, tag_c_file))
    # run chipdiff
    import veggie.app.chipseq.chipdiff as chipdiff
    chipdiff_app = chipdiff.ChIPDiff(cwd=cwd)
    # TODO: pass command_params through once ChIPDiff's args are decided
    retcode = chipdiff_app.run_basic(tag_t_file, tag_c_file, name=name)
    logger.debug("Command line: %s" % (chipdiff_app.getCommandLine()))
    return retcode

def filter_references(insamfile, outsamfile, references):
    '''
    copy SAM file *insamfile* to *outsamfile*, dropping @SQ header lines
    and aligned reads whose reference name is not in *references*

    Unmapped reads (RNAME '*') are always kept.  When references is None
    this is a no-op and returns None; otherwise returns outsamfile.

    BUG FIX: the original implementation re-processed the last header
    line as the first read, and raised NameError on an input containing
    only header lines (or no lines at all).
    '''
    if references is None:
        return
    logger = logging.getLogger(__name__)
    rejected_reads = 0
    insam = open(insamfile, "r")
    outsam = open(outsamfile, "w")
    try:
        in_header = True
        for line in insam:
            if in_header and line.startswith("@"):
                header_fields = line.strip().split('\t')
                if header_fields[0] == '@SQ':
                    # @SQ tags are 'TAG:value'; keep only wanted references
                    fields = dict([field.split(':', 1) for field in header_fields[1:]])
                    assert 'SN' in fields
                    if fields['SN'] not in references:
                        logger.debug('filter_references: rejected reference %s' % fields['SN'])
                        continue
                outsam.write(line)
            else:
                # the first non-'@' line ends the header for good
                in_header = False
                rname = line.split('\t', 3)[2]
                if rname == '*' or rname in references:
                    outsam.write(line)
                else:
                    rejected_reads += 1
    finally:
        insam.close()
        outsam.close()
    logger.debug('filter_references: removed %d reads from non-matching references' % (rejected_reads))
    return outsamfile

def _prepare_bam_for_merge(insamfile, tmp_path, references, tmpfiles):
    '''
    filter, convert, sort, index and rmdup one SAM file

    Returns the path of the duplicate-free sorted BAM.  Every
    intermediate file created along the way is appended to *tmpfiles*
    for cleanup by the caller.
    '''
    logger = logging.getLogger(__name__)
    # remove references that are contamination
    myfiltersam = os.path.join(tmp_path, os.path.splitext(os.path.basename(insamfile))[0] + '.filter.sam')
    tmpfiles.append(myfiltersam)
    logger.debug("Filtering references from SAM %s -> SAM %s" % (insamfile, myfiltersam))
    filter_references(insamfile, myfiltersam, references=references)
    # convert to BAM
    mybamfile = os.path.splitext(myfiltersam)[0] + '.bam'
    tmpfiles.append(mybamfile)
    logger.debug("Converting SAM %s -> BAM %s" % (myfiltersam, mybamfile))
    subprocess.call(['samtools', 'view', '-S', myfiltersam, '-b', '-o', mybamfile])
    # sort
    mysortedbam = os.path.splitext(mybamfile)[0] + '.sorted.bam'
    tmpfiles.append(mysortedbam)
    logger.debug("Sorting BAM %s -> %s" % (mybamfile, mysortedbam))
    # sort automatically adds ".bam" to the file, so remove the extension
    subprocess.call(['samtools', 'sort', mybamfile, os.path.splitext(mysortedbam)[0]])
    # index
    logger.debug("Indexing Sorting BAM %s" % (mysortedbam))
    subprocess.call(['samtools', 'index', mysortedbam])
    tmpfiles.append(mysortedbam + '.bai')
    # remove duplicates
    myrmdupbam = os.path.splitext(mysortedbam)[0] + '.rmdup.bam'
    tmpfiles.append(myrmdupbam)
    logger.debug("Removing duplicates from BAM %s -> %s" % (mysortedbam, myrmdupbam))
    subprocess.call(['samtools', 'rmdup', '-s', mysortedbam, myrmdupbam])
    return myrmdupbam

def convert_sam_to_bam(insamfiles, bamfile, references, tmp_path=None):
    '''
    merge SAM alignment files into one sorted, indexed, duplicate-free BAM

    insamfiles: list of input SAM paths
    bamfile: output BAM path
    references: reference names to keep (reads mapped elsewhere are dropped)
    tmp_path: directory for intermediate files; defaults to bamfile's dir
    '''
    logger = logging.getLogger(__name__)
    tmpfiles = []
    if tmp_path is None:
        tmp_path = os.path.abspath(os.path.dirname(bamfile))
    # first have to convert the SAM files to BAM and prepare them
    mergebamfiles = [_prepare_bam_for_merge(insamfile, tmp_path, references, tmpfiles)
                     for insamfile in insamfiles]
    # merge
    if len(mergebamfiles) == 1:
        # just one file, so copy it rather than merge it
        # it is already sorted too.. so just re-index it
        logger.debug("Copying BAM file %s -> %s" % (mergebamfiles[0], bamfile))
        shutil.copyfile(mergebamfiles[0], bamfile)
    else:
        merge_cmd = ['samtools', 'merge', bamfile]
        merge_cmd.extend(mergebamfiles)
        logger.debug("Merging BAM files %s -> %s" % (mergebamfiles, bamfile))
        subprocess.call(merge_cmd)
    logger.debug("Indexing BAM file %s" % (bamfile))
    subprocess.call(['samtools', 'index', bamfile])
    # remove temporary files
    for tmpfile in tmpfiles:
        # BUG FIX: guard removal -- an earlier samtools failure may have
        # left some intermediates uncreated, and os.remove would raise
        if os.path.exists(tmpfile):
            logger.debug("Removing %s" % tmpfile)
            os.remove(tmpfile)

def wigToBigWig(infile, outfile, task_info):
    '''
    convert wiggle *infile* to bigWig *outfile* using UCSC's wigToBigWig

    task_info must provide ucsc_bin_dir and chrom_sizes_file attributes.
    Returns the process exit code: 0 on success, nonzero on failure
    (127 when the executable cannot be run at all).
    '''
    executable = os.path.join(task_info.ucsc_bin_dir, 'wigToBigWig')
    chrom_sizes_file = task_info.chrom_sizes_file
    # BUG FIX: use an argument list (no shell) instead of os.system on a
    # joined string, so paths with spaces/metacharacters are passed safely
    cmd = [executable, '-clip', infile, chrom_sizes_file, outfile]
    try:
        return subprocess.call(cmd)
    except OSError:
        # os.system reported a missing executable via a nonzero status;
        # preserve "nonzero means failure" for callers
        return 127

@ruffus.split(["config.xml"], "*/*/sample_pool_info.pickle")
def generate_sample_pool_tasks(input_files, output_files):
    '''
    read the pipeline config and the chipseq database, then write one
    pickled SamplePoolTask (plus a chrom sizes file) into each sample
    pool's output directory for the downstream ruffus tasks to consume
    '''
    logger = logging.getLogger(__name__)
    # read configuration
    logger.info("Parsing configuration file...")
    config = pipelineconfig.PipelineConfig(input_files[0])
    # setup output path (NOTE: shadows the module-level sample_pools_dir)
    sample_pools_dir = os.path.join(config.output_dir, config.sample_pools_dir)
    # log sample pool configuration
    logger.info("Alignment results dir: %s" % config.alignment_dir)
    logger.info("Sample pool output dir: %s" % sample_pools_dir)
    logger.info("Normalized bigwigs: %s" % config.make_norm_bigwig)
    logger.info("Raw read bigwigs: %s" % config.make_raw_bigwig)
    logger.info("cDNA fragment length (for tag extension): %s" % config.cdna_fragment_length)

    # fetch the chromosome sizes using UCSC's script
    logger.info("Fetching chrom sizes from UCSC using MYSQL query...")
    chrom_sizes_file = 'ucsc.chrom.sizes'
    executable = os.path.join(config.ucsc_bin_dir, 'fetchChromSizes')
    cmd = ' '.join(['sh', executable, config.genome_version, '>', chrom_sizes_file])
    os.system(cmd)

    # connect to database
    connection_string, schema_name = config.databases[config.db]
    logger.info("Connecting to database [%s] schema=[%s] at %s..." % (config.db, schema_name, connection_string))
    db = ChIPSeqDB(connection_string, schema_name, echo=False)

    # associate libraries with sample pools
    sample_pools = collections.defaultdict(lambda: set())
    for sample_pool_id, flowcell_id, lane_id in db.get_sample_pool_libraries():
        # TODO: this is a hack to convert a flowcell/lane into a library id
        # for publicly available data this won't work.  We need to use the
        # sample ID to lookup where the alignments are stored
        library_id = '%s_%s' % (flowcell_id, lane_id)
        alignment_file = os.path.join(config.alignment_dir, library_id + '.sam')
        if os.path.exists(alignment_file):
            sample_pools[sample_pool_id].add((library_id, alignment_file))
        else:
            logger.warning('SP_ID %s: could not find alignment results for library %s at path: %s' %
                           (sample_pool_id, library_id, alignment_file))
    sample_pools = dict(sample_pools)

    # create tasks for each sample pool
    for sample_pool_id, sample_pool_libraries in sample_pools.iteritems():
        task_info = SamplePoolTask()
        task_info.sample_pool_id = sample_pool_id
        task_info.genome_version = config.genome_version
        task_info.fragment_length = config.cdna_fragment_length
        task_info.make_norm_bigwig = config.make_norm_bigwig
        task_info.make_raw_bigwig = config.make_raw_bigwig
        task_info.output_dir = os.path.join(sample_pools_dir, sample_pool_id)
        task_info.output_file_map = db.get_sample_pool_output(sample_pool_id)
        # BUG FIX: these two attributes were previously assigned AFTER the
        # task was pickled, so downstream tasks that unpickled the file
        # hit AttributeError in wigToBigWig; set them before dumping
        task_info.ucsc_bin_dir = config.ucsc_bin_dir
        task_info.chrom_sizes_file = 'ucsc.chrom.sizes'
        for library_id, alignment_file in sample_pool_libraries:
            task_info.library_ids.append(library_id)
            task_info.alignment_files.append(alignment_file)
        # setup a new working directory for each sample pool
        if not os.path.exists(task_info.output_dir):
            logger.debug('%s: creating sample pool results path %s' % (sample_pool_id, task_info.output_dir))
            os.makedirs(task_info.output_dir)
        # TODO: instead of using pickle, use something human readable like XML
        task_info_file = os.path.join(task_info.output_dir, 'sample_pool_info.pickle')
        fh = open(task_info_file, 'w')
        try:
            pickle.dump(task_info, fh)
        finally:
            fh.close()
        # copy the chrom sizes file to the task directory
        shutil.copyfile(chrom_sizes_file, os.path.join(task_info.output_dir, task_info.chrom_sizes_file))

@ruffus.transform(generate_sample_pool_tasks, 
                  ruffus.regex(sample_pools_dir + r"/(.*)/sample_pool_info.pickle$"), 
                  sample_pools_dir + r"/\1/merged_alignments.bam", r"\1")
def merge_alignments_task(infile, outfile, sample_pool_id):
    '''
    merge a sample pool's SAM alignments into one filtered BAM file

    Reads the pickled SamplePoolTask at *infile*; skips work when there
    are no alignments or the output BAM already exists.
    '''
    logger = logging.getLogger(sample_pool_id)
    task_info = pickle.load(open(infile))
    # nothing to merge for this sample pool
    if not task_info.alignment_files:
        return
    if os.path.exists(outfile):
        logger.debug('BAM file %s exists, skipping..' % outfile)
        return
    # keep only reads mapped to known chromosomes of the configured genome
    chrom_names = get_genome(task_info.genome_version).get_chrom_names()
    convert_sam_to_bam(task_info.alignment_files, outfile, chrom_names)

def _make_bigwig(input_file, output_dir, wig_name, output_tag, norm, task_info, logger):
    '''
    generate one wiggle track from the BAM and convert it to bigWig

    output_tag selects the output file name from task_info.output_file_map.
    Returns True on success, False when wigToBigWig failed.
    '''
    wigfile = os.path.join(output_dir, wig_name)
    kind = 'normalized' if norm else 'raw'
    logger.debug("Converting BAM %s -> %s wiggle %s" % (input_file, kind, wigfile))
    sam.bam_to_wiggle(input_file, wigfile,
                      unique_only=False,
                      merge_strands=True,
                      fragment_length=task_info.fragment_length,
                      norm=norm)
    # convert to bigwig
    bwfile = os.path.join(output_dir, task_info.output_file_map[output_tag])
    logger.debug("Converting WIG %s -> BIGWIG %s" % (wigfile, bwfile))
    retcode = wigToBigWig(wigfile, bwfile, task_info)
    if retcode != 0:
        logger.error("wigToBigWig returned error code %d" % retcode)
        return False
    return True

@ruffus.transform(merge_alignments_task, 
                  ruffus.regex(sample_pools_dir + r"/(.*)/merged_alignments.bam$"), 
                  sample_pools_dir + r"/\1/sample_pools.done", r"\1")
def bam_to_wiggle_task(input_file, output_file, sample_pool_id):
    '''
    make normalized and/or raw bigWig tracks from the merged BAM and
    touch a blank sentinel file when every conversion succeeded
    '''
    logger = logging.getLogger(sample_pool_id)
    if not os.path.exists(input_file):
        logger.error("Input file %s does not exist" % (input_file))
        return
    # get task info
    task_info_file = os.path.join(os.path.dirname(input_file), "sample_pool_info.pickle")
    task_info = pickle.load(open(task_info_file))
    # get output dir
    output_dir = os.path.dirname(output_file)
    # make bigwig files (the shared logic lives in _make_bigwig)
    noerror = True
    if task_info.make_norm_bigwig:
        noerror = _make_bigwig(input_file, output_dir, 'norm.wig',
                               ChIPSeqDB.OUTPUT_TYPE_NORMALIZED, True,
                               task_info, logger) and noerror
    if task_info.make_raw_bigwig:
        noerror = _make_bigwig(input_file, output_dir, 'raw.wig',
                               ChIPSeqDB.OUTPUT_TYPE_RAW, False,
                               task_info, logger) and noerror
    if noerror:
        # write a blank file signifying task finished successfully
        open(output_file, 'w').close()

@ruffus.split(["config.xml"], "*/*/analysis_info.pickle")
def generate_analysis_task(infile, outfile):
    '''
    query the chipseq database for runnable analyses and write one
    pickled AnalysisTask per analysis directory
    '''
    logger = logging.getLogger(__name__)
    # output path
    chipseq_path = '/home/mkiyer/chipseq_pipeline'
    chipseq_analysis_path = os.path.join(chipseq_path, analysis_dir)
    chipseq_sample_path = os.path.join(chipseq_path, sample_pools_dir)
    # load the chipseq database
    cdb = get_chipseq_db()
    # get all the analyses not on hold
    # BUG FIX: the reflected table attributes are named analysis_main /
    # analysis_methods; cdb.chipseq_main and cdb.chipseq_methods never
    # existed and raised AttributeError
    tbl = cdb.analysis_main
    stmt = select([tbl.c.analysis_id,
                   tbl.c.analysis_name,
                   tbl.c.method_id],
                   tbl.c.analysis_status != "hold")
    for analysis_res in stmt.execute():
        analysis_id, analysis_name, method_id = analysis_res
        #
        # get method associated with this analysis
        #
        tbl = cdb.analysis_methods
        stmt = select([tbl], tbl.c.method_id == method_id)
        method_res = stmt.execute().fetchone()
        if method_res is None:
            logger.warning("%s (%s): No method associated with this analysis" % (analysis_id, analysis_name))
            continue
        method_name = method_res.method_name
        method_sw_version = method_res.sw_version
        method_command_options = method_res.command_options
        #
        # get "classes" associated with each analysis
        #
        tbl = cdb.analysis_class_map
        stmt = select([tbl.c.class_id, tbl.c.s_flag],
                      tbl.c.analysis_id == analysis_id)
        class_id_type_map = {}
        for class_res in stmt.execute():
            class_id_type_map[class_res.class_id] = class_res.s_flag
        #
        # get samples associated with each class
        #
        tbl = cdb.class_sample_map
        t_alignment_file = None
        c_alignment_file = None
        found_all_files = True
        for class_id, class_type in class_id_type_map.iteritems():
            stmt = select([tbl.c.sample_pool_id, tbl.c.sample_pool_name],
                          tbl.c.class_id == class_id)
            # get sample pools
            for sample_res in stmt.execute():
                sample_pool_id = sample_res.sample_pool_id
                # see if there are alignment results for the sample pool
                alignment_file = os.path.join(chipseq_sample_path, sample_pool_id, 'merged_alignments.bam')
                if os.path.exists(alignment_file):
                    # BUG FIX: compare against the class constants; the
                    # lowercase cdb.class_type_* attributes do not exist.
                    # NOTE(review): assumes s_flag holds the literal
                    # strings "Experiment"/"Control" -- verify in the db
                    if class_type == ChIPSeqDB.CLASS_TYPE_EXPERIMENT:
                        t_alignment_file = alignment_file
                    elif class_type == ChIPSeqDB.CLASS_TYPE_CONTROL:
                        c_alignment_file = alignment_file
                else:
                    logger.warning("%s (%s): Could not find alignment file %s" % (analysis_id, analysis_name, alignment_file))
                    found_all_files = False
        if found_all_files and (t_alignment_file is not None):
            # build an AnalysisTask
            task_info = AnalysisTask()
            task_info.analysis_id = analysis_id
            task_info.cwd = os.path.join(chipseq_analysis_path, analysis_id)
            task_info.method_id = method_id
            task_info.method_name = method_name
            task_info.method_sw_version = method_sw_version
            task_info.method_command_options = method_command_options
            task_info.t_alignment_file = t_alignment_file
            task_info.c_alignment_file = c_alignment_file
            # setup a new working directory
            if not os.path.exists(task_info.cwd):
                logger.debug('%s: creating analysis results path %s' % (analysis_id, task_info.cwd))
                os.makedirs(task_info.cwd)
            task_info_file = os.path.join(task_info.cwd, 'analysis_info.pickle')
            fh = open(task_info_file, 'w')
            try:
                pickle.dump(task_info, fh)
            finally:
                fh.close()
            logger.debug("%s: created analysis task" % (analysis_id))
        else:
            logger.warning("%s (%s): Could not create analysis task" % (analysis_id, analysis_name))

@ruffus.transform(generate_analysis_task, 
                  ruffus.regex(analysis_dir + r"/(.*)/analysis_info.pickle$"), 
                  analysis_dir + r"/\1/peaks.txt", 
                  r"\1")
def peak_finder_task(input_file, output_file, analysis_id):
    '''
    dispatch a pickled AnalysisTask to the matching peak-calling runner

    Unknown method names are silently ignored, matching the original
    if/elif chain's fall-through behavior.
    '''
    logger = logging.getLogger(__name__)
    task_info = pickle.load(open(input_file))
    # map method name -> (log label, runner function)
    runners = {'MACS': ('MACS', run_macs),
               'HPEAK': ('HPeak', run_hpeak),
               'CHIPDIFF': ('ChIPDiff', run_chipdiff),
               'SICER': ('SICER', run_sicer)}
    method_name = task_info.method_name
    if method_name not in runners:
        return
    label, run_func = runners[method_name]
    logger.debug("%s: Running %s to find peaks" % (task_info.analysis_id, label))
    run_func(task_info.t_alignment_file,
             task_info.c_alignment_file,
             name=method_name,
             cwd=task_info.cwd,
             command_params=task_info.method_command_options,
             sw_version=task_info.method_sw_version)


if __name__ == '__main__':
    from optparse import OptionParser
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)
    # parse command line arguments
    optionparser = OptionParser("usage: %prog [options]")
    # BUG FIX: the help text previously described an unrelated option
    optionparser.add_option("-p", "--processors", dest="processors", type="int", default=4,
                            help="number of processors to use when running pipeline tasks [default: %default]")
    (options, args) = optionparser.parse_args()
    # run the pipeline up to the bigwig-generation stage
    ruffus.pipeline_run([bam_to_wiggle_task], verbose=2, multiprocess=options.processors)
    # the peak-finding stage is currently disabled
    #ruffus.pipeline_run([peak_finder_task], verbose=2, multiprocess=options.processors)
