'''
ChIP-seq peak-calling analysis pipeline (ruffus-based): stages alignment
files, runs MACS/HPeak/SICER/ChIPDiff, and loads the resulting peaks into
a database.

Created on Feb 26, 2010

@author: mkiyer
'''
# built-in python imports
import collections
import os
import pickle
import logging
import subprocess
import shutil
import sys
import math
import operator

# globally installed packages
import ruffus
from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, \
    Numeric, Date
from sqlalchemy.sql import distinct, select, outerjoin, exists, and_, or_, not_
import pysam

# project imports
import veggie.alignment.sam as sam
from veggie.genome.genome import get_genome

# local imports
import pipelineconfig
import chipseqdb


class AnalysisTask(object):
    """Serializable container describing one peak-calling analysis.

    Instances are pickled to 'analysis_info.pickle' files and passed
    between pipeline stages.  Additional attributes (db connection info,
    ucsc_bin_dir, chrom_sizes_file, analysis_force) are attached
    dynamically in generate_analysis_task() before pickling.
    """
    def __init__(self):
        # unique identifier of the analysis; also used as the logger name
        self.analysis_id = None
        # directory where all output for this analysis is written
        self.output_dir = None
        # database id of the peak-calling method
        self.method_id = None
        # method name used for dispatch: 'MACS', 'HPEAK', 'SICER', 'CHIPDIFF'
        self.method_name = None
        # software version string of the method
        self.method_sw_version = None
        # raw whitespace-separated command-line options for the method
        self.method_command_options = None
        # path to the treatment (ChIP) alignment BAM file
        self.t_alignment_file = None
        # path to the control alignment BAM file (may be None)
        self.c_alignment_file = None

class PeakInfo(object):
    """Tool-agnostic record for a single called peak.

    Fields a given peak caller does not report are left as None;
    normalized_rank defaults to 0 until sort_peaks() assigns it.
    """
    def __init__(self):
        for field in ('chrom', 'start', 'end', 'summit', 't_tags',
                      'c_tags', 'neglog10p', 'neglog10fdr', 'fold_change'):
            setattr(self, field, None)
        self.normalized_rank = 0

    def __str__(self):
        # tab-delimited dump of all fields in a fixed column order
        tail = [self.start, self.end, self.normalized_rank, self.summit,
                self.t_tags, self.c_tags, self.neglog10p,
                self.neglog10fdr, self.fold_change]
        return '\t'.join([self.chrom] + [str(value) for value in tail])

def touch(path):
    """Create *path* if missing and set its atime/mtime to now (Unix touch).

    Missing parent directories are created as needed.
    """
    import time
    now = time.time()
    try:
        # fast path: the file already exists, just bump its timestamps
        os.utime(path, (now, now))
    except OSError:
        # BUGFIX: a bare filename has dirname '' and os.makedirs('')
        # raises; only create parents when there is a directory part
        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        open(path, "w").close()
        os.utime(path, (now, now))

def update_analysis_status(db, task_info, status_flag):
    """Record a new status flag for this analysis in the database."""
    log = logging.getLogger(task_info.analysis_id)
    log.debug('Marking analysis status_flag=%s' % (status_flag))
    db.set_analysis_status_flag(task_info.analysis_id, status_flag)


def write_peaks(peaks, analysis_id, outfhd):
    """Write peaks to *outfhd* as tab-delimited rows tagged with *analysis_id*.

    Columns: chrom, start, end, analysis_id, normalized_rank (3 decimals),
    summit, t_tags, c_tags, neglog10p, neglog10fdr, fold_change.
    """
    for peak in peaks:
        row = [peak.chrom, str(peak.start), str(peak.end), analysis_id,
               '%.3f' % peak.normalized_rank]
        row.extend(str(v) for v in (peak.summit, peak.t_tags, peak.c_tags,
                                    peak.neglog10p, peak.neglog10fdr,
                                    peak.fold_change))
        outfhd.write('\t'.join(row) + '\n')

def write_peak_bed_file(peaks, analysis_id, outfhd):
    """Write peaks as BED5 lines; names are '<analysis_id>_peak_<index>'."""
    index = 0
    for peak in peaks:
        fields = (peak.chrom, str(peak.start), str(peak.end),
                  '%s_peak_%d' % (analysis_id, index),
                  '%.3f' % peak.normalized_rank)
        outfhd.write('\t'.join(fields) + '\n')
        index += 1

def bed_to_bigbed(input_file, output_file, task_info):
    """Convert a BED5 peaks file to bigBed format with the UCSC tools.

    Pipeline: sort -> bedClip (drop peaks outside chromosome bounds) ->
    bedToBigBed.  Returns 0 on success, or the exit status of the first
    failing step.  Intermediate files are written next to *input_file*.
    """
    logger = logging.getLogger(task_info.analysis_id)
    # write the autosql field definition required by bedToBigBed
    as_string = '''\
table bed5
"Browser extensible data"
    (
    string chrom;      "Reference sequence chromosome or scaffold"
    uint   chromStart; "Start position in chromosome"
    uint   chromEnd;   "End position in chromosome"
    string name;       "Name of item"
    float  score;      "Score from 0-1000"
    )
'''
    asfile = os.path.join(os.path.dirname(output_file), 'bed5.as')
    # BUGFIX: use write() (writelines() on a str writes char-by-char) and
    # always close the handle
    f = open(asfile, 'w')
    try:
        f.write(as_string)
    finally:
        f.close()
    chrom_sizes_file = task_info.chrom_sizes_file
    # sort the peaks by chromosome then start, as the UCSC tools require
    sorted_input_file = os.path.splitext(input_file)[0] + '.sorted.bed'
    cmd = 'sort -k1,1 -k2,2n ' + input_file + ' > ' + sorted_input_file
    retcode = os.system(cmd)
    if retcode != 0:
        # BUGFIX: the exit status of sort used to be ignored
        return retcode
    next_input_file = sorted_input_file
    # clip the peaks that are outside chromosome boundaries
    # ex. bedClip input.bed chrom.sizes output.bed
    clipped_input_file = os.path.splitext(next_input_file)[0] + '.clip.bed'
    executable = os.path.join(task_info.ucsc_bin_dir, 'bedClip')
    cmd = ' '.join([executable,
                    '-verbose=2',
                    next_input_file,
                    chrom_sizes_file,
                    clipped_input_file])
    retcode = os.system(cmd)
    if retcode != 0:
        return retcode
    next_input_file = clipped_input_file
    # run the bedToBigBed program
    # ex. bedToBigBed -as=bed5.as -bedFields=5 in.bed hg18.chrom.sizes out.bb
    executable = os.path.join(task_info.ucsc_bin_dir, 'bedToBigBed')
    bed_fields = 5
    cmd = ' '.join([executable,
                    '-as=%s' % asfile,
                    '-bedFields=%d' % bed_fields,
                    next_input_file,
                    chrom_sizes_file,
                    output_file])
    logger.debug("bedToBigBed command line: %s" % cmd)
    retcode = os.system(cmd)
    return retcode

def sort_peaks(peaks, sort_attr='neglog10p'):
    """Return peaks sorted by *sort_attr* (descending) with ranks assigned.

    The best peak gets normalized_rank 1000.0; ranks decrease linearly
    down to 1000/n for the worst of n peaks.
    """
    ranked = sorted(peaks, key=operator.attrgetter(sort_attr), reverse=True)
    total = len(ranked)
    remaining = total
    for peak in ranked:
        peak.normalized_rank = 1000.0 * float(remaining) / total
        remaining -= 1
    return ranked

def convert_sam_to_bed(infile, outfile):
    """Convert a BAM file to 6-column BED, skipping unmapped/QC-fail reads.

    Returns *outfile*.
    """
    bamfh = pysam.Samfile(infile, "rb")
    bedfh = open(outfile, 'w')
    # name column: everything before the first '.' in the input path
    # NOTE(review): this keeps any directory prefix in the name --
    # presumably os.path.basename was intended; verify before changing
    name = infile.split('.')[0]
    for read in bamfh.fetch():
        if read.is_qcfail or read.is_unmapped:
            continue
        # chrom, start, end, name, score, strand
        fields = (bamfh.getrname(read.rname),
                  str(read.pos),
                  str(read.pos + read.rlen),
                  name,
                  '0.0',
                  '-' if read.is_reverse else '+')
        bedfh.write('\t'.join(fields) + '\n')
    bamfh.close()
    bedfh.close()
    return outfile

def convert_sam_to_hpeak_bed(infile, outfile):
    """Convert a BAM file to HPeak's 4-column input: chrom, start, end, strand.

    Unmapped and QC-fail reads are skipped.  Returns *outfile*.
    """
    bamfh = pysam.Samfile(infile, "rb")
    outfh = open(outfile, 'w')
    for read in bamfh.fetch():
        if read.is_qcfail or read.is_unmapped:
            continue
        # chrom, start, end, strand (no name/score columns)
        fields = (bamfh.getrname(read.rname),
                  str(read.pos),
                  str(read.pos + read.rlen),
                  '-' if read.is_reverse else '+')
        outfh.write('\t'.join(fields) + '\n')
    bamfh.close()
    outfh.close()
    return outfile

def convert_sam_to_raw_tags(infile, outfile):
    """Convert a BAM file to raw tag format (ChIPDiff): chrom, position, strand.

    Unmapped and QC-fail reads are skipped.  Returns *outfile*.
    """
    bamfh = pysam.Samfile(infile, "rb")
    outfh = open(outfile, 'w')
    for read in bamfh.fetch():
        if read.is_qcfail or read.is_unmapped:
            continue
        fields = (bamfh.getrname(read.rname),
                  str(read.pos),
                  '-' if read.is_reverse else '+')
        outfh.write('\t'.join(fields) + '\n')
    bamfh.close()
    outfh.close()
    return outfile

def parse_sicer_peaks(infile):
    '''Parse SICER output, yielding one PeakInfo per enriched island.

    Columns: chrom, start, end, ChIP_island_read_count,
    CONTROL_island_read_count, p_value, fold_change, FDR_threshold
    Example:
    chr1    0       599     22      3       2.30617547841e-16       10.2073449862   3.33970473458e-15
    '''
    # BUGFIX: use a context manager so the file handle is closed
    with open(infile) as fhd:
        for line in fhd:
            if line.startswith('#'):
                # skip comments
                continue
            fields = line.strip().split('\t')
            peak = PeakInfo()
            # read raw fields
            peak.chrom = fields[0]
            # clamp negative starts to 0
            peak.start = max(0, int(fields[1]))
            peak.end = int(fields[2])
            # SICER reports no summit; use the midpoint offset of the region
            peak.summit = int((peak.end - peak.start)/2.0)
            peak.t_tags = int(fields[3])
            peak.c_tags = int(fields[4])
            # floor p-value/FDR at 1e-200 to avoid -log10(0)
            pval = float(fields[5])
            if pval == 0:
                pval = 1.0e-200
            peak.neglog10p = -math.log10(pval)
            peak.fold_change = float(fields[6])
            fdr = float(fields[7])
            if fdr == 0:
                fdr = 1.0e-200
            peak.neglog10fdr = -math.log10(fdr)
            yield peak
        
def run_sicer(tinfo, outfile):
    """Run SICER on the alignments described by AnalysisTask *tinfo*.

    Converts the BAM inputs to BED, runs SICER with positional options
    window_size, gap_size, fdr taken from method_command_options, and
    returns (retcode, peaks) where peaks is a rank-annotated list of
    PeakInfo objects, or (retcode, None) on failure.
    """
    logger = logging.getLogger(tinfo.analysis_id)
    # get params from task info object
    t_file = tinfo.t_alignment_file
    c_file = tinfo.c_alignment_file
    name = tinfo.method_name
    cwd = tinfo.output_dir
    command_params = tinfo.method_command_options
    sw_version = tinfo.method_sw_version  # NOTE(review): currently unused
    # convert t_file and c_file to BED format    
    bed_t_file = 'treatment.bed'
    bed_c_file = 'control.bed'
    convert_sam_to_bed(t_file, os.path.join(cwd, bed_t_file))
    # NOTE(review): unlike run_hpeak/run_macs, a None control file is not
    # handled here -- SICER appears to require a control; verify
    convert_sam_to_bed(c_file, os.path.join(cwd, bed_c_file))
    # parse args: the three positional SICER options
    myargs = command_params.split()
    window_size, gap_size, fdr = myargs[0], myargs[1], myargs[2]
    # run sicer (imported locally, presumably to defer the veggie dependency)
    import veggie.app.chipseq.sicer as sicer    
    sicer_app = sicer.SICER(cwd=cwd)
    retcode, result_paths = sicer_app.run(bed_t_file, bed_c_file, 
                                          window_size=window_size, 
                                          gap_size=gap_size, 
                                          fdr=fdr)
    if retcode != 0:
        return retcode, None    
    # process output: first result path is the SICER islands file
    sicer_output_file = os.path.join(cwd, result_paths[0])
    logger.debug("SICER command line: %s" % (sicer_app.getCommandLine()))    
    logger.debug("SICER output file: %s" % sicer_output_file)
    logger.debug("SICER experiment name: %s" % name)
    # convert SICER output to standard peak output format
    peaks = list(parse_sicer_peaks(sicer_output_file))
    # sort peaks and compute normalized rank
    peaks = sort_peaks(peaks, sort_attr='neglog10p')
    return retcode, peaks

def parse_hpeak_peaks(infile):
    '''Parse HPeak region output, yielding one PeakInfo per enriched region.

    Columns: chrom_number, start, end, length, summit_position,
    max_HDF_coverage, log10(p).  The first column holds only the
    chromosome number; X and Y are encoded as 23 and 24 for easier
    numerical manipulations.

    example:
    1       941451  941625  175     41.5    17.2047414700467        -0.988265142857143
    '''
    # BUGFIX: removed a dead local 'import math' (math is not used here)
    # and use a context manager so the file handle is closed
    with open(infile) as fhd:
        for line in fhd:
            if line.startswith('#'):
                # skip comments
                continue
            peak = PeakInfo()
            fields = line.strip().split('\t')
            # translate HPeak's numeric chromosome encoding to UCSC names
            chrom_num = int(fields[0])
            if chrom_num == 23:
                peak.chrom = 'chrX'
            elif chrom_num == 24:
                peak.chrom = 'chrY'
            else:
                peak.chrom = 'chr' + str(chrom_num)
            peak.start = max(0, int(fields[1]))
            peak.end = int(fields[2])
            # fields[3] (region length) is redundant and ignored
            peak.summit = float(fields[4])
            # maximum hypothetical DNA fragment coverage in the region
            peak.t_tags = float(fields[5])
            # HPeak reports log10(p); store as -log10(p)
            peak.neglog10p = -float(fields[6])
            yield peak

def run_hpeak(tinfo, outfile):
    """Run HPeak on the alignments described by AnalysisTask *tinfo*.

    Converts the BAM inputs to HPeak's BED-like format (the control is
    optional) and returns (retcode, peaks) where peaks is a
    rank-annotated list of PeakInfo objects, or (retcode, None) on
    failure.
    """
    logger = logging.getLogger(tinfo.analysis_id)
    # get params from task info object
    t_file = tinfo.t_alignment_file
    c_file = tinfo.c_alignment_file
    name = tinfo.method_name
    cwd = tinfo.output_dir
    command_params = tinfo.method_command_options
    # convert t_file and c_file to hpeak format    
    bed_t_file = 'treatment.hpeak_bed'
    #bed_t_file = os.path.splitext(os.path.basename(t_file))[0] + '.hpeak_bed'
    convert_sam_to_hpeak_bed(t_file, os.path.join(cwd, bed_t_file))
    if c_file is not None:
        bed_c_file = 'control.hpeak_bed'
        #bed_c_file = os.path.splitext(os.path.basename(c_file))[0] + '.hpeak_bed'
        convert_sam_to_hpeak_bed(c_file, os.path.join(cwd, bed_c_file))
    else:
        # HPeak can run without a control
        bed_c_file = None
    # add command line params
    hpeak_args = command_params.split()    
    # run hpeak (imported locally, presumably to defer the veggie dependency)
    import veggie.app.chipseq.hpeak as hpeak
    hpeak_app = hpeak.HPeak(cwd=cwd, args=hpeak_args)    
    retcode, result_paths = hpeak_app.run(bed_t_file, bed_c_file, format='BED', name=name)
    if retcode != 0:
        return retcode, None
    # first result path is the peaks file
    peaks_file = os.path.join(cwd, result_paths[0])
    # process output
    peaks = list(parse_hpeak_peaks(peaks_file))
    # sort peaks and compute normalized rank
    peaks = sort_peaks(peaks, sort_attr='neglog10p')
    return retcode, peaks

def parse_macs_peaks(infile):
    '''Parse a MACS peaks file, yielding one PeakInfo per peak.

    Columns: chr, start, end, length, summit, tags, -10*log10(pvalue),
    fold_enrichment, and (only when MACS was run with a control) FDR(%).
    Leading '#' lines are comments; the first non-comment line is the
    column header.
    '''
    # BUGFIX: use a context manager (file was never closed) and tolerate
    # an empty file -- 'header = line' used to raise NameError when the
    # first loop never ran
    with open(infile) as fhd:
        header_seen = False
        for line in fhd:
            if line.startswith('#'):
                continue
            if not header_seen:
                # skip the single column-header line
                header_seen = True
                continue
            fields = line.strip().split('\t')
            peak = PeakInfo()
            peak.chrom = fields[0]
            # clamp negative starts to 0
            peak.start = max(0, int(fields[1]))
            peak.end = int(fields[2])
            # fields[3] is the region length (redundant, ignored)
            peak.summit = int(fields[4])
            peak.t_tags = int(fields[5])
            # MACS reports -10*log10(p); rescale to -log10(p)
            peak.neglog10p = float(fields[6]) / 10.0
            peak.fold_change = float(fields[7])
            # if run without control there is no FDR column
            if len(fields) > 8:
                # floor FDR at 1e-200 to avoid -log10(0)
                fdr = float(fields[8])
                if fdr == 0.0:
                    fdr = 1.0e-200
                peak.neglog10fdr = -math.log10(fdr)
            yield peak
        
def run_macs(tinfo, outfile):
    """Run MACS on the alignments described by AnalysisTask *tinfo*.

    The BAM inputs are converted to SAM with samtools (MACS requirement)
    and the tag size is taken from the first aligned read.  Returns
    (retcode, peaks) where peaks is a rank-annotated list of PeakInfo
    objects, or (retcode, None) on failure.
    """
    logger = logging.getLogger(tinfo.analysis_id)
    # get params from task info object
    t_file = tinfo.t_alignment_file
    c_file = tinfo.c_alignment_file
    name = tinfo.method_name
    cwd = tinfo.output_dir
    command_params = tinfo.method_command_options
    # macs requires SAM input so convert the treatment file
    sam_t_file = 'treatment.sam'
    sam_t_path = os.path.join(cwd, sam_t_file)
    if not os.path.exists(sam_t_path):
        logger.debug("Converting BAM file %s -> SAM for MACS" % (t_file))
        subprocess.call(["samtools", "view", t_file, "-o", sam_t_path])
    # control is optional
    if c_file is not None:
        sam_c_file = 'control.sam'
        sam_c_path = os.path.join(cwd, sam_c_file)
        if not os.path.exists(sam_c_path):
            logger.debug("Converting BAM file %s -> SAM for MACS" % (c_file))
            subprocess.call(["samtools", "view", c_file, "-o", sam_c_path])
    else:
        sam_c_file = None
    # get the tag size from the first read in the treatment file
    read_length = None
    samfile = pysam.Samfile(t_file, "rb")
    for read in samfile.fetch():
        read_length = read.rlen
        break
    samfile.close()
    if read_length is None:
        # BUGFIX: an empty alignment file used to raise NameError below
        logger.error("no reads found in %s; cannot determine tag size" % (t_file))
        return 1, None
    # default options
    macs_args = command_params.split()
    macs_options = {'--tsize': read_length}
    # imported locally, presumably to defer the veggie dependency
    import veggie.app.chipseq.macs as macs
    macs_app = macs.MACS(options=macs_options, args=macs_args, cwd=cwd)    
    retcode, result_paths = macs_app.run(sam_t_file, sam_c_file, format="SAM", name=name)
    if retcode != 0:
        return retcode, None
    # process output: first result path is the peaks file
    peaks_file = os.path.join(cwd, result_paths[0])
    peaks = list(parse_macs_peaks(peaks_file))
    # sort peaks and compute normalized rank
    peaks = sort_peaks(peaks, sort_attr='neglog10p')
    return retcode, peaks


def write_chipdiff_config(filename, max_iteration_num=500, min_p=0.95,
                          max_training_seq_num=10000, min_fold_change=3.0,
                          min_region_dist=1000):
    '''Write a ChIPDiff configuration file to *filename*.

    Previously an unimplemented stub; defaults match the values the old
    docstring listed:
    maxIterationNum 500
    minP            0.95
    maxTrainingSeqNum       10000
    minFoldChange   3.0
    minRegionDist   1000
    '''
    # NOTE(review): key/value separator assumed to be whitespace --
    # confirm against the ChIPDiff documentation
    with open(filename, 'w') as f:
        f.write('maxIterationNum\t%d\n' % max_iteration_num)
        f.write('minP\t%s\n' % min_p)
        f.write('maxTrainingSeqNum\t%d\n' % max_training_seq_num)
        f.write('minFoldChange\t%s\n' % min_fold_change)
        f.write('minRegionDist\t%d\n' % min_region_dist)

def parse_chipdiff_peaks(region_file, bin_file):
    '''Parse ChIPDiff output, yielding one PeakInfo per '+' region.

    The region file contains <chrom> <start> <end> <+/->; only '+'
    regions are yielded.  The bin file contains:
    <chromosome>
    <range of histone modification region>
    <start of the bin>
    <count in L1> <count in L2> 
    < probability of being L1 enriched DHMS>
    < probability of being non-differential site> 
    < probability of being L2 enriched DHMS> 
    <state of the bin>
    It is currently unused (see TODO below).
    '''
    # BUGFIX: use a context manager so the file handle is closed
    with open(region_file) as fhd:
        for line in fhd:
            fields = line.strip().split('\t')
            # skip regions enriched in the second library
            if fields[3] == '-':
                continue
            peak = PeakInfo()
            peak.chrom = fields[0]
            peak.start = max(0, int(fields[1]))
            peak.end = int(fields[2])
            # ChIPDiff reports no summit; use the midpoint offset
            peak.summit = int((peak.end - peak.start)/2.0)
            # TODO: parse bin file to get more information about peaks
            yield peak

def run_chipdiff(tinfo, outfile):
    """Run ChIPDiff on the alignments described by AnalysisTask *tinfo*.

    Converts both BAM inputs to raw tag format and returns
    (retcode, peaks), or (retcode, None) on failure.
    """
    logger = logging.getLogger(tinfo.analysis_id)
    # get params from task info object
    t_file = tinfo.t_alignment_file
    c_file = tinfo.c_alignment_file
    name = tinfo.method_name
    cwd = tinfo.output_dir
    command_params = tinfo.method_command_options  # NOTE(review): unused here
    # convert to the input file format required by ChIPDiff
    tag_t_file = 'treatment.tags'
    tag_c_file = 'control.tags'
    #tag_t_file = os.path.splitext(os.path.basename(t_file))[0] + '.tags'
    #tag_c_file = os.path.splitext(os.path.basename(c_file))[0] + '.tags'
    convert_sam_to_raw_tags(t_file, os.path.join(cwd, tag_t_file))
    # NOTE(review): unlike run_hpeak/run_macs, a None control file is not
    # handled here and would fail -- verify ChIPDiff always has a control
    convert_sam_to_raw_tags(c_file, os.path.join(cwd, tag_c_file))
    # run chipdiff (imported locally, presumably to defer the veggie dependency)
    import veggie.app.chipseq.chipdiff as chipdiff
    chipdiff_app = chipdiff.ChIPDiff(cwd=cwd)
    # TODO: figure out args for chipdiff
    retcode, result_paths = chipdiff_app.run(tag_t_file, tag_c_file, 
                                             tinfo.chrom_sizes_file,
                                             name=name)
    if retcode != 0:
        return retcode, None    
    chipdiff_output_file = os.path.join(cwd, result_paths['region'])
    logger.debug("ChIPDiff experiment name: %s" % name)
    logger.debug("ChIPDiff command line: %s" % (chipdiff_app.getCommandLine()))    
    logger.debug("ChIPDiff output file: %s" % chipdiff_output_file)
    # process output
    peaks = list(parse_chipdiff_peaks(chipdiff_output_file, None))
    # sort peaks and compute normalized rank
    # NOTE(review): parse_chipdiff_peaks never sets neglog10p (it stays
    # None), so this sort compares None values -- verify intent
    peaks = sort_peaks(peaks, sort_attr='neglog10p')
    return retcode, peaks


def setup_analysis_files(ainfo, db, sample_pools_dir, output_dir):
    """Stage the input files for one analysis under *output_dir*.

    For each class (e.g. experiment/control) the merged alignment BAM is
    copied into a per-class subdirectory along with the sample pools'
    bigwig output files.  Returns a dict mapping class_type -> staged
    BAM path, or None when a required alignment file is missing.
    """
    logger = logging.getLogger(__name__)
    
    # locate the merged alignment file for every class up front
    class_type_inputfile_map = {}
    for class_type, class_id in ainfo.class_type_id_map.iteritems():
        sample_pool_ids = ainfo.class_type_sample_pool_map[class_type]
        for sample_pool_id in sample_pool_ids:
            # TODO: this is a hard-coded string that depends on the samplepools configuration
            # make this more robust
            alignment_file = os.path.join(sample_pools_dir, sample_pool_id, 'merged_alignments.bam')
            if not os.path.exists(alignment_file):
                # BUGFIX: was logging.error (root logger); use this module's logger
                logger.error("%s: could not find alignment file %s for analysis" %
                             (ainfo.id, alignment_file))
                return None
            # TODO: only allow one input file per class?
            class_type_inputfile_map[class_type] = alignment_file

    # setup a new working directory    
    if not os.path.exists(output_dir):
        logger.debug('%s: creating analysis results path %s' % 
                     (ainfo.id, output_dir))
        os.makedirs(output_dir)

    # setup class directories
    class_input_files_map = {}
    for class_type, class_id in ainfo.class_type_id_map.iteritems():
        class_dir = os.path.join(output_dir, class_id)
        if not os.path.exists(class_dir):
            logger.debug('%s: creating class path %s' % 
                         (ainfo.id, class_dir))
            os.makedirs(class_dir)
        # copy alignment files
        inputfile = class_type_inputfile_map[class_type]
        dstfile = os.path.join(class_dir, class_id + '_input.bam')
        logger.debug('%s: copying input file %s to %s' % (ainfo.id, inputfile, dstfile))
        shutil.copyfile(inputfile, dstfile)
        # setup map of input files
        class_input_files_map[class_type] = dstfile
        # copy bigwig files from each sample pool into the class directory
        sample_pool_ids = ainfo.class_type_sample_pool_map[class_type]
        for sample_pool_id in sample_pool_ids:        
            sample_pool_output_map = db.get_sample_pool_output(sample_pool_id)            
            for tag, filename in sample_pool_output_map.iteritems():
                srcfile = os.path.join(sample_pools_dir, sample_pool_id, filename)
                dstfile = os.path.join(class_dir, class_id + '_' + tag + '.bw') 
                logger.debug('%s: copying sample pool output file %s to %s' % (ainfo.id, srcfile, dstfile))
                shutil.copyfile(srcfile, dstfile)    
    return class_input_files_map


@ruffus.split(["config.xml"], "*/*/analysis_info.pickle")
def generate_analysis_task(input_files, output_files):
    """Create one analysis task per analysis defined in the database.

    Reads the pipeline configuration, stages each analysis' input files
    with setup_analysis_files(), and pickles an AnalysisTask to
    'analysis_info.pickle' inside each analysis directory for the
    downstream ruffus stages.
    """
    logger = logging.getLogger(__name__)
    # read configuration
    logger.info("Parsing configuration file...")
    config = pipelineconfig.PipelineConfig(input_files[0])
    # change directories to the base output dir
    # (some paths are relative to current working dir)
    assert os.path.exists(config.output_dir)
    os.chdir(config.output_dir)
    # setup output paths
    sample_pools_dir = os.path.join(config.output_dir, config.sample_pools_dir)
    analysis_dir = os.path.join(config.output_dir, config.analysis_dir)

    # output relevant config
    logger.info("Genome version: %s" % config.genome_version)
    logger.info("Base output dir: %s" % config.output_dir)
    logger.info("Sample pools dir: %s" % sample_pools_dir)
    logger.info("Analysis output dir: %s" % analysis_dir)
    logger.info("Force rerun of all analyses: %s" % config.analysis_force)
    
    # fetch the chromosome sizes using UCSC's script
    logger.info("Fetching chrom sizes from UCSC using MYSQL query...")
    chrom_sizes_file = 'ucsc.chrom.sizes'
    executable = os.path.join(config.ucsc_bin_dir, 'fetchChromSizes')
    cmd = ' '.join(['sh', executable, config.genome_version, '>', chrom_sizes_file])
    retcode = os.system(cmd)
    if retcode != 0:
        # BUGFIX: a failed fetchChromSizes run used to go unnoticed
        logger.error("fetchChromSizes returned error code %d" % retcode)

    # connect to database
    connection_string, schema_name = config.databases[config.db]    
    logger.info("Connecting to database [%s] schema=[%s] at %s" % (config.db, schema_name, connection_string))
    db = chipseqdb.ChIPSeqDB(connection_string, schema_name, echo=False)

    # get analysis info and make tasks
    logger.info("Fetching analysis tasks...")
    for ainfo in db.get_analysis_info():
        output_dir = os.path.join(analysis_dir, ainfo.id)        
        # look for required sample pool files and copy them to an analysis directory
        class_input_files_map = setup_analysis_files(ainfo, db, sample_pools_dir, output_dir)
        if class_input_files_map is None:  # BUGFIX: was '== None'
            logger.error("%s: error setting up analysis" % (ainfo.id))
            continue
        # build an AnalysisTask
        task_info = AnalysisTask()
        # allow task to access the database
        task_info.db_connection_string = connection_string
        task_info.db_schema_name = schema_name
        task_info.analysis_force = config.analysis_force
        task_info.analysis_id = ainfo.id
        task_info.output_dir = os.path.join(analysis_dir, ainfo.id)
        task_info.method_id = ainfo.method_id
        task_info.method_name = ainfo.method_name
        task_info.method_sw_version = ainfo.method_sw_version
        task_info.method_command_options = ainfo.method_command_options
        # TODO: are we going to have more types of classes?
        task_info.t_alignment_file = class_input_files_map[db.CLASS_TYPE_EXPERIMENT]
        task_info.c_alignment_file = class_input_files_map.get(db.CLASS_TYPE_CONTROL, None)
        # copy the chrom sizes file to the task directory
        task_info.ucsc_bin_dir = config.ucsc_bin_dir
        task_info.chrom_sizes_file = 'ucsc.chrom.sizes'
        shutil.copyfile(chrom_sizes_file, os.path.join(task_info.output_dir, task_info.chrom_sizes_file))
        # write task info to pickle file (BUGFIX: close the file handle)
        task_info_file = os.path.join(task_info.output_dir, 'analysis_info.pickle')
        with open(task_info_file, 'w') as f:
            pickle.dump(task_info, f)
        logger.debug("%s: created analysis task" % (ainfo.id))


@ruffus.transform(generate_analysis_task, 
                  ruffus.regex(r"(.*)/(.*)/analysis_info.pickle$"),
                  r"\1/\2/peaks.bed", r"\2")                   
def peak_finder_task(input_file, output_file, analysis_id):
    """Run the configured peak caller for one analysis.

    Loads the pickled AnalysisTask, dispatches on method_name to the
    matching run_* function, uploads the resulting peaks to the database
    and writes both a tab-delimited peaks file and a BED file for the
    UCSC browser.  The analysis status flag in the database is updated
    as the task progresses.
    """
    logger = logging.getLogger(analysis_id)
    # BUGFIX: close the pickle file handle deterministically
    with open(input_file) as f:
        task_info = pickle.load(f)
    # connect to the database
    logger.info("Connecting to database...")
    db = chipseqdb.ChIPSeqDB(task_info.db_connection_string, 
                             task_info.db_schema_name, 
                             echo=False,
                             reflect=False)
    # skip samples that are marked complete (unless a rerun is forced)
    if db.analysis_previously_completed(task_info.analysis_id):
        logger.info("Analysis status indicates it was previously run, force rerun=%s" % 
                    (task_info.analysis_force))
        if not task_info.analysis_force:
            logger.info("Skipping this analysis...")
            touch(output_file)
            return
    # mark the analysis status as running
    update_analysis_status(db, task_info, chipseqdb.ChIPSeqDB.ANALYSIS_STATUS_RUNNING)
    # choose software method
    method_name_func_map = {'MACS': run_macs,
                            'HPEAK': run_hpeak,
                            'SICER': run_sicer,
                            'CHIPDIFF': run_chipdiff}
    # run method
    logger.debug("Running %s to find peaks..." % (task_info.method_name))
    retcode, peaks = method_name_func_map[task_info.method_name](task_info, output_file)
    if retcode != 0:
        # mark analysis status failed; still touch the output so the
        # ruffus dependency chain stays satisfied
        logger.error("Peak finder %s returned error code %d" % (task_info.method_name, retcode))
        update_analysis_status(db, task_info, chipseqdb.ChIPSeqDB.ANALYSIS_STATUS_FAILED)
        touch(output_file)
        return
    # upload peaks to database
    logger.debug("Uploading peaks to database...")   
    db.insert_peaks(task_info.analysis_id, peaks, replace=True)    
    # write peaks to output file (BUGFIX: close handles via context managers)
    peaks_file = os.path.splitext(output_file)[0] + '.txt'
    logger.debug("Writing peaks to file %s..." % peaks_file)
    with open(peaks_file, 'w') as outfhd:
        write_peaks(peaks, task_info.analysis_id, outfhd)
    # write BED file for peak locations
    logger.debug("Creating peaks BED %s file for UCSC browser..." % output_file)
    with open(output_file, 'w') as bedfhd:
        write_peak_bed_file(peaks, task_info.analysis_id, bedfhd)
    # mark analysis status complete
    update_analysis_status(db, task_info, chipseqdb.ChIPSeqDB.ANALYSIS_STATUS_COMPLETE) 

@ruffus.transform(peak_finder_task, 
                  ruffus.regex(r"(.*)/(.*)/peaks.bed$"),
                  r"\1/\2/peaks.bb", r"\2")              
def bed_to_bb_task(input_file, output_file, analysis_id):
    """Convert one analysis' peaks BED file to bigBed for the UCSC browser."""
    logger = logging.getLogger(analysis_id)
    # load the task info written by generate_analysis_task
    # BUGFIX: close the pickle file handle deterministically
    task_info_file = os.path.join(os.path.dirname(input_file), 'analysis_info.pickle')
    with open(task_info_file) as f:
        task_info = pickle.load(f)
    # the BED file may be missing if the peak finder failed or was skipped
    bed_file = input_file
    if not os.path.exists(bed_file):
        logger.warning("BED file does not exist, skipping conversion to BIGBED")
        return
    # convert bed to bigBed
    logger.debug("Converting BED %s --> BIGBED %s" % (bed_file, output_file))
    retcode = bed_to_bigbed(input_file, output_file, task_info)
    if retcode != 0:
        logger.error("bedToBigBed returned non-zero error code")

if __name__ == '__main__':
    from optparse import OptionParser    
    # configure root logging for the whole pipeline run
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)
    # parse command line arguments
    optionparser = OptionParser("usage: %prog [options]")
    # BUGFIX: help text was copy-pasted from an unrelated option; -p sets
    # the ruffus multiprocess level
    optionparser.add_option("-p", "--processors", dest="processors", type="int", default=4,
                            help="number of processes to run in parallel [default: %default]")
    (options, args) = optionparser.parse_args()
    # run the pipeline starting from analysis-task generation
    ruffus.pipeline_run([generate_analysis_task], verbose=2, multiprocess=options.processors)
    #ruffus.pipeline_run([peak_finder_task], verbose=2, multiprocess=options.processors)
    #ruffus.pipeline_run([bed_to_bb_task], verbose=2, multiprocess=options.processors)
