'''
Created on Dec 4, 2009

@author: mkiyer
'''
import ruffus
from optparse import OptionParser
import glob
import logging
import os
import re
import sys
import pickle
import subprocess
import pysam
import shutil

from veggie.genome.chrom import HG18
import veggie.alignment.sam as sam
import veggie.app.alignment.picard as picard
import xml.etree.cElementTree as etree

class ChIPSeqSample(object):
    """A single ChIP-seq sample: descriptive metadata plus its library ids."""

    def __init__(self, name=None, source=None, treatment=None, antibody=None, libraries=None):
        self.name = name
        self.source = source
        self.treatment = treatment
        self.antibody = antibody
        # default to a fresh list so instances never share one mutable default
        self.libraries = [] if libraries is None else libraries

    @staticmethod
    def parse_xml(elem):
        """Build a ChIPSeqSample from a <sample> XML element."""
        assert elem.tag == 'sample'

        def child_text(tag):
            # missing child elements map to None
            child = elem.find(tag)
            return child.text if child is not None else None

        libraries = [lib.text for lib in elem.findall('library')]
        return ChIPSeqSample(child_text('name'), child_text('source'),
                             child_text('treatment'), child_text('antibody'),
                             libraries)

class ChIPSeqExperiment(object):
    """Pairs an experimental sample with an optional control and a signal type."""

    def __init__(self, exper=None, control=None, signal=None):
        self.exper = exper
        self.control = control
        self.signal = signal

    @staticmethod
    def parse_xml(elem):
        """Parse an <experiment> element.

        Returns a (exper, control, signal) tuple of text values; each entry
        is None when the corresponding child element is missing.
        """
        assert elem.tag == 'experiment'
        values = []
        for tag in ('exper', 'control', 'signal'):
            child = elem.find(tag)
            values.append(child.text if child is not None else None)
        return tuple(values)

def parse_chipseq_xml(xmlfile):
    """Parse the chipseq XML file and yield ChIPSeqExperiment objects.

    Sample names referenced by each <experiment> are resolved against the
    <samples> section; the literal string "None" maps to a missing sample.
    Experiments that reference unknown sample names are logged and skipped.
    """
    logger = logging.getLogger(__name__)
    root = etree.parse(xmlfile).getroot()
    assert root.tag == 'chipseq'
    # index samples by name so experiments can look them up
    samples = {}
    for sample_elem in root.find('samples').findall('sample'):
        sample = ChIPSeqSample.parse_xml(sample_elem)
        samples[sample.name] = sample
    for exp_elem in root.find('experiments').findall('experiment'):
        exper_name, control_name, signal = ChIPSeqExperiment.parse_xml(exp_elem)
        if exper_name in samples:
            exper = samples[exper_name]
        elif exper_name == "None":
            exper = None
        else:
            logger.error("Unrecognized experiment <exper> name %s" % exper_name)
            continue
        if control_name in samples:
            control = samples[control_name]
        elif control_name == "None":
            control = None
        else:
            logger.error("Unrecognized experiment <control> name %s" % control_name)
            continue
        yield ChIPSeqExperiment(exper, control, signal)

def parse_config_xml(xmlfile):
    """Read the pipeline config XML; return the <alignment_path> text (or None)."""
    root = etree.parse(xmlfile).getroot()
    assert root.tag == 'config'
    path_elem = root.find('alignment_path')
    if path_elem is None:
        return None
    return path_elem.text

def _find_library_files(library_ids, alignment_path, logger):
    """Map each library id to its SAM file under alignment_path.

    Missing alignment files are logged and skipped; returns an
    {id: filename} dict containing only the files that exist.
    """
    library_files = {}
    for library_id in library_ids:
        library_file = os.path.join(alignment_path, library_id + '.sam')
        if not os.path.exists(library_file):
            logger.error("%s: alignment file %s not found, skipping library" % (library_id, library_file))
            continue
        library_files[library_id] = library_file
    return library_files

@ruffus.split(["chipseq.xml", "config.xml"], "*.exper")
def generate_experiments(input_files, output_files):
    """
    Read the chipseq/config XML files and write one pickled ChIPSeqExperiment
    per valid experiment to '<exper>_vs_<control>.exper'.
    Returns one list of parameters per job.
    """
    logger = logging.getLogger(__name__)
    # parse input files
    chipseq_xmlfile, config_xmlfile = input_files
    assert os.path.exists(chipseq_xmlfile)
    assert os.path.exists(config_xmlfile)
    # load the configuration
    alignment_path = parse_config_xml(config_xmlfile)
    assert os.path.exists(alignment_path)
    # load the samples and experiments
    for experiment in parse_chipseq_xml(chipseq_xmlfile):
        if experiment.exper is None:
            logger.error("Experiment is None, skipping")
            continue
        # make a name for this experiment
        experiment_name = [experiment.exper.name]
        # make sure all requested experimental libraries exist
        exper_libraries = _find_library_files(experiment.exper.libraries, alignment_path, logger)
        if len(exper_libraries) == 0:
            logger.warning("%s: no alignment files found, skipping experiment" % (experiment.exper.name))
            continue
        # replace the library id list with an id -> file mapping
        experiment.exper.libraries = exper_libraries
        # now do the same for controls (a control is optional)
        if experiment.control is None:
            logger.warning("Warning, no control specified")
            experiment_name.append("None")
        else:
            control_libraries = _find_library_files(experiment.control.libraries, alignment_path, logger)
            if len(control_libraries) == 0:
                logger.warning("%s: no alignment files found, running without a control" % (experiment.control.name))
                experiment.control = None
                experiment_name.append("None")
            else:
                experiment.control.libraries = control_libraries
                experiment_name.append(experiment.control.name)
        # write the experiment object to a pickle file for downstream tasks
        experiment_file = '_vs_'.join(experiment_name) + '.exper'
        # pickle requires a binary-mode file; 'with' guarantees it is closed
        with open(experiment_file, 'wb') as f:
            pickle.dump(experiment, f)


@ruffus.split(generate_experiments, "*_merge.pickle")
def setup_merge_task(input_files, merge_jobs_file):
    """
    Collect the unique (sam_files, bam_file) merge jobs from all experiment
    pickles and write one '<name>_merge.pickle' job file per merge.
    """
    files_to_merge = set()
    # build the set of merges to perform; a sample shared by several
    # experiments produces only one merge job because of the set
    for exper_file in input_files:
        # binary mode is required for pickle data; 'with' closes the handle
        # (previously this reassigned the 'input_files' parameter inside the loop)
        with open(exper_file, 'rb') as f:
            experiment = pickle.load(f)
        # add experimental group
        sam_files = tuple(experiment.exper.libraries.values())
        files_to_merge.add((sam_files, experiment.exper.name + '.bam'))
        # add control group, if present
        if experiment.control is not None:
            sam_files = tuple(experiment.control.libraries.values())
            files_to_merge.add((sam_files, experiment.control.name + '.bam'))
    for merge_job in files_to_merge:
        _, output_file = merge_job
        merge_job_file = os.path.splitext(output_file)[0] + "_merge.pickle"
        with open(merge_job_file, 'wb') as f:
            pickle.dump(merge_job, f)

def filter_references(insamfile, outsamfile,
                      references=None):
    """
    Copy a SAM file, dropping @SQ header lines and alignment records whose
    reference name is not in 'references'.  Unmapped records (RNAME '*')
    are always kept.

    insamfile  -- input SAM file path
    outsamfile -- output SAM file path (created/overwritten)
    references -- container of reference names to keep (set/dict lookup);
                  if None the function is a no-op and returns None
    Returns outsamfile on success.

    Note: the previous implementation raised NameError on an empty input
    file and mishandled header-only files; header and record lines are now
    classified per-line, which also closes both files on any error.
    """
    if references is None:
        return
    logger = logging.getLogger(__name__)
    rejected_reads = 0
    with open(insamfile, "r") as insam, open(outsamfile, "w") as outsam:
        for line in insam:
            if line.startswith("@"):
                header_fields = line.strip().split('\t')
                if header_fields[0] == '@SQ':
                    fields = dict(field.split(':', 1) for field in header_fields[1:])
                    assert 'SN' in fields
                    if fields['SN'] not in references:
                        logger.debug('filter_references: rejected reference %s' % fields['SN'])
                        continue
                outsam.write(line)
            else:
                # alignment record: RNAME is the third tab-separated field
                rname = line.split('\t', 3)[2]
                if rname == '*' or rname in references:
                    outsam.write(line)
                else:
                    rejected_reads += 1
    logger.debug('filter_references: removed %d reads from non-matching references' % (rejected_reads))
    return outsamfile


def convert_sam_to_bam(insamfiles, bamfile, tmp_path=None, references=None):
    """
    Prepare each input SAM file (reference filter -> BAM -> sort -> index ->
    rmdup) and combine the results into a single indexed BAM at 'bamfile'.
    Intermediate files are written to tmp_path (default: bamfile's directory)
    and deleted when the merge is complete.
    """
    if references is None:
        references = HG18.chrom_sizes
    logger = logging.getLogger(__name__)
    if tmp_path is None:
        tmp_path = os.path.abspath(os.path.dirname(bamfile))
    scratch_files = []
    prepared_bams = []
    for insamfile in insamfiles:
        # step 1: drop alignments to references not in the keep set
        filtered_sam = os.path.join(tmp_path, os.path.splitext(os.path.basename(insamfile))[0] + '.filter.sam')
        scratch_files.append(filtered_sam)
        logger.debug("Filtering references from SAM %s -> SAM %s" % (insamfile, filtered_sam))
        filter_references(insamfile, filtered_sam, references=references)
        # step 2: SAM -> BAM
        raw_bam = os.path.splitext(filtered_sam)[0] + '.bam'
        scratch_files.append(raw_bam)
        logger.debug("Converting SAM %s -> BAM %s" % (filtered_sam, raw_bam))
        subprocess.call(['samtools', 'view', '-S', filtered_sam, '-b', '-o', raw_bam])
        # step 3: coordinate sort
        sorted_bam = os.path.splitext(raw_bam)[0] + '.sorted.bam'
        scratch_files.append(sorted_bam)
        logger.debug("Sorting BAM %s -> %s" % (raw_bam, sorted_bam))
        # 'samtools sort' appends ".bam" itself, so pass the prefix only
        subprocess.call(['samtools', 'sort', raw_bam, os.path.splitext(sorted_bam)[0]])
        # step 4: index the sorted BAM
        logger.debug("Indexing Sorting BAM %s" % (sorted_bam))
        subprocess.call(['samtools', 'index', sorted_bam])
        scratch_files.append(sorted_bam + '.bai')
        # step 5: remove PCR duplicates (-s: single-end mode)
        rmdup_bam = os.path.splitext(sorted_bam)[0] + '.rmdup.bam'
        scratch_files.append(rmdup_bam)
        logger.debug("Removing duplicates from BAM %s -> %s" % (sorted_bam, rmdup_bam))
        subprocess.call(['samtools', 'rmdup', '-s', sorted_bam, rmdup_bam])
        prepared_bams.append(rmdup_bam)
    if len(prepared_bams) == 1:
        # a single input is already sorted, so copy it rather than merge
        logger.debug("Copying BAM file %s -> %s" % (prepared_bams[0], bamfile))
        shutil.copyfile(prepared_bams[0], bamfile)
    else:
        logger.debug("Merging BAM files %s -> %s" % (prepared_bams, bamfile))
        subprocess.call(['samtools', 'merge', bamfile] + prepared_bams)
    logger.debug("Indexing BAM file %s" % (bamfile))
    subprocess.call(['samtools', 'index', bamfile])
    # remove all intermediate files
    for scratch_file in scratch_files:
        logger.debug("Removing %s" % scratch_file)
        os.remove(scratch_file)
        
@ruffus.transform(setup_merge_task, ruffus.regex(r"(.+)_merge.pickle$"), r"\1.bam")
def merge_sam_to_bam_task(input_file, output_file):
    """Merge the SAM files named in one merge-job pickle into a single BAM."""
    logger = logging.getLogger(__name__)
    # binary mode is required for pickle data; 'with' closes the handle
    with open(input_file, 'rb') as f:
        sam_files, bam_file = pickle.load(f)
    # nothing to merge for an empty group
    if len(sam_files) == 0:
        return
    if not os.path.exists(bam_file):
        convert_sam_to_bam(sam_files, bam_file)
    else:
        logger.debug('BAM file %s exists, skipping..' % bam_file)

@ruffus.transform(merge_sam_to_bam_task, ruffus.suffix(".bam"), ".wig.gz")
def bam_to_wiggle_task(input_file, output_file):
    """Convert a merged BAM file into a compressed wiggle coverage track."""
    logger = logging.getLogger(__name__)
    logger.debug("Converting BAM %s -> WIG %s" % (input_file, output_file))
    if not os.path.exists(input_file):
        return
    # TODO: load the cDNA fragment size from config file
    wiggle_options = dict(unique_only=False,
                          merge_strands=True,
                          fragment_length=200,
                          norm=False)
    sam.bam_to_wiggle(input_file, output_file, **wiggle_options)

def convert_sam_to_bed(infile, outfile):
    """
    Convert a BAM file to 6-column BED (chrom, start, end, name, score, strand).
    QC-failed and unmapped reads are skipped.  Returns outfile.
    """
    samfile = pysam.Samfile(infile, "rb")
    try:
        # 'with' guarantees the output file is closed even if fetch() raises
        with open(outfile, 'w') as outfhd:
            # NOTE(review): the BED 'name' column is the input path up to the
            # first '.', which keeps any directory prefix -- presumably callers
            # pass a bare file name; confirm against callers.
            name = infile.split('.')[0]
            for read in samfile.fetch():
                if read.is_qcfail or read.is_unmapped:
                    continue
                strand = '-' if read.is_reverse else '+'
                # chrom, start, end, name, score, strand
                fields = [samfile.getrname(read.rname),
                          str(read.pos),
                          str(read.pos + read.rlen),
                          name,
                          '0.0',
                          strand]
                outfhd.write('\t'.join(fields) + '\n')
    finally:
        # release the BAM handle on both success and error paths
        samfile.close()
    return outfile

def convert_sam_to_hpeak_bed(infile, outfile):
    """
    Convert a BAM file to HPeak's 4-column input (chrom, start, end, strand).
    QC-failed and unmapped reads are skipped.  Returns outfile.
    """
    samfile = pysam.Samfile(infile, "rb")
    try:
        # 'with' guarantees the output file is closed even if fetch() raises
        with open(outfile, 'w') as outfhd:
            for read in samfile.fetch():
                if read.is_qcfail or read.is_unmapped:
                    continue
                strand = '-' if read.is_reverse else '+'
                # chrom, start, end, strand
                outfhd.write('\t'.join([samfile.getrname(read.rname),
                                        str(read.pos),
                                        str(read.pos + read.rlen),
                                        strand]) + '\n')
    finally:
        # release the BAM handle on both success and error paths
        samfile.close()
    return outfile

def convert_sam_to_raw_tags(infile, outfile):
    """
    Convert a BAM file to raw tags (chrom, position, strand), the input
    format consumed by ChIPDiff.  QC-failed and unmapped reads are skipped.
    Returns outfile.
    """
    samfile = pysam.Samfile(infile, "rb")
    try:
        # 'with' guarantees the output file is closed even if fetch() raises
        with open(outfile, 'w') as outfhd:
            for read in samfile.fetch():
                if read.is_qcfail or read.is_unmapped:
                    continue
                strand = '-' if read.is_reverse else '+'
                outfhd.write('%s\t%d\t%s\n' % (samfile.getrname(read.rname),
                                               read.pos,
                                               strand))
    finally:
        # release the BAM handle on both success and error paths
        samfile.close()
    return outfile

def run_sicer(t_file, c_file, cwd, name):
    """
    Convert treatment/control BAMs to BED format and run SICER in 'cwd'.
    Returns the value of SICER's run() call so callers can check success
    the same way as for the other peak-finder wrappers.
    """
    logger = logging.getLogger(__name__)
    # convert t_file and c_file to BED format (SICER consumes BED)
    bed_t_file = os.path.splitext(os.path.basename(t_file))[0] + '.bed'
    bed_c_file = os.path.splitext(os.path.basename(c_file))[0] + '.bed'
    convert_sam_to_bed(t_file, os.path.join(cwd, bed_t_file))
    convert_sam_to_bed(c_file, os.path.join(cwd, bed_c_file))
    # run sicer
    import veggie.app.chipseq.sicer as sicer
    sicer_app = sicer.SICER(cwd=cwd)
    # BUGFIX: the run() result was previously discarded and this function
    # returned None, which peak_finder_task counted as a failed retcode
    # (assumes run() returns an exit code like the other finder apps -- confirm)
    retcode = sicer_app.run(bed_t_file, bed_c_file)
    sicer_output_file = sicer_app.get_result_paths()[0]
    logger.debug("SICER output file: %s" % sicer_output_file)
    logger.debug("SICER experiment name: %s" % name)
    # TODO: convert sicer output to BED
    return retcode

def run_hpeak(t_file, c_file, cwd, name):
    """
    Convert treatment/control BAMs to HPeak's BED-like input format and run
    HPeak in 'cwd'; returns the value of HPeak's run() call.
    """
    logger = logging.getLogger(__name__)
    import veggie.app.chipseq.hpeak as hpeak
    # the treatment file is required...
    bed_t_file = os.path.splitext(os.path.basename(t_file))[0] + '.hpeak_bed'
    convert_sam_to_hpeak_bed(t_file, os.path.join(cwd, bed_t_file))
    # ...while the control file is optional
    bed_c_file = None
    if c_file is not None:
        bed_c_file = os.path.splitext(os.path.basename(c_file))[0] + '.hpeak_bed'
        convert_sam_to_hpeak_bed(c_file, os.path.join(cwd, bed_c_file))
    # default HPeak options (flags passed straight through to the tool)
    hpeak_options = {
        '-fmin': 175,
        '-fmax': 225,
        '-sig': '1e-3',
        '-wig': None,
        '-ann': None,
    }
    hpeak_app = hpeak.HPeak(cwd=cwd, options=hpeak_options)
    return hpeak_app.run(bed_t_file, bed_c_file, format='BED', name=name)


def run_macs(t_file, c_file, cwd, name):
    """
    Convert treatment/control BAMs to SAM (MACS input format) and run MACS
    in 'cwd'; returns the value of MACS's run() call.

    Raises ValueError if the treatment BAM contains no reads, since the
    MACS tag size is taken from the first read's length.
    """
    logger = logging.getLogger(__name__)
    import veggie.app.chipseq.macs as macs
    # macs requires SAM input so convert
    # treatment
    sam_t_file = os.path.splitext(t_file)[0] + '.sam'
    sam_t_path = os.path.join(cwd, sam_t_file)
    if not os.path.exists(sam_t_path):
        logger.debug("Converting BAM file %s -> SAM for MACS" % (t_file))
        subprocess.call(["samtools", "view", t_file, "-o", sam_t_path])
    # control (optional)
    sam_c_file = None
    if c_file is not None:
        sam_c_file = os.path.splitext(c_file)[0] + '.sam'
        sam_c_path = os.path.join(cwd, sam_c_file)
        if not os.path.exists(sam_c_path):
            logger.debug("Converting BAM file %s -> SAM for MACS" % (c_file))
            subprocess.call(["samtools", "view", c_file, "-o", sam_c_path])
    # get tag size from the first read's length
    read_length = None
    samfile = pysam.Samfile(t_file, "rb")
    try:
        for read in samfile.fetch():
            read_length = read.rlen
            break
    finally:
        samfile.close()
    if read_length is None:
        # previously this fell through to a NameError on an empty BAM
        raise ValueError("%s contains no reads; cannot determine MACS tag size" % t_file)
    # default options
    macs_options = {'--mfold': 16,
                    '--tsize': read_length,
                    '--pvalue': '1e-5',
                    '--verbose': 2}
    macs_app = macs.MACS(options=macs_options, cwd=cwd)
    return macs_app.run(sam_t_file, sam_c_file, format="SAM", name=name)


def write_chipdiff_config(filename):
    """
    Write a ChIPDiff configuration file (not yet implemented).

    Intended default parameters:
        maxIterationNum 500
        minP            0.95
        maxTrainingSeqNum       10000
        minFoldChange   3.0
        minRegionDist   1000
    """
    # TODO: implement; currently a no-op placeholder
    pass

def run_chipdiff(t_file, c_file, cwd, name):
    """
    Convert both BAMs to raw tag files and run ChIPDiff in 'cwd';
    returns the value of ChIPDiff's run_basic() call.
    """
    # ChIPDiff consumes raw tag files (chrom, position, strand)
    tag_t_file = os.path.splitext(t_file)[0] + '.tags'
    tag_c_file = os.path.splitext(c_file)[0] + '.tags'
    # NOTE(review): unlike run_sicer/run_hpeak, the tag file names keep
    # the input's directory prefix (no basename()) before joining with
    # cwd -- confirm this is intended.
    convert_sam_to_raw_tags(t_file, os.path.join(cwd, tag_t_file))
    convert_sam_to_raw_tags(c_file, os.path.join(cwd, tag_c_file))
    # run chipdiff
    import veggie.app.chipseq.chipdiff as chipdiff
    chipdiff_app = chipdiff.ChIPDiff(cwd=cwd)
    return chipdiff_app.run_basic(tag_t_file, tag_c_file, name=name)
    
@ruffus.follows(merge_sam_to_bam_task)
@ruffus.transform(generate_experiments, ruffus.regex(r"(.+).exper$"), r"\1.done")
def peak_finder_task(input_file, output_file):
    """
    Run the peak finders appropriate for the experiment's signal type
    ('TF'/'localized' -> MACS + HPeak; 'broad' -> ChIPDiff + SICER) and
    create the '.done' sentinel file when every finder exits successfully.
    """
    logger = logging.getLogger(__name__)
    # binary mode is required for pickle data; 'with' closes the handle
    with open(input_file, 'rb') as f:
        experiment = pickle.load(f)
    experiment_name = os.path.splitext(input_file)[0]
    experiment_cwd = experiment_name

    t_file = experiment.exper.name + ".bam"
    if experiment.control is not None:
        c_file = experiment.control.name + ".bam"
    else:
        c_file = None

    # setup a new working directory for each experiment
    if not os.path.exists(experiment_cwd):
        logger.debug('%s: creating experiment results path %s' % (experiment_name, os.path.abspath(experiment_cwd)))
        os.makedirs(experiment_cwd)

    # choose peakfinder based on signal type
    retcodes = []
    if experiment.signal == 'TF' or experiment.signal == 'localized':
        logger.debug("%s: Running MACS to find peaks" % (experiment_name))
        retcodes.append(run_macs(t_file, c_file, cwd=experiment_cwd, name='_'.join(['MACS', experiment_name])))
        logger.debug("%s: Running HPeak to find peaks" % (experiment_name))
        retcodes.append(run_hpeak(t_file, c_file, cwd=experiment_cwd, name='_'.join(['HPeak', experiment_name])))
    elif experiment.signal == 'broad':
        if c_file is None:
            logger.error("%s: cannot run broad peak finders without a control" % (experiment_name))
        else:
            logger.debug("%s: Running ChIPDiff to find peaks" % (experiment_name))
            retcodes.append(run_chipdiff(t_file, c_file, cwd=experiment_cwd, name='_'.join(['ChIPDiff', experiment_name])))
            logger.debug("%s: Running SICER to find peaks" % (experiment_name))
            retcodes.append(run_sicer(t_file, c_file, cwd=experiment_cwd, name='_'.join(['SICER', experiment_name])))
    else:
        logger.error("%s: Unrecognized ChIP-seq signal type %s" % (experiment_name, experiment.signal))
    # create the sentinel file only if every finder returned 0
    # (a None retcode counts as failure, matching the old '!= 0' check)
    if all(retcode == 0 for retcode in retcodes):
        # create the file directly instead of shelling out to 'touch'
        with open(output_file, 'w'):
            pass

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)
    logger = logging.getLogger(__name__)

    # parse command line arguments
    optionparser = OptionParser("usage: %prog [options] <results_path>")
    # BUGFIX: the help text previously described an unrelated option
    optionparser.add_option("-p", "--processors", dest="processors", type="int", default=1,
                            help="number of processors to use when running pipeline tasks [default: %default]")
    (options, args) = optionparser.parse_args()
    # validate command line arguments
    if len(args) == 0 or (not os.path.exists(args[0])):
        optionparser.error("Results path does not exist")
    results_path = args[0]

    # the pipeline expects its XML inputs in the current working directory
    chipseq_xmlfile = "chipseq.xml"
    config_xmlfile = "config.xml"
    assert os.path.exists(chipseq_xmlfile)
    assert os.path.exists(config_xmlfile)

    logger.info('ChIP-seq pipeline version 0.0.2')
    logger.info('-------------------------------')
    logger.info('chipseq experiment xml file: %s', chipseq_xmlfile)
    logger.info('config xml file: %s', config_xmlfile)
    logger.info('results_path: %s', results_path)

    # change directory to results path for subsequent steps
    os.chdir(results_path)
    # run the pipeline
    ruffus.pipeline_run([peak_finder_task, bam_to_wiggle_task], verbose=2,
                        multiprocess=options.processors)
