"""
USAGE:
See http://code.google.com/p/chipseq-pipeline/wiki/WikiChipseqPipeline

TODO:
Incorporate the use of bamqc.py for alignment quality stats.

"""

import os
import subprocess
import sys
import tempfile
import re
import shutil
import datetime
import time
import socket
try:
    import pysam
except:
    pass

try:
    import psycopg2
except:
    pass

NO_SUBPROCS= 6      ## Maximum number of subprocesses to run in parallel
NA_STRING= 'NA'      ## Missing-value string used when converting files (bed) and importing to postgres
WIKI_DIR= 'redmine_wiki' ## Directory where to put stuff to export to redmine or to other destinations.
CMD_FILE= 'commands.log' ## File where to log all the relevant system commands
STDERR= 'stderr.log'     ## File where stderr of spawned commands is appended
STDOUT= 'stdout.log'     ## File where stdout of spawned commands is appended

# ------------------------------------------------------------------------------
# Define functions
# ------------------------------------------------------------------------------

def log_cmd(log_file= CMD_FILE, logline= '', add_timestamp= True):
    """ Append the string logline to file log_file, optionally with a timestamp.

    log_file:
        Path of the log file, opened in append mode and closed on return.
        Defaults to the module-level CMD_FILE.
    logline:
        Text to log; a newline is appended.
    add_timestamp:
        If True, prefix the line with the current date and time.
    """
    timestamp= '[' + datetime.datetime.now().strftime('Date %d-%m-%Y Time %Hh %Mm %Ss') + '] '
    ## Bug fix: the original opened CMD_FILE unconditionally, silently
    ## ignoring the log_file argument.
    fout= open(log_file, 'a')
    if add_timestamp is True:
        fout.write(timestamp)
    fout.write(logline + '\n')
    fout.close()

def get_svn_info(module):
    """ Get the svn information for "module". Assuming module is under svn.
    USAGE:
    get_svn_info(chipseq_pipeline)
    
    RETURNS:
    A string with the output of svn info, or an error message if the module
    has no __file__ attribute or the command cannot be started.
    """
    try:
        module_path= module.__file__
    except AttributeError:
        ## Built-in modules (and non-module objects) have no __file__.
        return('get_svn_info: Error while executing: %s.__file__' %(module))
    cmd= "svn info %s" %(os.path.dirname(module_path))
    log_cmd(log_file= CMD_FILE, logline= cmd)
    try:
        p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= subprocess.PIPE)
    except OSError:
        return('get_svn_info: Cannot execute %s' %(cmd))
    ## communicate() drains the pipe AND waits for svn to finish (the
    ## original read the pipe without ever waiting on the process).
    svn_out= p.communicate()[0]
    return("*svn information for %s:*\n\n" %(module) + svn_out)

def make_wdir(wdir):
    """ Recursively create directory wdir if it doesn't exist.

    wdir:
        Path of the directory to create. An empty string is a no-op (this
        happens when callers split a bare file name with os.path.split()).
    """
    ## Guard: os.makedirs('') raises OSError; callers routinely pass the
    ## dirname part of a file path, which can legitimately be ''.
    if wdir and not os.path.exists(wdir):
        os.makedirs(wdir)

def limit_procs(no_procs, procs):
    """ Prevent a for-loop executing subprocess.Popen() from running more than
    no_procs at a time.
    Args:
    no_procs:
        Maximum number of subprocesses allowed.
    procs:
        A list of processes as produced by p= subprocess.Popen(); procs.append(p)
    Returns:
        The input list unchanged while below the limit, otherwise an empty
        list after waiting for every process to complete.
    """
    ## Bug fix: the original compared against the module constant NO_SUBPROCS,
    ## silently ignoring the no_procs argument.
    if len(procs) >= no_procs:
        for p in procs:
            p.wait()
        procs= []
    return(procs)

def read_design(design):
    """ Read the 'design' file and return a dictionary as {column: [values]}
    E.g. {basename: ['ds001...', 'ds002', ...], input: []}
    Input column has the basename for each chipseq sample. NA if the sample is a input itself.

    Leading and trailing spaces are stripped from each field.
    Blank lines and lines starting with '#' are skipped.
    The first non-skipped row is the header.
    Exits via sys.exit() if rows have unequal numbers of columns.
    """
    design_df= []
    fin= open(design, 'r')
    for line in fin:
        if line.strip() == '' or line.startswith('#'):
            continue
        fields= [x.strip() for x in line.split('\t')]
        design_df.append(fields)
    fin.close()
    ## Check all the rows have the same number of columns
    ncol= [len(x) for x in design_df]
    if len(set(ncol)) > 1:
        sys.exit('chipseq_pipeline.read_design(%s): Unequal number of columns found: %s' %(design, ncol))
    ## Transpose the data rows into one list per header column.
    header= design_df[0]
    design_dict= {}
    for c, name in enumerate(header):
        design_dict[name]= [row[c] for row in design_df[1:]]
    return(design_dict)


def get_item_from_basename(basename, item_list):
    """
    Helper function to get the element in list item_list starting with 'basename'
    E.g.
    get_item_from_basename('ds001', ['ds001.bam', 'ds002.bam', 'ds001.txt', ]) >>> ['ds001.bam', 'ds001.txt']
    
    Returns a *list* with all the matching items.
    Exits via sys.exit() when nothing matches.
    """
    hits= [item for item in item_list if item.startswith(basename)]
    if not hits:
        sys.exit('get_item_from_basename: Could not find %s in %s' %(basename, item_list))
    return(hits)

def rename_bam(dir, design):
    """ Renames all the files design$alignment_bam by replacing the string to the left of .bam with the string in design$basename.
    
    dir <str>:
        Directory containing the bam files to rename (typically project-dir/bam_lims)
    design <str>:
        Design file containing columns "alignment_bam" and "basename" (typically project-dir/design.txt)
        
    E.g. design is:
    _____________________________________________________________
    | alignment_bam                           |basename         |
    +-----------------------------------------+-----------------|
    | SLX-3214.514.s_8.bwa.homo_sapiens.bam    ds001_mcf_dmso_1 |
    +-----------------------------------------+-----------------+
    
    If dir contains:
    SLX-3214.514.s_8.bwa.homo_sapiens.bam
    SLX-3214.514.s_8.bwa.homo_sapiens.bed
    SLX-3214.514.s_8.bwa.homo_sapiens.bai
    
    They will be renamed to:
    ds001_mcf_dmso_1.bam
    ds001_mcf_dmso_1.bed
    ds001_mcf_dmso_1.bai
    """
    design= read_design(design)
    
    cwd= os.getcwd()
    os.chdir(dir)
    try:
        dir_files= os.listdir(os.getcwd())
        for old, new_basename in zip(design['alignment_bam'], design['basename']):
            original_basename= re.sub(r'\.bam$', '', old)
            ## Rename every file sharing the bam's basename (.bam, .bed, .bai, ...)
            for f in dir_files:
                if f.startswith(original_basename):
                    newname= f.replace(original_basename, new_basename)
                    os.rename(f, newname)
                    print('Renamed %s TO: %s' %(f, newname))
    finally:
        ## Always restore the working directory, even if a rename fails.
        os.chdir(cwd)


def aln_stats(aln_dir= 'bam_lims', outfile= 'samtools_flagstats.txt', mapq= 20):
    """ Compute simple alignment statistics for every .bam file in aln_dir.

    For each bam, samtools view -c is run three times (total reads, unmapped
    reads [-f 4], reads above the mapq threshold) appending one count per
    line to a temporary <bam>.stat file. The counts are then tabulated into
    outfile, which is finally moved into WIKI_DIR; the .stat files are removed.

    aln_dir:
        Directory with the input bam files.
    outfile:
        Name of the summary table (moved into WIKI_DIR at the end).
    mapq:
        Mapping quality threshold for the third count.

    Returns () on success, or an error string if a .stat file does not
    contain exactly 3 lines.
    """
    print('\nExecuting aln_stats() ...')
    bams= os.listdir(aln_dir)
    bams.sort()
    bams= [x for x in bams if x.endswith('.bam')]
    bams_path= [os.path.join(aln_dir, x) for x in bams]
    procs= []
    for bam, bam_path in zip(bams, bams_path):
        stat_file= bam_path + '.stat'
        ## Remove any stale .stat file: the samtools commands below append.
        try:
            os.remove(stat_file)
        except:
            pass
        ## Three counts, one per line, in this order: total, unmapped (-f 4),
        ## mapq above threshold.
        cmd= """samtools view -c %(bam_path)s >> %(stat_file)s
                samtools view -c -f 4 %(bam_path)s >> %(stat_file)s
                samtools view -c -q %(mapq)s %(bam_path)s >> %(stat_file)s """ %{'bam_path': bam_path, 'mapq':mapq, 'stat_file':stat_file}
        log_cmd(log_file= CMD_FILE, logline= cmd)
        p= subprocess.Popen(cmd, shell= True)
        procs.append(p)
        ## Throttle the number of concurrent samtools jobs.
        if len(procs) > NO_SUBPROCS:
            for x in procs:
                x.wait()
            procs= []
    for x in procs:
        x.wait()
    outstats= open(outfile, 'w')
    header= '\t'.join(['bam file', 'No. reads', 'Unmapped', '% unmapp.', 'mapq > %s' %(mapq), '% mapq'])
    outstats.write(header + '\n')
    for f, name in zip(bams_path, bams):
        stat= open(f + '.stat')
        fout= stat.readlines()
        if len(fout) != 3:
            ## NOTE(review): this early return leaves outstats and stat open
            ## and the remaining .stat files on disk.
            return('Unexpected number of lines in %s' %(f + '.stat'))
        fout= [int(x.strip()) for x in fout]
        ## NOTE(review): round() is applied *before* multiplying by 100, so
        ## the percentages move in whole-point steps (e.g. 0.123 -> 12.0).
        fout= [name] + [fout[0]] + [fout[1]] + [str(round(float(fout[1]) / float(fout[0]), 2)*100)] + [fout[2]] + [str(round(float(fout[2]) / float(fout[0]), 2)*100)]
        fout= [str(x) for x in fout]
        outstats.write('\t'.join(fout) + '\n')
        stat.close()
    outstats.close()
    ## Clean up the per-bam .stat files.
    for x in bams_path:
        x= x + '.stat'
        os.remove(x)
    if not os.path.exists(WIKI_DIR):
        os.mkdir(WIKI_DIR)
    shutil.move(outfile, WIKI_DIR)
    return()

def clean_bam(bam_lims_dir, bam_clean_dir= 'bam_clean', file_regex= r'\.bam$', output_exts= '.clean.bam', opt= '-q 15'):
    """
    Filters bam files to remove reads with low mapq and overlapping black-list
    regions.
    Input is sorted bam files, output is filtered bam files and their index files
    
    ARGS:
    bam_lims_dir <str>:
        Directory where to find input *sorted* bamfiles
      
    bam_clean_dir <'bam_clean'>
        Directory where output files will go. Will be created if not exists
        
    file_regex <'\.bam$'>
        Use input files in bam_lims_dir matching this regex
        
    output_exts <'.clean.bam'>
        Extension for output files (the suffix '\.bam$' will be replaced by this)

    opt <'-q 15'>:
        Further options to pass to samtools view. Do not include here the -b option
        (it's hardcoded in the function)
        Use: opt= '-L regions.bed' to include specific regions
        MEMO: To produce a bed file of regions excluding the black list use:
            subtractBed -a /data01/sblab/users/berald01/reference_seqs/human.hg18.chromosomes.bed -b wgEncodeHg18DukeSignalArtifactRegions.bed > /data01/sblab/users/berald01/reference_seqs/wgEncodeHg18DukeSignalArtifactRegions.whitelist.bed
            Where human.hg18.chromosomes.bed is formatted: < chrom   0   chrom-lenght >
    """
    
    if not os.path.exists(bam_clean_dir):
        os.makedirs(bam_clean_dir)

    bam_lims= os.listdir(bam_lims_dir)
    bam_lims.sort()
    bam_lims= [x for x in bam_lims if re.search(file_regex, x)]
    bam_clean= [re.sub(r'\.bam$', output_exts, x) for x in bam_lims]

    procs= []
    for bam, bam_new in zip(bam_lims, bam_clean):
        bamin= os.path.join(bam_lims_dir, bam)
        ## Bug fix: the output name must be the renamed file (bam_new). The
        ## original used the input name, so output_exts was silently ignored.
        bamout= os.path.join(bam_clean_dir, bam_new)
        cmd= 'samtools view -b %s %s > %s ; samtools index %s' %(opt, bamin, bamout, bamout)
        print(cmd)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        p= subprocess.Popen( cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a') )
        procs.append(p)
        if len(procs) >= NO_SUBPROCS:
            " Do not spawn more than NO_SUBPROCS processes "
            for x in procs:
                x.wait()
            procs= []
    for x in procs:
        x.wait()
  

def homer_maketagdirs(input_dir, homertag_dir= 'tag_directories', echo= False, file_regex= '\.bam$', opt= ''):
    """
    Run homer makeTagDirectory on every file in input_dir matched by file_regex.
    
    homertag_dir:
        Directory which will contain the created tag directories (created if
        missing). Layout:
        [homertag_dir]
            |
            + tagdir1
            + tagdir2
            + ...
    input_dir:
        Directory with the input alignment files. Typically project-dir/bed_clean
    echo:
        If True, only print and log the commands without executing them.
    opt:
        Further options passed to makeTagDirectory
        makeTagDirectory <directory> <alignment file 1> [file 2] ... [options]
    """
    if not os.path.exists(homertag_dir):
        os.makedirs(homertag_dir)

    infiles= [f for f in sorted(os.listdir(input_dir)) if re.search(file_regex, f)]

    procs= []
    for infile in infiles:
        ## Tag dir name = input file name with the matched suffix removed.
        tagdir= os.path.join(homertag_dir, re.sub(file_regex, '', infile))
        aln= os.path.join(input_dir, infile)
        cmd= 'makeTagDirectory %(directory)s %(aln)s %(options)s ' %{'directory': tagdir, 'aln': aln, 'options': opt}
        print(cmd)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        if echo is not False:
            continue
        p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
        procs.append(p)
        if len(procs) >= NO_SUBPROCS:
            for running in procs:
                running.wait()
            procs= []
    for running in procs:
        running.wait()
       
def homer_findPeaks(tag_dir, design, findpeaks_dir= 'findpeaks', opt= '-center', echo= False, file_regex= '.*'):
    """ Executes homer findPeaks on all the tag directories in dir tag_dir.
    Samples are matched to input controls using the columns basename and input in the design file.
    
    tag_dir <str>:
        Path to dir containing the tag directories typically os.path.join(project-dir, "/homer/tag_directories") . E.g.
        [tag_dir]
            |
            + tagdir1
            + tagdir2
            + ...
    design <str>
        Design file containing columns "basename" and "input" to match. Typically os.path.join(project-dir, "design.txt")
    findpeaks_dir <str>
        Path to output dir. will be created if doesn't exists. Typically os.path.join(project_dir, 'homer', 'findpeaks')
    opt <str>
        String of options passed verbatim to findPeaks.
    echo:
        If True just log the system commands without executing them.
    file_regex:
        Only tag directories matching this regex are considered.
    
    Output files will be named *basename*.findPeaks where basename comes from design['basename']
    """
    
    if not os.path.exists(findpeaks_dir):
        os.makedirs(findpeaks_dir)
       
    tagdirs= sorted(os.listdir(tag_dir))
    tagdirs= [x for x in tagdirs if re.search(file_regex, x)]
    design= read_design(design)
    procs= []
    ## Loop variable renamed from 'input' to avoid shadowing the builtin.
    for basename, input_name in zip(design['basename'], design['input']):
        outfile= os.path.join(findpeaks_dir, basename) + '.findPeaks'
        
        tags= get_item_from_basename(basename, tagdirs)[0] ## Get tag directory from basename
        tagdir_path= os.path.join(tag_dir, tags)
        
        input_tag= get_item_from_basename(input_name, tagdirs)[0] ## Get input ctrl directory
        inputdir_path= os.path.join(tag_dir, input_tag)
    
        cmd= 'findPeaks %s -i %s -o %s %s' %(tagdir_path, inputdir_path, outfile, opt)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        if echo is False:
            p= subprocess.Popen(cmd, shell= True)
            procs.append(p)
            if len(procs) >= NO_SUBPROCS:
                for x in procs:
                    x.wait()
                procs= []
        else:
            continue
    for x in procs:
        x.wait()
    
    
def get_findPeaks_summary(peak_dir, file_regex= '\.findPeaks$', summary_file= 'findpeaks_summary.txt'):
    """ Get the summary stats lines from findPeaks outputs.
    peak_dir:
        Directory where the peak files are
    file_regex:
        Regex to recognize which files should be processed
    summary_file:
        Output file to capture stdout from get_findPeaks_summary.py. If it has
        full path, sub-directories will be created if not exist.
        A reduced version (fewer columns) is also written to
        summary_file + '.reduced'.
    """
    
    peak_files= sorted(os.listdir(peak_dir))
    peak_files= [x for x in peak_files if re.search(file_regex, x)]
    peak_files_path= [os.path.join(peak_dir, x) for x in peak_files]
    
    header= ['tag dir.', 'tot. peaks', 'peak size', 'peaks found', 'min. dist.', 'frag. length', 'genome size', 'tot. tags', 'tot. tags in peaks', 'IP eff.', 'tags per bp', 'expected tags per peak', 'maximum tags considered per bp', 'effective number of tags used for normalization', 'Peaks have been', 'FDR rate threshold', 'FDR effective poisson threshold', 'FDR tag threshold', 'number of putative peaks', 'input tag directory', 'Fold over input required', 'Poisson p-value over input required', 'Putative peaks filtered by input', 'size of region used for local filtering', 'Fold over local region required', 'Poisson p-value over local region required', 'Putative peaks filtered by local signal', 'Maximum fold under expected unique positions for tags', 'Putative peaks filtered for being too clonal', 'cmd']
    header= '\t'.join(header)
    
    (outdir, summary)= os.path.split(summary_file)
    make_wdir(outdir)
    
    if os.path.exists(summary_file):
        os.remove(summary_file)

    ## Write the header directly instead of shelling out to echo.
    fout= open(summary_file, 'w')
    fout.write(header + '\n')
    fout.close()
    procs= []
    for f in peak_files_path:
        cmd= 'get_findPeaks_summary.py %s >> %s' %(f, summary_file)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        p= subprocess.Popen(cmd, shell= True)
        procs.append(p)
        if len(procs) >= NO_SUBPROCS:
            for x in procs:
                x.wait()
            procs= []
    for x in procs:
        x.wait()
    ## Parse the output of summary_file to have it less verbose.
    ## Bug fix: the original re-assigned summary_file to the hard-coded path
    ## 'homer/peak_summary/findpeaks_summary.txt' here, breaking any other
    ## value of the summary_file argument.
    all_summary= open(summary_file).readlines()
    all_summary= [x.rstrip('\n\r') for x in all_summary]
    all_summary= [x.split('\t') for x in all_summary]
    for line in all_summary:
        line[0]= os.path.split(line[0])[1]  ## Keep only the file name of the tag dir
    select_cols= [0,1,2,4,5,7,8,9]   ## Indexes of the columns you want to keep
    sel_summary=[]
    for line in all_summary:
        line= [line[i] for i in select_cols]
        sel_summary.append(line)
    out_red_summary= open(summary_file + '.reduced', 'w')
    for line in sel_summary:
        line= '\t'.join(line)
        out_red_summary.write(line + '\n')
    out_red_summary.close()

def mergePeaks(peak_dir, merged_file= 'peaks.merged', opt= '', file_regex= '\.findPeaks$', echo= False, bed= False):
    """ Uses mergePeaks to merge all the peak files in peak_dir.
    It outputs the file from mergePeaks and also the same file converted to bed as *merged_file*.bed
    
    peak_dir:
        Dir where input files are
    merged_file:
        Output file name. If with path, sub-dirs will be created as necessary.
    opt:
        String of arguments to pass to mergePeaks (e.g. opt: '-d 500')
    file_regex:
        Only files matching this regex will be merged
    echo:
        If True just log the system command and exit.
    bed:
        If True, inputs are BED files and are first converted to findPeaks
        format in the temp directory.
    """
    peaks= os.listdir(peak_dir)
    peaks= [x for x in peaks if re.search(file_regex, x)]
    peaks_path= [os.path.join(peak_dir, x) for x in peaks]
    
    (outdir, myfile)= os.path.split(merged_file)
    make_wdir(outdir)
    
    if os.path.exists(merged_file):
        os.remove(merged_file)
    
    if bed is True:
        " Convert each BED to findpeaks format "
        tmp_peaks= []
        tmpdir= tempfile.gettempdir()
        ## Loop variable renamed: the original called it 'bed', shadowing the
        ## 'bed' argument.
        for bed_path, peak in zip(peaks_path, peaks):
            tmp_peak_file= os.path.join(tmpdir, peak)
            bed_to_homer_peaks(bed_path, peak_file= tmp_peak_file)
            tmp_peaks.append(tmp_peak_file)
        peaks_path= tmp_peaks
        
    cmd= 'mergePeaks %s %s > %s' %(opt, ' '.join(peaks_path), merged_file)
    log_cmd(log_file= CMD_FILE, logline= cmd)
    if echo is True:
        return()
    ## Single command: no need for a process pool here.
    p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
    p.wait()
    homer_peaks_to_bed(merged_file, skip= 0)
    
def homer_peaks_to_bed(homer_file, skip= 0, replace_empty= NA_STRING):
    """
    Convert output from findPeaks, mergePeaks, annotatePeaks in the homer
    suite to BED, written to homer_file + '.bed'.

    homer_file:
        Input file; columns are reordered from homer order
        (name, chrom, start, end, strand, score, ...) to BED order
        (chrom, start, end, name, score, strand, ...).
    skip:
        Number of leading (header) lines to skip.
    replace_empty:
        Empty fields are replaced with this string.
    """
    ## 'with' guarantees both files are closed even on error.
    with open(homer_file) as fin, open(homer_file + '.bed', 'w') as bed:
        for n, line in enumerate(fin):
            if n < skip:
                continue
            fields= line.strip('\n\r').split('\t')
            fields= [replace_empty if x == '' else x for x in fields]
            bed_line= [fields[1], fields[2], fields[3], fields[0], fields[5], fields[4], '\t'.join(fields[6:])]
            bed.write('\t'.join(bed_line) + '\n')

def bed_to_homer_peaks(bed_file, peak_file= None):
    """
    Convert a bed file to format suitable for homer programs like annotatePeaks, mergePeaks
    From:
    chrom[0], start[1], end[2], name[3], score[4], strand[5], etc...[6:]
    To:
    name[3], chrom[0], start[1], end[2], strand[5], score[4], etc...[6:]

    bed_file:
        Input BED file. Rows with fewer than 7 columns are padded with NA_STRING.
    peak_file:
        Output file. Default: bed_file with '.bed' stripped and '.peaks' appended.

    A missing strand (NA) is replaced with '+'.
    """
    if peak_file is None:
        peak_file= re.sub(r'\.bed$', '', bed_file) + '.peaks'
    ## 'with' guarantees both files are closed even on error.
    with open(bed_file) as fin, open(peak_file, 'w') as fout:
        for line in fin:
            fields= line.strip().split('\t')
            if len(fields) < 7:
                ## Pad short rows so the indexing below is always valid.
                fields= fields + [NA_STRING] * (7 - len(fields))
            peak_line= [fields[3]] + fields[0:3] + [fields[5], fields[4], '\t'.join(fields[6:])]
            if peak_line[4] == NA_STRING:
                ## homer requires a strand; default to '+'.
                peak_line[4]= '+'
            fout.write('\t'.join(peak_line) + '\n')

def annotatePeaks(peak_file, annotated_file= 'peaks.annotated', opt= '', refgenome= 'hg18', echo= False, bed= False):
    """ Use annotatePeaks to produce peak annotation from findPeaks/mergePeaks output.
    Note the output is converted to bed and blanks replaced by NA

    peak_file:
        Input peak file (homer format, or BED if bed=True).
    annotated_file:
        Output file; sub-directories are created if needed. A BED version is
        also written to annotated_file + '.bed'.
    opt:
        Additional option as string to be passed to annotatePeaks.pl
    refgenome:
        Reference genome label passed to annotatePeaks.pl (default hg18).
    echo:
        If True just log the command and return without executing.
    bed:
        Is input file in bed format? If so it is first converted to homer
        peak format in a temp file.
    """
    if bed is True:
        ## Keep a reference to the NamedTemporaryFile object so the temp file
        ## is not deleted before annotatePeaks.pl reads it.
        tmp_peak_file= tempfile.NamedTemporaryFile(suffix= '.findPeaks')
        tmp_named= tmp_peak_file.name
        bed_to_homer_peaks(peak_file, peak_file= tmp_named)
        del peak_file 
        peak_file= tmp_named
       
    (outdir, myfile)= os.path.split(annotated_file)
    make_wdir(outdir)
    if os.path.exists(annotated_file):
        os.remove(annotated_file)
    
    cmd= 'annotatePeaks.pl %s %s %s > %s ' %(peak_file, refgenome, opt, annotated_file)
    log_cmd(log_file= CMD_FILE, logline= cmd)
    if echo is False:
        p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
        p.wait()
    else:
        ## echo mode: command logged only, nothing executed or converted.
        return()
    
    ## Convert to bed. Also, remove the header line and replace empty fields
    ## with NA_STRING. NOTE: 'bed' below re-uses the name of the boolean
    ## argument as the output file handle.
    print('Converting %s to bed' %(annotated_file))
    bed= open(annotated_file + '.bed', 'w')
    fin= open(annotated_file)
    i= 0
    for line in fin:
        if i == 0:
            ## Skip the single header line emitted by annotatePeaks.pl.
            i= 1
            continue
        line= line.rstrip('\n\r')
        line_na= line.split('\t')
        line= []
        for x in line_na:
            if x == '':
                line.append(NA_STRING)
            else:
                line.append(x)
        ## Reorder homer columns (name, chrom, start, end, strand, score, ...)
        ## to BED (chrom, start, end, name, score, strand, ...).
        bed_line= [line[1]] + [line[2]] + [line[3]] + [line[0]] + [line[5]] + [line[4]] + ['\t'.join(line[6:])]
        bed_line= '\t'.join(bed_line)
        bed.write(bed_line + '\n')    
    bed.close()
    fin.close()

def coverageBed(bam_dir, ref_bed= 'peaks.annotated.bed', coverage_dir= 'coverage', file_regex= '\.bam$', opt= '', rpkm= True):
    """
    Use coverageBed to count tags for each feature in ref_bed, producing one
    sorted output bed per input bam. Optionally an RPKM column is inserted
    after each entry of the reference bed and before the coverageBed output.
    
    bam_dir:
        dir with tags to be counted
    ref_bed:
        BED file with regions where to count tags
    coverage_dir:
        Dir where output beds will be. Created if it doesn't exist
    file_regex:
        Use files in bam_dir matching this regex.
    opt:
        Option string to be passed to coverageBed
    rpkm:
        If True calculate rpkm using the number of reads in the bam file and
        the number of reads in each feature of ref_bed, taken from the 4th
        last column of the coverageBed output.
    """
    print('\nExecuting coverageBed() ...')
    
    if not os.path.exists(coverage_dir):
        os.makedirs(coverage_dir)
    
    bam_files= sorted([f for f in os.listdir(bam_dir) if re.search(file_regex, f)])
    bam_paths= [os.path.join(bam_dir, f) for f in bam_files]
    out_paths= [os.path.join(coverage_dir, re.sub(file_regex, '.coverage.bed', f)) for f in bam_files]
    
    procs= []
    for bam_path, out_path in zip(bam_paths, out_paths):
        cmd= 'coverageBed %s -abam %s -b %s | sort -k 1,1 -k 2,2n -k 3,3n > %s ' %(opt, bam_path, ref_bed, out_path)
        print(cmd)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        procs.append(subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a')))
        if len(procs) >= NO_SUBPROCS:
            for running in procs:
                running.wait()
            procs= []
    for running in procs:
        running.wait()
    " Add rpkm column "
    if rpkm:
        for bam_path, out_path in zip(bam_paths, out_paths):
            " Make sure the coverage is in the 4th last column! "
            coverageBed_rpkm(out_path, bam_path, count_pos= -4)


def coverageBed_rpkm(coverageBed, bam, count_pos= -4):
    """
    Compute the rpkm for each feature from the output of coverageBed using as
    total number of reads the read count from bam.
    The rpkm column is inserted immediatly *before* the column containing the read
    count, that is the 4th column from right in the default output of coverageBed.
    The input file is rewritten in place.

    coverageBed:
                The output from coverageBed for which to calculate rpkm.
                NOTE(review): this parameter shadows the sibling function
                coverageBed() inside this function's scope.
    bam:
                bam file to use to calculate the total number of reads (this is
                the bam file used for coverageBed.            
    count_pos:
                Column index (0 based) in coverageBed holding the number of reads
                overlappig each feature. In the default output from coverageBed
                this column is the 4th from right.
                Negative index is allowed. Default -4 suites the default from
                coverageBed
    """
    ## Count records in the bam. NOTE(review): iterating the Samfile counts
    ## every record returned by pysam, which presumably includes unmapped
    ## reads despite the variable name -- confirm this is the intended
    ## RPKM denominator.
    bamfile = pysam.Samfile(bam, "rb")
    mappedreads= 0
    for AlignedRead in bamfile:
        mappedreads += 1
    bamfile.close()

    ## Read the whole file first: it is rewritten in place below.
    covbed= open(coverageBed).readlines()
    ## rpkmbed= re.sub('$.bed', '', coverageBed) + 'rpkm.bed'
    fout= open(coverageBed, 'w')
    for line in covbed:
        line= line.strip('\n\r')
        line= line.split()
        exonreads= int(line[count_pos])
        exonlength= int(line[2]) - int(line[1])
        ## reads per kilobase of feature per million reads.
        rpkm= exonreads / ((mappedreads / 1000000.0) * (exonlength / 1000.0))        
        line.insert(count_pos, str(rpkm))
        line= '\t'.join(line)
        fout.write(line + '\n')
    fout.close()
    
    
def coverage_matrix(coverage_dir= os.getcwd(), matrix_file= 'coverage_matrix.txt', opt= "-x -4 -e '\.bed$'", echo= False):
    """
    Build a matrix of tag counts with peaks as rows and samples as columns by
    delegating to make_coverage_matrix.py.
    NB: the input coverage files must all be sorted the same way.
    If echo is True, the command is only printed and logged.
    """
    print('\nExecuting chipseq_pipeline.coverage_matrix() ...')
    cmd= 'make_coverage_matrix.py -i %s %s > %s' %(coverage_dir, opt, matrix_file)
    print(cmd)
    log_cmd(log_file= CMD_FILE, logline= cmd)
    if echo is not False:
        return
    subprocess.Popen(cmd, shell= True).wait()

def call_matrix(call_file, matrix_file= None, row_header= 3, calls_index= 6, calls_sep= '|', strip_dir= True, strip_exts= True, sub_regex= None):
    """
    Produces a matrix of 0/1 for a peak (rows) being called in library x (columns)
    By default, gets the information on the 7th column of annotatePeaks output
    (either as is or bed)
    IMPORTANT output from "mergeBed -nms ..." has merged features in the 4th or 5th column (hence calls_index= 3 and calls_sep= ';')
    IMPORTANT: Input file must not have header line.
    
    call_file:
        File to scan for presence/absence of a call. Typically the output of
        annotatePeaks or mergePeaks either as is or as bed
    matrix_file:
        Output file. By default gets the same name of call_file with appended '.call_matrix'
    row_header:
        Column index with feature identifiers (peak_id). 0-based. 3 suites bed files
    calls_index:
        Column index with the calls. 7th column suites bed files and output from annotatePeaks. 4th or 5th col for
        mergeBed -nms
    calls_sep:
        Column containing the called files has files separated by this delimiter e.g. "/peaks/file1.peaks|/peaks/file2.peaks".
        Use ';' for output from "mergeBed -nms"
    strip_dir, strip_exts:
        strip directory and/or extension from files in call_index?
    sub_regex:
        Pass this regex to re.sub() to extract the file name from the peak name (if the calls column has peak names instead of file names).
        E.g.: To extract 'new001':
            sub_regex= '_peak_.*$'
            x= 'new004_peak_17'
            re.sub(sub_regex, '', x)
        Default None skips the re.sub() step.
        IMPORTANT: If you use this option make sure that the file name *can* be extracted from the peak name!
    
    """
    print('\nExecuting chipseq_pipeline.call_matrix() ...')
    if matrix_file is None:
        matrix_file= call_file + '.call_matrix'
    else:
        (outdir, myfile)= os.path.split(matrix_file)
        make_wdir(outdir)

    ## Single pass over the file: collect the feature names and, per line,
    ## the list of calling files. (The original read the file twice and on
    ## the second pass discarded the result of line.strip('\n\r'), so the
    ## last file in the calls column kept its newline and was never matched.)
    names= []
    calls_per_line= []
    files_dict= {}
    fin= open(call_file)
    for line in fin:
        fields= line.strip('\n\r').split('\t')
        names.append(fields[row_header])
        files= fields[calls_index].split(calls_sep)
        if sub_regex is not None:
            files= [re.sub(sub_regex, '', x) for x in files]
        calls_per_line.append(files)
        for f in files:
            ## 'in' replaces the Python2-only dict.has_key().
            if f not in files_dict:
                files_dict[f]= []
    fin.close()

    ## sorted() works on both py2 and py3 (keys() is a view in py3).
    keys= sorted(files_dict)
    for files in calls_per_line:
        for k in keys:
            files_dict[k].append(1 if k in files else 0)

    ## Build the column headers, optionally stripping dir and extension.
    if strip_dir is True:
        kstrip_dir= [os.path.split(k)[1] for k in keys]
    else:
        kstrip_dir= list(keys)
    if strip_exts is True:
        kstrip= [os.path.splitext(k)[0] for k in kstrip_dir]
    else:
        kstrip= list(kstrip_dir)

    fout= open(matrix_file, 'w')
    fout.write('\t'.join(['peak_id'] + kstrip) + '\n')
    for i, name in enumerate(names):
        row= [name] + [str(files_dict[k][i]) for k in keys]
        fout.write('\t'.join(row) + '\n')
    fout.close()

def paste_mergefiles(mergef= 'macs/mergepeaks/macs.mergeBed.bed', covf= 'macs/coverage/coverage_matrix.txt', callf= 'macs/mergepeaks/macs.mergeBed.bed.call_matrix', outfile= 'macs/macs.pasted.bed'):
    """
    Paste side by side the merged files:
    *.mergeBed.bed (output of mergeBed)                   <<< w/o column header
    coverage_matrix.txt (output of coverage_matrix())     <<< w/ row and column header
    *.mergeBed.bed.call_matrix (output of calls_matrix()) <<< w/ row and column header
    
    The coverage and call matrices are each squeezed into one ';'-separated
    column, preceded by their ';'-joined column headers.
    These files *must* be sorted in the same way.
    """
    print('\nExecuting chipseq_pipeline.paste_mergefiles() ...')
    merge_in= open(mergef)
    cov_in= open(covf)
    call_in= open(callf)
    out= open(outfile, 'w')
    
    ## Consume the header line of each matrix; row names (first column) are
    ## dropped everywhere below.
    cov_header= ';'.join(cov_in.readline().strip().split('\t')[1:])
    call_header= ';'.join(call_in.readline().strip().split('\t')[1:])
    
    column_header= '\t'.join(["chrom", "start", "end", "peak_region_id", "strand", "no_peaks_unused", "merged_peaks", "annotation", "detailed_annotation", "distance_to_tss", "nearest_promoter_id", "entrez_id", "nearest_unigene_id", "nearest_refseq_id", "nearest_ensembl_id", "gene_name", "gene_alias", "gene_description", "gquad_chrom", "gquad_start", "gquad_end", "gquad_id", "gquad_seq", "gquad_strand", "distance_to_nearest_gquad integer", 'project_id', 'basenames_coverage', 'coverage', 'basenames_call', 'calls'])
    out.write(column_header + '\n')
    for merge_line, cov_line, call_line in zip(merge_in, cov_in, call_in):
        cov_vals= ';'.join(cov_line.rstrip('\n\r').split('\t')[1:])
        call_vals= ';'.join(call_line.rstrip('\n\r').split('\t')[1:])
        out.write(merge_line.rstrip('\n\r') + '\t' + cov_header + '\t' + cov_vals + '\t' + call_header + '\t' + call_vals + '\n')
    merge_in.close()
    cov_in.close()
    call_in.close()
    out.close()
    return()
    
def macs2(aln_dir, design, output_dir= 'macs/peaks', opt= '', file_regex= '\.bam$', output_exts= '.macs', project_id= 'NA'):
    """ Executes macs14 on the treatment/input BAM pairs listed in the design file.
    aln_dir <str>
        Directory containing the alignment files named in the design.
    design <str>
        Design file containing columns "alignment_bam" and "input_bam" (read by
        read_design()). Typically os.path.join(project-dir, "design.txt")
    output_dir <str>
        Path to output dir. Will be created if it doesn't exist.
    opt <str>
        String of options passed verbatim to macs14.
    file_regex <str>
        Unused; kept for backward compatibility of the signature.
    output_exts <str>
        Output files will be named *basename*.output_exts where basename comes
        from the alignment file name.
    project_id <str>
        Added as a column by combine_macs().
    """
    ## Bug fix: banner previously said "macs()"; this is macs2().
    print('\nExecuting chipseq_pipeline.macs2() ...')
    
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    ## NOTE: a previous version listed aln_dir here but never used the result.
    design= read_design(design)
    procs= []
    ## Run up to NO_SUBPROCS macs14 jobs in parallel, then wait for the batch.
    for aln, input_bam in zip(design['alignment_bam'], design['input_bam']):
        outfile= os.path.join(output_dir, os.path.splitext(aln)[0]) + output_exts
        cmd= 'macs14 -t %s -c %s -n %s %s' %(os.path.join(aln_dir, aln), os.path.join(aln_dir, input_bam), outfile, opt)
        print(cmd)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
        procs.append(p)
        if len(procs) >= NO_SUBPROCS:
            for x in procs:
                x.wait()
            procs= []
    for x in procs:
        x.wait()
    print('Combining macs output...')
    ## Each macs run leaves a *.macs_summits.bed file; use it to find the run basenames.
    macs_basename= [os.path.join(output_dir, x) for x in sorted(os.listdir(output_dir)) if re.search('\.macs_summits\.bed$', x)]
    macs_basename= [re.sub('\.macs_summits\.bed$', '', x) for x in macs_basename]
    for peak in macs_basename:
        combine_macs(peak, project_id)
    get_macs_summary(macs_peaks_xls= [x + '.macs_peaks.xls' for x in macs_basename])
    ## Concatenate all combined BED files under a single header and archive them.
    concat= 'macs.combined.concatenated.bed'
    cmd= """
        cd %(output_dir)s
        echo -e 'chr\tsummit_start\tsummit_end\tmacs_id\tpileup_at_summit\tstrand\tpeak_start\tpeak_end\tpeak_length\tsummit_dist_from_start\ttags_in_region\tlog_pvalue\tfold_enrich\tfdr_percent\tfile_basename\tproject_id' > %(concat)s
        cat %(macs)s >> %(concat)s
        tar czvf macs.combined.bed.tar.gz %(macs)s
        """ %{'output_dir': output_dir, 'concat':concat, 'macs': '*.macs_combined.bed'}
    log_cmd(log_file= CMD_FILE, logline= cmd)
    p= subprocess.Popen(cmd, shell= True)
    p.wait()
    ## Produce graphs
    p= subprocess.Popen('macs_stats_graphs.R --args macs=%s' %(os.path.join(output_dir, concat)), shell= True)
    p.wait()
    return()

def macs(aln_dir, design, output_dir= 'macs/peaks', opt= '', file_regex= '\.bed$', echo= False, output_exts= '.macs', project_id= 'NA', input_ext= '.bam'):
    """ Executes macs on all the files in dir aln_dir matching file_regex.
    design <str>
        Design file containing columns "basename" and "input" to match. Typically os.path.join(project-dir, "design.txt")
    output_dir <str>
        Path to output dir. will be created if doesn't exists.
    opt <str>
        String of options to pass verbatim to macs14.
    echo <bool>
        If True just print out the system commands without executing them.
    file_regex <str>
        Process only input files matching this regex.
    output_exts <str>
        Output files will be named *basename*.output_exts where basename comes from design$basename
    input_ext <str>
        The extension of the input files (include dot! I.e. '.bam' not 'bam')
        """
    print('\nExecuting chipseq_pipeline.macs() ...')
    
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
       
    ## Candidate alignment files, filtered by regex and sorted for stable order.
    aln_files= os.listdir(aln_dir)
    aln_files.sort()
    aln_files= [x for x in aln_files if re.search(file_regex, x)]
        
    design= read_design(design)
    procs= []
    ## For each (treatment, control) pair in the design, resolve each name to
    ## exactly one file in aln_dir; die loudly on zero or multiple matches.
    for basename, input in zip(design['basename'], design['input']):
        outfile= os.path.join(output_dir, basename) + output_exts
        
        tags= []
        for x in aln_files:
            if x == basename + input_ext:
                tags.append(x)
        if len(tags) > 1:
            sys.exit('macs(): basename %s retrieved more than one file: %s' %(basename, tags))
        if len(tags) == 0:
            sys.exit('macs(): basename %s did not retrieve any file' %(basename))
        tags= tags[0]
        tagdir_path= os.path.join(aln_dir, tags)
        
        input_tags= []
        for x in aln_files:
            if x == input + input_ext:
                input_tags.append(x)
        if len(input_tags) > 1:
            sys.exit('macs(): input name %s retrieved more than one file: %s' %(input, input_tags))
        if len(input_tags) == 0:
            sys.exit('macs(): input name %s did not retrieve any file' %(input))
        input_tags= input_tags[0]
        inputdir_path= os.path.join(aln_dir, input_tags)
    
        cmd= 'macs14 -t %s -c %s -n %s %s' %(tagdir_path, inputdir_path, outfile, opt)
        print(cmd)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        if echo is False:
            ## Run up to NO_SUBPROCS jobs in parallel, then wait for the batch.
            p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
            procs.append(p)
            if len(procs) >= NO_SUBPROCS:
                for x in procs:
                    x.wait()
                procs= []
        else:
            continue
    ## Wait for any jobs left over from the last (partial) batch.
    for x in procs:
        x.wait()
    print('Combining macs output...')
    ## Each macs run leaves a *.macs_summits.bed file; use it to find the run basenames.
    macs_basename= [os.path.join(output_dir, x) for x in sorted(os.listdir(output_dir)) if re.search('\.macs_summits\.bed$', x)]
    macs_basename= [re.sub('\.macs_summits\.bed$', '', x) for x in macs_basename]
    for peak in macs_basename:
        combine_macs(peak, project_id)
    get_macs_summary(macs_peaks_xls= [x + '.macs_peaks.xls' for x in macs_basename])
    ## Concatenate all combined BED files under a single header and archive them.
    concat= 'macs.combined.concatenated.bed'
    cmd= """
        cd %(output_dir)s
        echo -e 'chr\tsummit_start\tsummit_end\tmacs_id\tpileup_at_summit\tstrand\tpeak_start\tpeak_end\tpeak_length\tsummit_dist_from_start\ttags_in_region\tlog_pvalue\tfold_enrich\tfdr_percent\tfile_basename\tproject_id' > %(concat)s
        cat %(macs)s >> %(concat)s
        tar czvf macs.combined.bed.tar.gz %(macs)s
        """ %{'output_dir': output_dir, 'concat':concat, 'macs': '*.macs_combined.bed'}
    log_cmd(log_file= CMD_FILE, logline= cmd)
    p= subprocess.Popen(cmd, shell= True)
    p.wait()
    ## Produce graphs
    p= subprocess.Popen('macs_stats_graphs.R --args macs=%s' %(os.path.join(output_dir, concat)), shell= True)
    p.wait()
    return()
    
def combine_macs(file_basename, project_id, header= False):
    """
    Combine the 3 main outputs of macs and add a file_id and a project_id
    file_basename is the part on the right of '.macs_summits.bed' e.g. in 'new001.macs_summits.bed' it is 'new001'.
    The peak names are renamed from MACS_peak_1 to new001_peak_1 (i.e. replace MACS with basename).
    
    file_basename: can include path like 'new001' or 'macs/peaks/new001' if the actual files are 'macs/peaks/new001.macs_summits.bed etc...'

    header:
        The resulting file will have columns (*not* in the file) if header= False. If True it will be:
        ['chr', 'summit_start', 'summit_end', 'macs_id', 'pileup_at_summit', 'strand', 'peak_start', 'peak_end', 'peak_length', 'summit_dist_from_start', 'tags_in_region', 'log_pvalue', 'fold_enrich', 'fdr_percent', 'file_basename', 'project_id']
    
    Returns the name of the combined file (*.macs_combined.bed).
    
    Bug fixes vs previous version: the first data row was handled by duplicated
    code outside the loop (and crashed on empty peak files); the xls header
    skip looped forever if the xls had no data section (readline() at EOF
    returns '' which matched the `continue` branch).
    """
    base= os.path.split(file_basename)[1]
    with open(file_basename + '.macs_summits.bed') as fh_summits, \
         open(file_basename + '.macs_peaks.bed') as fh_peaks, \
         open(file_basename + '.macs_peaks.xls') as fh_xls, \
         open(file_basename + '.macs_combined.bed', 'w') as outf:
        if header is True:
            colnames= ['chr', 'summit_start', 'summit_end', 'macs_id', 'pileup_at_summit', 'strand', 'peak_start', 'peak_end', 'peak_length', 'summit_dist_from_start', 'tags_in_region', 'log_pvalue', 'fold_enrich', 'fdr_percent', 'file_basename', 'project_id']
            outf.write('\t'.join(colnames) + '\n')
        ## Skip the xls comment/blank preamble; the first remaining line is the
        ## column header row and is discarded. Terminates at EOF.
        for xls_line in fh_xls:
            if xls_line.strip().startswith('#') or xls_line.strip() == '':
                continue
            break
        ## The three files are row-aligned: one summit, one peak and one xls
        ## data row per peak region.
        for peaks_line in fh_peaks:
            peaks_line= peaks_line.strip()
            if peaks_line == '':
                break
            peaks_fields= peaks_line.split('\t')
            summit_fields= fh_summits.readline().strip().split('\t')
            ## Rename MACS_peak_N to <basename>_peak_N
            summit_fields[3]= re.sub('^MACS', base, summit_fields[3])
            xls_fields= fh_xls.readline().strip().split('\t')
            combined_line= summit_fields + ['+'] + peaks_fields[1:3] + xls_fields[3:] + [base] + [project_id]
            outf.write('\t'.join(combined_line) + '\n')
    return(file_basename + '.macs_combined.bed')

def get_tags_in_peaks_macs(macs_peak_xls):
    """ Read macs *.xls file to get the total number of tags in peaks.
    In other words, get the sum of 'tags' column (6th)
    ARGS:
    macs_peak_xls:
        File name (.xls output from macs)
    
    RETURNS:
        Integer of the tags sum, or an error string if the file can't be opened.
        
    Input looks like this:
    ----------------------------------------------------------------------------
    # This file is generated by MACS version 1.4.1 20110627
    # ARGUMENTS LIST:
    # name = macs/peaks/ds001_sample1.macs
    # format = AUTO
    [...]
    # Redundant rate in control: 0.00
    # d = 126
    chr     start   end     length  summit  tags    -10*log10(pvalue)       fold_enrichment FDR(%)
    chr1    6140523 6140777 255     124     4       1370.87 1.98    0.00
    chr1    6942109 6942522 414     274     16      3100.00 5.29    0.00
    [...]
    """
    try:
        ## Context manager fixes the handle leak of the previous version;
        ## the except is narrowed from a bare `except:` to I/O errors only.
        with open(macs_peak_xls) as fin:
            lines= fin.readlines()
    except (IOError, OSError):
        return('get_tags_in_peaks_macs: Cannot open file: %s' %(macs_peak_xls))
    ## Drop blank lines, comment lines, then the column-header row.
    xls= [x for x in lines if x.strip() != '' and not x.startswith('#')]
    xls= xls[1:]
    return(sum(int(line.split('\t')[5]) for line in xls))

def get_macs_negative_peaks(macs_dir, output_exts= '.macs', output_file= 'macs_negative_peaks.tsv'):
    """
    Count the number of positive peaks and compares with negative peaks 
    macs_dir:
        Dir with macs files
    output_exts:
        Extension to identify macs files (should be the same as output_exts in function macs())
    output_file:
        Where output will be written to.
    
    Returns None on success, an error string on mismatched inputs.
    
    Bug fixes vs previous version: file handles are now always closed (they
    leaked on the early-return paths), and an empty positive-peaks file no
    longer raises ZeroDivisionError.
    """
    with open(output_file, 'w') as output_fh:
        macs_files= sorted(os.listdir(macs_dir))
        macs_files= [x for x in macs_files if output_exts in x]
        positive_peaks= [x for x in macs_files if 'summits.bed' in x]
        negative_peaks= [x for x in macs_files if 'negative_peaks.xls' in x]
        if len(positive_peaks) != len(negative_peaks):
            return('Exiting chipseq_pipeline.get_macs_negative_peaks(): Number of "positive peaks files" (%s) is not equal to the number of "negative peaks files" (%s)' %(len(positive_peaks), len(negative_peaks)))
        
        output_fh.write('\t'.join(['peak_file', 'no_peaks', 'negative_file', 'no_negative_peaks', 'perc_negative']) + '\n')
        for p, n in zip(positive_peaks, negative_peaks):
            ## Make sure positive and negative file refer to the same run
            pos_basename= p[0:p.find(output_exts)]
            neg_basename= n[0:n.find(output_exts)]
            if pos_basename != neg_basename:
                return('Exiting chipseq_pipeline.get_macs_negative_peaks(): mismatch in the name of positive (%s) and negative peak file (%s)' %(p, n))
            with open(os.path.join(macs_dir, p)) as pos_fh:
                npos= len(pos_fh.readlines())
            with open(os.path.join(macs_dir, n)) as neg_fh:
                nneg= len(neg_fh.readlines()) - 1  ## First line is the column header
            
            if npos == 0:
                perc_neg= NA_STRING  ## Avoid ZeroDivisionError on empty peak files
            else:
                perc_neg= str(round(100*(float(nneg)/float(npos)), 2))
            
            output_fh.write( ('\t'.join([p, str(npos), n, str(nneg), perc_neg])) + '\n' )
    
def get_macs_summary(macs_peaks_xls):
    """
    Get the header lines from macs xls files. Also computes tot number of tags
    in peaks and %enrichment.
    Send output to remine_wiki dir
    
    macs_peaks_xls: *List* of files to parse. Typically something like [new001.macs_peaks.xls, new002.macs_peaks.xls, etc...]
    
    Writes redmine_wiki/macs_xls_long_summary.txt and
    redmine_wiki/macs_xls_short_summary.txt relative to the current dir.
    
    Bug fix vs previous version: each xls file handle is now closed as soon as
    it is parsed (previously only the last one was closed, after the loop).
    """

    header= ['macs_version', 'macs_basename', 'format', 'chipseq_file', 'ctrl_file', 'eff_genome_size', 'band_width', 'model_fold', 'pval_cutoff', 'scaling', 'lambda range', 'tag_size', 'tot_tags_ip', 'tot_tags_ip_after_filter', 'max_dupl_tags_ip', 'redundant_rate_ip', 'tot_tags_in_ctrl', 'tot_tags_ctrl_after_filter', 'max_dupl_tags_ctrl', 'redundant_rate_ctrl', 'd', 'tags_in_peaks', 'enrichment_perc']
    summary= ['\t'.join(header)]
    for f in macs_peaks_xls:
        tags_in_peaks= get_tags_in_peaks_macs(f)
        summary_lines= []
        ## NOTE(review): tags_in_treat is only bound when the
        ## '# total tags in treatment:' header line is present; parsing an xls
        ## without it raises — confirm all macs14 xls files include it.
        with open(f) as fh:
            for line in fh:
                ## Read comment lines: each recognized header becomes one
                ## column of the summary, in file order (which matches
                ## `header` above). Parsing stops at the first unrecognized
                ## non-blank line (the data table header).
                line= line.strip()
                if line.startswith('# This file is generated by MACS version '):
                    summary_lines.append(re.sub('^# This file is generated by MACS version ', '', line))
                elif line.startswith('# ARGUMENTS LIST:'):
                    continue
                elif line.startswith('# name = '):
                    summary_lines.append(re.sub('# name = ', '', line))
                elif line.startswith('# format = '):
                    summary_lines.append(re.sub('# format = ', '', line))
                elif line.startswith('# ChIP-seq file = '):
                    summary_lines.append(re.sub('# ChIP-seq file = ', '', line))
                elif line.startswith('# control file = '):
                    summary_lines.append(re.sub('# control file = ', '', line))
                elif line.startswith('# effective genome size = '):
                    summary_lines.append(re.sub('# effective genome size = ', '', line))
                elif line.startswith('# band width = '):
                    summary_lines.append(re.sub('# band width = ', '', line))
                elif line.startswith('# model fold = '):
                    summary_lines.append(re.sub('# model fold = ', '', line))
                elif line.startswith('# pvalue cutoff = '):
                    summary_lines.append(re.sub('# pvalue cutoff = ', '', line))
                elif line.startswith('# Small dataset will be scaled towards larger dataset'):
                    summary_lines.append('')
                elif line.startswith('# Range for calculating regional lambda is: '):
                    summary_lines.append(re.sub('# Range for calculating regional lambda is: ', '', line))
                elif line == '':
                    continue
                elif line.startswith('# tag size is determined as '):
                    summary_lines.append(re.sub('# tag size is determined as ', '', line))
                elif line.startswith('# total tags in treatment: '):
                    tags_in_treat= re.sub('# total tags in treatment: ', '', line)
                    summary_lines.append(tags_in_treat)
                elif line.startswith('# tags after filtering in treatment: '):
                    summary_lines.append(re.sub('# tags after filtering in treatment: ', '', line))
                elif line.startswith('# maximum duplicate tags at the same position in treatment = '):
                    summary_lines.append(re.sub('# maximum duplicate tags at the same position in treatment = ', '', line))
                elif line.startswith('# Redundant rate in treatment: '):
                    summary_lines.append(re.sub('# Redundant rate in treatment: ', '', line))
                elif line.startswith('# total tags in control: '):
                    summary_lines.append(re.sub('# total tags in control: ', '', line))
                elif line.startswith('# tags after filtering in control: '):
                    summary_lines.append(re.sub('# tags after filtering in control: ', '', line))
                elif line.startswith('# maximum duplicate tags at the same position in control = '):
                    summary_lines.append(re.sub('# maximum duplicate tags at the same position in control = ', '', line))
                elif line.startswith('# Redundant rate in control: '):
                    summary_lines.append(re.sub('# Redundant rate in control: ', '', line))
                elif line.startswith('# d = '):
                    summary_lines.append(re.sub('# d = ', '', line))
                else:
                    break
        summary_lines.append(str(tags_in_peaks))
        summary_lines.append(str( round(100*(tags_in_peaks / float(tags_in_treat)), 3) ))
        summary.append('\t'.join(summary_lines))
    if not os.path.exists('redmine_wiki'):
        os.makedirs('redmine_wiki')
    with open('redmine_wiki/macs_xls_long_summary.txt', 'w') as fout_long:
        for line in summary:
            fout_long.write(line + '\n')
    ## Short summary: a subset of columns, with the chipseq file path stripped.
    with open('redmine_wiki/macs_xls_short_summary.txt', 'w') as fout_short:
        for line in summary:
            fields= line.split('\t')
            fields[3]= os.path.split(fields[3])[1]
            fout_short.write('\t'.join([fields[i] for i in [3,12,21,22,13,14,15,16,17,18,19]]) + '\n')
    
def concatenate_files(files, project_id, outfile= 'concatenated.txt', skip= 0, comment_char= '#', sep= '\t', skip_blanks= True):
    """
    Concatenate the list of 'files' into outfile, appending to each line a
    column with the file name (without path!) and a column with project_id.
    Lines starting with comment_char are dropped; blank lines are dropped when
    skip_blanks is True; the first `skip` kept lines of each file are printed
    to stdout instead of written.
    """
    with open(outfile, 'w') as out:
        for path in files:
            file_id= os.path.split(path)[1]
            kept= 0
            with open(path) as fh:
                for raw in fh:
                    stripped= raw.strip()
                    if comment_char != '' and stripped.startswith(comment_char):
                        continue
                    if skip_blanks is True and stripped == '':
                        continue
                    kept += 1
                    if kept <= skip:
                        print(file_id, stripped)
                    else:
                        out.write(stripped + sep + file_id + sep + project_id + '\n')


def fastqc(aln_dir, output_dir= 'fastqc', opt= '', file_regex= '\.bam$', echo= False):
    """ Executes FastQC on all the files in aln_dir matching file_regex.
    aln_dir <str>:
        Directory scanned for input files.
    output_dir <str>:
        FastQC output dir; created if missing, archived to
        <output_dir>_fastqc.tar.gz and then removed.
    opt <str>:
        Options passed verbatim to fastqc.
    file_regex <str>:
        Only files matching this regex are processed.
    echo <bool>:
        If True only log the command, don't run it.
    Returns an error string when no input file matches or compression fails.
    """
    print('\nExecuting chipseq_pipeline.fastqc()...')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
       
    aln_files= os.listdir(aln_dir)
    aln_files= [x for x in aln_files if re.search(file_regex, x)]
    if len(aln_files) == 0:
        ## Bug fix: previously referenced the undefined name `exts` here,
        ## raising NameError instead of returning the message.
        return('No input files found in dir "%s" ending with "%s"' %(aln_dir, file_regex))
    aln_files= [os.path.join(aln_dir, x) for x in aln_files]
    aln_files.sort()
    aln_files= ' '.join(aln_files)
    
    cmd= 'fastqc %s -o %s %s' %(opt, output_dir, aln_files)
    log_cmd(log_file= CMD_FILE, logline= cmd)    
    if echo is False:
        p= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
        p.wait()
        ## The zip archives duplicate the unpacked report dirs; drop them.
        for x in os.listdir(output_dir):
            if x.endswith('.zip'):
                os.remove(os.path.join(output_dir, x))
        try:
            p= subprocess.Popen('tar czvf %s %s' %(output_dir + '_fastqc.tar.gz', output_dir), shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
            p.wait()
        except:  ## NOTE(review): deliberately best-effort; returns a message instead of raising
            return('fastqc: Could not compress %s' %(output_dir))
        get_fastqc_dupl(output_dir= output_dir, outfile= 'fastqc_dupl_lev.txt')
        shutil.rmtree(output_dir)
        if not os.path.exists(WIKI_DIR):
            os.makedirs(WIKI_DIR)
        shutil.copy(output_dir + '_fastqc.tar.gz', WIKI_DIR)
    else:
        return()

def get_fastqc_dupl(output_dir, outfile= 'fastqc_dupl_lev.txt'):
    """ Collect the sequence duplication levels from every FastQC report dir
    found in output_dir. One row per report is written to WIKI_DIR/outfile:
    file name, total duplication %, and the relative counts for duplication
    levels 2, 3 and 4 (all rounded to 3 decimals).
    """
    report_dirs= sorted(x for x in os.listdir(output_dir) if not x.endswith('.zip'))
    if not os.path.exists(WIKI_DIR):
        os.makedirs(WIKI_DIR)
    with open(os.path.join(WIKI_DIR, outfile), 'w') as dest:
        dest.write('\t'.join(['file', 'tot. dup.', 'dup= 2', 'dup= 3', 'dup= 4']) + '\n')
        for rep in report_dirs:
            with open(os.path.join(output_dir, rep, 'fastqc_data.txt')) as qfh:
                qcdata= [x.strip() for x in qfh.readlines()]
            ## Name of the sequenced file this report refers to
            filename_line= [x for x in qcdata if x.startswith('Filename\t')][0]
            row= [re.sub('Filename\t', '', filename_line)]
            ## Overall duplication percentage
            duplevel_line= [x for x in qcdata if x.startswith('#Total Duplicate Percentage\t')][0]
            row.append(round(float(re.sub('#Total Duplicate Percentage\t', '', duplevel_line)), 3))
            ## Relative counts for the first few duplication levels, taken
            ## from the table that starts at the '#Duplication Level' header.
            table_start= qcdata.index('#Duplication Level\tRelative count')
            for offset in (2, 3, 4):
                row.append(round(float(qcdata[table_start + offset].split('\t')[1]), 3))
            dest.write('\t'.join(str(v) for v in row) + '\n')
    return()
       
def data_typer(data_sample, na_string):
    """ Determine the datatype of the columns of a table (list of lists). 
    Input is a list of rows (lists) plus the string used for missing values.
    Output is a list of data types (chosen from double precision, int, or text),
    one per column.
    Example:
    data_sample= [[1, 1.0, 'a', ''],
                  [2, 2.1, 'b', '']
                 ]

    returns: column_types= ['int', 'double precision', 'text', 'text']
    
    Bug fix vs previous version: the loop iterated over range(len(data_sample))
    — the number of ROWS — so for non-square tables the wrong number of column
    types was returned. It now iterates over the columns of the first row
    (all rows are assumed to have the same length).
    
    This function has been taken from http://code.google.com/p/postgresql-read-table/source/browse/trunk/read_table-0.97-alpha-py3.sql """

    column_types= []
    if not data_sample:
        return(column_types)
    for i in range(len(data_sample[0])):
        ## Test data type of each column. NAs are always allowed so they are
        ## removed before determining the datatype.
        col_i= [row[i] for row in data_sample if row[i] != na_string]
        if not col_i:
            ## If a column is all NULLs set it to type text
            column_types.append('text')
            continue
        try:
            ## Try to convert each element to float. If this is possible,
            ## see if the modulo %1 is always zero.
            test_float= set([float(x)%1 for x in col_i])
            if test_float == set([0.0]) and all('.' not in str(x) for x in col_i):
                ## Whole numbers with no explicit decimal point (e.g. not '1.0') are int
                col_i_type= 'int' 
            else:
                col_i_type= 'double precision' 
        except (TypeError, ValueError):
            ## If can't convert to float set type to text
            col_i_type= 'text'
        column_types.append(col_i_type)
    return(column_types)

def igvtools_sort(infile, outfile, echo):
    """ Launch `igvtools sort infile outfile` in the background.
    Always logs the command; when echo is not False the command is only
    logged and an empty tuple is returned, otherwise the Popen handle
    of the running process is returned (caller must wait on it).
    """
    cmd= 'igvtools sort %s %s' %(infile, outfile)
    log_cmd(log_file= CMD_FILE, logline= cmd)
    if echo is not False:
        return()
    return(subprocess.Popen(cmd, shell= True))

def igvtools_index(infile, echo):
    """ Launch `igvtools index infile` in the background.
    Always logs the command; when echo is not False the command is only
    logged and an empty tuple is returned, otherwise the Popen handle
    of the running process is returned (caller must wait on it).
    """
    cmd= 'igvtools index %s' %(infile)
    log_cmd(log_file= CMD_FILE, logline= cmd)
    if echo is not False:
        return()
    return(subprocess.Popen(cmd, shell= True))

def tree(opt= '.'):
    """ Print out directory tree to stdout (fire-and-forget: the process
    is not waited on). """
    subprocess.Popen('tree %s' %(opt), shell= True)

def exec_generic(cmd, echo= False):
    """ Executes shell cmd (logged to CMD_FILE and waited on), unless echo
    is True in which case the command is only printed. Returns cmd. """
    print(cmd)
    if echo is False:
        log_cmd(log_file= CMD_FILE, logline= cmd)
        proc= subprocess.Popen(cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a'))
        proc.wait()
    return(cmd)


# ---------------------[ PostgreSQL related function ]--------------------------

def md5_file(project_id, conn, files= os.listdir(os.getcwd()), notes= ''):
    """
    Get md5sum and URL for the list of files in 'files' and/or for all the files
    in dir.
    
    project_id:
        Project to which these files belong to.
    files:
        Files to md5. WARNING: the default is evaluated once at import time
        (listing of the cwd at import), not at call time — pass it explicitly.
    conn:
        A dictionary with entries to connect to postgres via psycopg2 {'host':'xxx.xx.xxx.xxx', 'database':'sblab', 'user':'bla'}
    notes:
        Optional string to pass include in project_files table
    E.g.
    Get the md5 for all the bed files in macs/peaks:
    md5_file(files= [os.path.join('macs/peaks', f) for f in os.listdir('macs/peaks') if x.endswith('.bed')])  
    
    FIXME(review): this function looks unfinished/broken — see inline notes.
    It returns inside the first loop iteration, references undefined names,
    never INSERTs the computed row, and never commits or closes the connection.
    """
    pgconn= psycopg2.connect(host= conn['host'], database= conn['database'], user= conn['user'])
    cur = pgconn.cursor()

    host = socket.gethostname()
    full_paths= [os.path.abspath(x) for x in files]
    md5list= []  # FIXME(review): accumulated nowhere — never appended to
    for f in full_paths:
        if os.path.isdir(f) is True:
            continue
        # Row being assembled: project_id, hostname, path, filename, md5, timestamp, notes
        cmd5= [project_id, host, os.path.split(f)[0], os.path.split(f)[1]] 
        p= subprocess.Popen('md5sum %s' %(f), shell= True, stdout= subprocess.PIPE)
        p.wait()
        mdx= p.stdout.read().split()[0]
        cmd5.append(mdx)
        cmd5.append(datetime.datetime.now().isoformat())
        cmd5.append(notes)
        cur.execute('DELETE FROM main.project_files WHERE project_id = %s AND hostname= %s AND path= %s AND filename= %s', (cmd5[0:4])) ## Delete possible outdated entry
        # FIXME(review): fetchone() after a DELETE has no result set — psycopg2
        # raises ProgrammingError here. Presumably an INSERT of cmd5 was intended
        # before/instead of this — TODO confirm against main.project_files schema.
        entry= cur.fetchone()[0]
        # FIXME(review): `line` is undefined (NameError) and this return exits
        # after processing only the first file.
        print(line)
        return(line)

def create_schema(cursor, schema):
    """ Using the open cursor to create schema if it doesn't exist.
    cursor:
        Cursor opened by conn= psycopg2.connect(...); cur= conn.cursor()
    schema:
        Name of schema to create. NOTE: the schema name is interpolated into
        the CREATE SCHEMA / COMMENT statements (identifiers cannot be bound
        as parameters), so it must come from trusted code, not user input.
    
    Returns True if schema existed already, False otherwise
    """
    ## The existence check is now parameterized instead of %-interpolated.
    cursor.execute("select exists (select * from pg_catalog.pg_namespace where nspname = %s);", (schema,))
    schema_exists= cursor.fetchall()[0][0]
    if schema_exists is False:
        cursor.execute(""" CREATE SCHEMA %s;""" %(schema))
        cursor.execute(""" COMMENT ON SCHEMA %s IS 'Schema created by chipseq_pipeline';""" %(schema))
    return(schema_exists)

def scp_file(infile, connection, dest_file= '/tmp/tmp_pg_upload.txt'):
    """ Copy 'infile' to dest_file on a remote host via scp.
    infile:
        Local file to copy across.
    connection <dict>:
        Must provide keys 'scp_user' and 'host'.
    NOTE: scp_user must be able to reach the remote host without a password
    prompt (i.e. via an ssh key pair).
    Returns the string 'scp_file: done'.
    """
    cmd= 'scp %(infile)s %(scp_user)s@%(host)s:%(dest_file)s' %{'infile': infile, 'scp_user': connection['scp_user'], 'host': connection['host'], 'dest_file': dest_file}
    log_cmd(log_file= CMD_FILE, logline= cmd)
    copier= subprocess.Popen(cmd, shell= True, stderr= subprocess.PIPE, stdout= subprocess.PIPE)
    copier.wait()
    return('scp_file: done')

def ssh_remove_file(remotefile, connection):
    """ Remove 'remotefile' on a remote host via ssh.
    remotefile:
        Remote file to remove
    connection <dict>:
        Must provide keys 'scp_user' and 'host'.
    NOTE: scp_user must be able to reach the remote host without a password
    prompt (i.e. via an ssh key pair).
    Returns the string 'ssh_remove_file: done'.
    """
    cmd= 'ssh %(scp_user)s@%(host)s rm %(remotefile)s' %{'scp_user': connection['scp_user'], 'host': connection['host'], 'remotefile': remotefile}
    log_cmd(log_file= CMD_FILE, logline= cmd)
    remover= subprocess.Popen(cmd, shell= True, stderr= subprocess.PIPE, stdout= subprocess.PIPE)
    remover.wait()
    return('ssh_remove_file: done')

def pg_upload_annotatePeaks_bed(infile, conn, schema, table, overwrite= False):
    """ Uploads to postgres the file output of annotatePeaks already converted to bed
    infile:
        Input file
    conn:
        Connection dictionary as conn= {'host':'xxx.xx.xxx.xxx', 'database':'test', 'user':'postgres'} "
    schema, table:
        Schema and table to import to. They will be created if they don't exists.
    overwrite:
        If True, DROP the destination table first.
    NOTE: schema/table names are interpolated into the SQL; they must come
    from trusted code, not user input.
    """
    pgconn= psycopg2.connect(host= conn['host'], database= conn['database'], user= conn['user'])
    cur= pgconn.cursor()
    create_schema(cur, schema)
    if overwrite is True:
        cur.execute(""" DROP TABLE IF EXISTS %s.%s""" %(schema, table))
    cur.execute("""
                CREATE TABLE %s.%s (
                    chrom text,
                    start integer,
                    "end" integer,
                    peak_id text,
                    score double precision,
                    strand text,
                    merged_peaks text,
                    annotation text,
                    detailed_annotation text,
                    distance_to_tss integer,
                    nearest_promoter_id text,
                    entrez_id text,
                    nearest_unigene_id text,
                    nearest_refseq_id text,
                    nearest_ensembl_id text,
                    gene_name text,
                    gene_alias text,
                    gene_description text
                );
                """ %(schema, table))
    ## scp file to upload to Postgres' machine
    pg_upload_file= '/tmp/tmp_pg_upload.txt'
    scp_file(infile= infile, connection= conn, dest_file= pg_upload_file)
    ## Upload to postgres
    cmd= "COPY %s.%s FROM $$%s$$ WITH DELIMITER E'\t' NULL $$%s$$;" %(schema, table, pg_upload_file, NA_STRING)
    ## Consistency fix: log the COPY command like the sibling uploaders do,
    ## instead of only printing it to stdout.
    log_cmd(log_file= CMD_FILE, logline= cmd)
    cur.execute(cmd)
    cur.close()
    pgconn.commit()
    ssh_remove_file(remotefile= pg_upload_file, connection= conn)
    return()

def pg_upload_annotatePeaks_gquads_bed(infile, conn, schema, table, overwrite= False):
    """ Uploads to postgres the file output of annotatePeaks already converted to bed,
    including the g-quadruplex annotation columns.
    infile:
        Input file
    conn:
        Connection dictionary as conn= {'host':'xxx.xx.xxx.xxx', 'database':'test', 'user':'postgres'} "
    schema, table:
        Schema and table to import to. They will be created if they don't exists.
    overwrite:
        If True, DROP the destination table first.
    NOTE: schema/table names are interpolated into the SQL; they must come
    from trusted code, not user input.
    """
    pgconn= psycopg2.connect(host= conn['host'], database= conn['database'], user= conn['user'])
    cur= pgconn.cursor()
    create_schema(cur, schema)
    if overwrite is True:
        cur.execute(""" DROP TABLE IF EXISTS %s.%s""" %(schema, table))
    cur.execute("""
                CREATE TABLE %s.%s (
                    chrom text,
                    start integer,
                    "end" integer,
                    peak_id text,
                    score double precision,
                    strand text,
                    merged_peaks text,
                    annotation text,
                    detailed_annotation text,
                    distance_to_tss integer,
                    nearest_promoter_id text,
                    entrez_id text,
                    nearest_unigene_id text,
                    nearest_refseq_id text,
                    nearest_ensembl_id text,
                    gene_name text,
                    gene_alias text,
                    gene_description text,
                    gquad_chrom text,
                    gquad_start int,
                    gquad_end int,
                    gquad_id text,
                    gquad_seq text,
                    gquad_strand text,
                    distance_to_nearest_gquad int
                );
                """ %(schema, table))
    ## scp file to upload to Postgres' machine
    pg_upload_file= '/tmp/tmp_pg_upload.txt'
    scp_file(infile= infile, connection= conn, dest_file= pg_upload_file)
    ## Upload to postgres; NA_STRING values become SQL NULLs.
    cmd= "COPY %s.%s FROM $$%s$$ WITH DELIMITER E'\t' NULL $$%s$$;" %(schema, table, pg_upload_file, NA_STRING)
    log_cmd(log_file= CMD_FILE, logline= cmd)
    cur.execute(cmd)
    cur.close()
    pgconn.commit()
    ## Clean up the temporary copy on the database host.
    ssh_remove_file(remotefile= pg_upload_file, connection= conn)
    return()

def pg_read_table(infile, schema, table, conn, read_table_opt):
    """ Uses the read_table() Postgres function to upload a generic input file.

    infile:
        File to import (will be scp'd to /tmp/ on the Postgres host)
    schema, table:
        Destination schema and table
    conn:
        Connection dictionary, e.g. {'host': ..., 'database': ..., 'user': ...}
    read_table_opt:
        Arguments to pass to read_table. *Excluding*: file, table
    """
    pgconn= psycopg2.connect(host= conn['host'], database= conn['database'], user= conn['user'])
    try:
        cur= pgconn.cursor()
        create_schema(cur, schema)
        ## Copy the input file onto the Postgres host so read_table() can read it locally.
        pg_upload_file= '/tmp/tmp_pg_upload.txt'
        scp_file(infile= infile, connection= conn, dest_file= pg_upload_file)
        schema_table= schema + '.' + table
        if read_table_opt.strip() != '':
            read_table_opt= ', ' + read_table_opt
        ## NOTE(review): file/table/options are interpolated into the SQL string; they are
        ## assumed to come from trusted pipeline configuration, not untrusted user input.
        cmd= """SELECT read_table($quote$ file:'%s', table:'%s' %s $quote$)""" %(pg_upload_file, schema_table, read_table_opt)
        log_cmd(log_file= CMD_FILE, logline= cmd)
        cur.execute(cmd)
        cur.close()
        pgconn.commit()
    finally:
        ## Fix: the connection was previously never closed, leaking one
        ## Postgres connection per call (and on any exception above).
        pgconn.close()
    ssh_remove_file(remotefile= pg_upload_file, connection= conn)
    return()



# -----------------------------------------------------------------------------
#                                  TRITUME
# -----------------------------------------------------------------------------
  
def _wait_subprocs(procs):
    """ Wait for every subprocess in procs to terminate; return a fresh empty list. """
    for p in procs:
        p.wait()
    return []

def bam2bed_clean(bam_lims_dir, blacklist, bed_clean_dir= 'bed_clean', mapq= 20, echo= False, file_regex= r'\.bam$', output_exts= '.clean.bed'):
    """
    ___________ This function deprecated in favour of clean_bam() ______________

    Executes samtools to remove reads with mapq < n. Also, remove blacklist regions.

    bam_lims_dir <str>:
        Directory with bam files to clean. Typically os.path.join(project_dir, 'bam_lims')
    bed_clean_dir <str>:
        Directory where output clean BEDs will be. Typically os.path.join(project_dir, 'bed_clean')
    blacklist <str>:
        BED file with blacklist regions (None to skip blacklist subtraction)
    mapq <int>:
        mapq score to pass to samtools view -q (reads with less than this will be rejected)
    echo <True|False>:
        If True just log the system commands and exit without executing them
    output_exts:
        Extension to give to bed output after filtering
    file_regex:
        Process input files matching this regex
    """
    if not os.path.exists(bed_clean_dir):
        os.mkdir(bed_clean_dir)

    ## Pair each input bam with its output bed name (regex extension swap).
    bam_lims= os.listdir(bam_lims_dir)
    bam_lims= [x for x in bam_lims if re.search(file_regex, x)]
    bed_clean= [re.sub(file_regex, output_exts, x) for x in bam_lims]

    procs= []
    for bam, bed in zip(bam_lims, bed_clean):
        bamin= os.path.join(bam_lims_dir, bam)
        bedout= os.path.join(bed_clean_dir, bed)
        if blacklist is not None:
            cmd= 'samtools view -bu -q %s %s | bamToBed -i stdin | subtractBed -a stdin -b %s > %s' %(mapq, bamin, blacklist, bedout)
            log_cmd(log_file= CMD_FILE, logline= cmd)
            ## 3-stage pipe: run fewer commands in parallel. Fix: floor division
            ## so no_procs stays an int under Python 3 (was NO_SUBPROCS / 3).
            no_procs= NO_SUBPROCS // 3
        else:
            cmd= 'samtools view -bu -q %s %s | bamToBed -i stdin > %s' %(mapq, bamin, bedout)
            log_cmd(log_file= CMD_FILE, logline= cmd)
            no_procs= NO_SUBPROCS // 2
        if echo is False:
            p= subprocess.Popen( cmd, shell= True, stderr= open(STDERR, 'a'), stdout= open(STDOUT, 'a') )
            procs.append(p)
            if len(procs) >= no_procs:
                procs= _wait_subprocs(procs)
        else:
            continue
    procs= _wait_subprocs(procs)
    ## Rename bed to 'unsorted', sort, index. This part only if echo= False
    if echo is False:
        procs= []
        unsorted_beds= []
        for bed in bed_clean:
            ## Rename BEDs to 'unsorted' and launch sorting back into place.
            bedout= os.path.join(bed_clean_dir, bed)
            bed_unsorted= bedout + '.unsorted.bed'
            unsorted_beds.append(bed_unsorted)
            os.rename(bedout, bed_unsorted)
            p= igvtools_sort(infile= bed_unsorted, outfile= bedout, echo= echo)
            procs.append(p)
            if len(procs) >= NO_SUBPROCS:
                procs= _wait_subprocs(procs)
        procs= _wait_subprocs(procs)
        for bed in bed_clean:
            ## Index sorted beds
            bedout= os.path.join(bed_clean_dir, bed)
            p= igvtools_index(bedout, echo)
            procs.append(p)
            if len(procs) >= NO_SUBPROCS:
                procs= _wait_subprocs(procs)
        _wait_subprocs(procs)
        for x in unsorted_beds:
            ## Remove unsorted BEDs
            print('Removing unsorted bed file: %s' %(x))
            os.remove(x)
    return('bam2bed_clean: done')

def open_stderr_file(suffix= 'stderr.txt'):
    """ Return an open file where to send stderr from subprocess.Popen
    Creates file in dir stderr."""
    stderr_dir= 'stderr'
    if not os.path.exists(stderr_dir):
        os.makedirs(stderr_dir)
    filename= os.path.join(stderr_dir, 'stderr_' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + suffix)
    return(open(filename, 'w'))

def open_stdout_file(suffix= 'stdout.txt'):
    """ Return an open file where to send stdout from subprocess.Popen
    Creates file in dir stdout."""
    stdout_dir= 'stdout'
    if not os.path.exists(stdout_dir):
        os.makedirs(stdout_dir)
    filename= os.path.join(stdout_dir, 'stdout_' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + suffix)
    return(open(filename, 'w'))

   