'''
Created on Nov 20, 2009

@author: mkiyer
'''
import pysam
import sys
import veggie.app.ucsc as ucsc

from veggie.genome import default_genome
from veggie.genome.chrom import HG18

import glob
import os
import re
import shutil
import logging
import shutil
import collections
import multiprocessing
from optparse import OptionParser
    
def norm_rpm(x, reads, read_length):
    """Normalize a raw count by total reads and read length.

    Scales ``x`` by 1e9 / (read_length * reads) and rounds to the
    nearest integer.
    """
    scale = 1.0e9 / (read_length * reads)
    return int(round(x * scale))
    
def reads2junctions(read_iterator):
    """Extract splice junctions from an iterator of aligned reads.

    Walks each read's CIGAR; every SKIP ('N') operation is reported as a
    splice junction spanning the skipped reference interval, on the strand
    given by the read's 'XS' tag.

    Returns a sorted list of ((start, end, strand), count) tuples.

    Bug fixes vs. the previous version:
    - a junction starting at reference position 0 was treated as "no
      pending junction" (`if not jstart`), dropping its count when a new
      junction followed;
    - insertions/soft-clips/hard-clips/padding were advancing the
      reference offset even though they do not consume reference bases,
      shifting junction coordinates downstream of such ops.
    """
    # CIGAR operation codes from the SAM format specification
    MATCH = 0      # M: consumes reference
    INSERTION = 1  # I: does not consume reference
    DELETION = 2   # D: consumes reference
    SKIP = 3       # N: consumes reference (splice junction)
    SOFTCLIP = 4   # S: does not consume reference
    HARDCLIP = 5   # H: does not consume reference
    PADDING = 6    # P: does not consume reference
    # operations that advance the reference coordinate
    REF_CONSUMING = (MATCH, DELETION, SKIP)
    juncs = collections.defaultdict(int)
    for read in read_iterator:
        # offset from read.pos along the reference
        pos = 0
        for op, length in read.cigar:
            if op == SKIP:
                # the junction spans the skipped reference interval
                start = read.pos + pos
                juncs[(start, start + length, read.opt('XS'))] += 1
            if op in REF_CONSUMING:
                pos += length
    return sorted(juncs.items())

def bam2juncs(bamfile, outbedfile,
              norm=False,
              reads=0,
              read_length=0):
    """Write splice junctions found in ``bamfile`` to ``outbedfile`` as BED6.

    Only references present in the HG18 chromosome table are processed.
    If ``norm`` is True, junction counts are scaled with norm_rpm
    (``reads`` and ``read_length`` must then be nonzero).

    Returns the total (unnormalized) number of junction-spanning reads.
    """
    if norm:
        assert reads != 0
        assert read_length != 0
    njuncs = 0
    # restrict output to chromosomes UCSC/HG18 knows about
    ucsc_references = set(HG18.chrom_sizes.keys())
    for ref in bamfile.references:
        if ref not in ucsc_references:
            continue
        juncs = reads2junctions(bamfile.fetch(ref))
        # 'idx' instead of 'id' to avoid shadowing the builtin
        for idx, ((start, end, strand), count) in enumerate(juncs):
            njuncs += count
            if norm:
                count = norm_rpm(count, reads, read_length)
            # the XS tag is normally a one-character string; chr() on a
            # string raises TypeError.  Accept both a string and an
            # integer character code (older pysam behavior).
            if not isinstance(strand, str):
                strand = chr(strand)
            outbedfile.write("\t".join([ref,
                                        str(start),
                                        str(end),
                                        'JUNC' + str(idx),
                                        '%d' % count,
                                        strand]))
            outbedfile.write("\n")
    return njuncs

def bam2wig(samfile, wigglefile,  
            norm=False,
            reads=0,
            read_length=0):
    """Write per-base read coverage from ``samfile`` as fixedStep wiggle text.

    Iterates the pileup of every HG18 chromosome and emits one coverage
    value per covered base.  pysam pileup positions are 0-based while the
    wiggle ``start`` field is 1-based; the bookkeeping below converts
    between the two.  Whenever there is a gap in coverage, a new fixedStep
    block is opened one base early with a single 0 value so the first real
    coverage value lands on the correct 1-based position.

    :param samfile: open pysam Samfile
    :param wigglefile: writable file-like object receiving wiggle text
    :param norm: if True, scale coverage with norm_rpm (``reads`` and
        ``read_length`` must then be nonzero)
    """
    if norm:
        assert reads != 0
        assert read_length != 0        

    # only emit chromosomes UCSC/HG18 knows about
    ucsc_references = set(HG18.chrom_sizes.keys())    
    for chrom in samfile.references:
        if chrom not in ucsc_references:
            continue
        prev_pos = None
        for pileupcolumn in samfile.pileup(chrom):
            pos = pileupcolumn.pos  # 0-based reference position
            cov = pileupcolumn.n    # number of reads covering this base
            if norm:
                cov = norm_rpm(cov, reads, read_length)
            if prev_pos == None:
                # first covered base on this chromosome
                if pos > 0:
                    # open the block one base early (1-based start=pos)
                    # with a 0, so cov below lands on 1-based pos+1
                    wigglefile.write("fixedStep chrom=%s start=%d step=1\n" % (chrom, pos))
                    wigglefile.write("0\n")
                else:
                    # coverage starts at the chromosome start (1-based 1)
                    wigglefile.write("fixedStep chrom=%s start=%d step=1\n" % (chrom, pos+1)) 
            elif prev_pos + 1 != pos:
                # coverage gap: open a new block with a leading 0, again
                # one base early so cov lands on 1-based pos+1
                wigglefile.write("fixedStep chrom=%s start=%d step=1\n" % (chrom, pos))
                wigglefile.write("0\n")
            # advance to next position
            prev_pos = pos
            wigglefile.write("%d\n" % cov)

def wig2bigwig(infile, outfile):
    """Convert a wiggle file to bigWig with the UCSC wigToBigWig tool.

    Returns the tool's exit code (0 on success).
    """
    log = logging.getLogger("wig2bigwig")
    log.debug("Running UCSC wigToBigWig:")
    log.debug("\tinput file: %s" % infile)
    log.debug("\toutput file: %s" % outfile)
    return ucsc.wigToBigWig(infile, outfile, default_genome)

def bed2bigbed(infile, outfile):
    """Convert a BED file to bigBed with the UCSC bedToBigBed tool.

    Returns the tool's exit code (0 on success).
    """
    log = logging.getLogger("bed2bigbed")
    log.debug("Running UCSC bedToBigBed tool")
    log.debug("\tinput file: %s" % infile)
    log.debug("\toutput file: %s" % outfile)    
    return ucsc.bedToBigBed(infile, outfile, default_genome)

def bam2ucsc(insamfile, output_prefix, big=True, norm=True, keep_tmp=False):
    """Convert a BAM file to UCSC browser track files.

    Produces a coverage wiggle file (<output_prefix>.wig) and a splice
    junction BED file (<output_prefix>_junctions.bed).  When ``big`` is
    True these are additionally converted to bigWig/bigBed, and the
    intermediate text files are removed on success unless ``keep_tmp``
    is True.  When ``norm`` is True, values are normalized by the total
    read count and the most common read length.
    """
    logger = logging.getLogger("bam2ucsc")
    logger.info("Input file: %s" % (insamfile))
    logger.info("Output prefix: %s" % (output_prefix))
    logger.info("Convert to bigwig/bigbed: %s" % (big))
    logger.info("Normalize: %s" % (norm))
    logger.info("Keep tmp files: %s" % (keep_tmp))
    # open input file
    bamfile = pysam.Samfile(insamfile, 'rb')
    # count reads and tally read lengths so normalization can use the
    # most common read length
    logger.debug("Counting reads in input file")
    reads = 0
    read_lengths = collections.defaultdict(int)
    for read in bamfile.fetch():
        reads += 1
        read_lengths[read.rlen] += 1
    logger.debug("Number of reads found: %d" % (reads))
    logger.debug("Distribution of read lengths:")
    best_read_length, best_count = 0, 0
    for read_length, count in sorted(read_lengths.items()):
        logger.debug("\t%d: %d" % (read_length, count))
        # bug fix: best_count was never updated before, so the LAST read
        # length (not the most common one) was always selected
        if count > best_count:
            best_read_length, best_count = read_length, count

    # convert bam to wiggle file
    wigfile = output_prefix + '.wig'
    logger.debug("Converting from BAM to wig format")
    logger.debug("Output file: %s" % wigfile)
    logger.debug("Normalized: %s" % norm)
    wigfhd = open(wigfile, 'w')
    try:
        bam2wig(bamfile, wigfhd,
                norm=norm,
                reads=reads,
                read_length=best_read_length)
    finally:
        wigfhd.close()
    if big:
        bigwigfile = output_prefix + '.bw'
        retcode = wig2bigwig(wigfile, bigwigfile)
        if retcode != 0:
            logger.error("wigToBigWig returned error code %d" % retcode)
        elif not keep_tmp:
            # the wiggle file is an intermediate once the bigWig exists;
            # keep_tmp was previously accepted but never acted upon
            os.remove(wigfile)

    # find junctions and output as bed file
    juncfile = output_prefix + '_junctions.bed'
    logger.debug("Converting junctions to bed format")
    logger.debug("Output file: %s" % juncfile)
    logger.debug("Normalized: %s" % norm)
    juncfhd = open(juncfile, 'w')
    try:
        njuncs = bam2juncs(bamfile, juncfhd, norm=norm, reads=reads,
                           read_length=best_read_length)
    finally:
        juncfhd.close()
    logger.debug("Found junctions: %d" % njuncs)
    bamfile.close()
    if big and njuncs > 0:
        bigbedfile = output_prefix + '_junctions.bb'
        retcode = bed2bigbed(juncfile, bigbedfile)
        if retcode != 0:
            logger.error("bed2bigbed returned error code %d" % retcode)
        elif not keep_tmp:
            os.remove(juncfile)

def bam2ucsc_worker(arg):
    """Multiprocessing entry point: unpack one task tuple, run bam2ucsc.

    Returns the input filename so the parent process can log completion.
    """
    infile, prefix, big, norm, keep_tmp = arg
    bam2ucsc(infile, prefix, big=big, norm=norm, keep_tmp=keep_tmp)
    return infile

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # parse command line
    optionparser = OptionParser("usage: %prog [options] <infile.bam infile2.bam ...>")
    optionparser.add_option("-i", "--input_files", dest="input_files", default=None)
    optionparser.add_option("--input_re", dest="input_re", default=None)    
    optionparser.add_option("-n", "--name", dest="name", default=None)
    optionparser.add_option("-o", "--output_dir", dest="output_dir", default=None,
                            help="output directory [default: current directory]")
    optionparser.add_option("-b", "--big", dest="big", action="store_true", 
                            default=False,
                            help="create bigWig/bigBed files [default: %default]")
    optionparser.add_option("--norm", dest="norm", action="store_true", default=False,
                            help="normalize output [default: %default]")
    optionparser.add_option("--tmp", dest="tmp", action="store_true", default=False,
                            help="keep tmp files [default: %default]")
    optionparser.add_option("-p", "--processors", dest="processors", type="int", default=1,
                            help="divide the job into n processes")        
    (options, args) = optionparser.parse_args()

    # resolve output directory (default: current directory).  Bug fix:
    # previously the existence test was part of the outer condition, so a
    # user-supplied but nonexistent directory silently fell back to the
    # cwd and the error branch was unreachable.
    if options.output_dir is not None:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            optionparser.error("Output directory %s does not exist" % (output_dir))
    else:
        output_dir = os.getcwd()

    if options.input_files is not None:
        # glob mode: expand the pattern, optionally naming outputs from a
        # capture group of --input_re
        input_re = None
        if options.input_re is not None:
            input_re = re.compile(options.input_re)
        filenames = glob.glob(options.input_files)
        output_prefixes = []
        for filename in filenames:
            if input_re is not None:
                m = input_re.match(filename)
                if m is None:
                    optionparser.error("Error matching filename %s to regex %s" % (filename, options.input_re))
                output_prefix = os.path.join(output_dir, m.group(1))
            else:
                output_prefix = os.path.join(output_dir, os.path.basename(filename))
            output_prefixes.append(output_prefix)
    else:
        # positional mode: filenames come from the command line; --name
        # overrides the default basename-derived output name
        filenames = args
        output_prefixes = []
        for arg in args:
            if options.name is not None:
                output_name = options.name
            else:
                output_name = os.path.basename(arg).split('.')[0]
            output_prefixes.append(os.path.join(output_dir, output_name))

    # each input must map to a distinct output prefix (this check was
    # previously duplicated in both branches)
    if len(set(output_prefixes)) != len(set(filenames)):
        optionparser.error("Could not resolve a unique set of output file names corresponding to the input files")

    # start worker processes
    tasks = [(input_file, output_prefix, options.big, options.norm, options.tmp)
             for input_file, output_prefix in zip(filenames, output_prefixes)]
    pool = multiprocessing.Pool(options.processors)
    for finishedfile in pool.imap_unordered(bam2ucsc_worker, tasks):
        logging.debug("%s finished" % finishedfile)
    pool.close()
    pool.join()


#    # print track information
#    ip_address = '141.214.4.250'
#    dest_path = 'bigwig/tracks/chipseq'
#    dest_volume = 'archive10'
#    track_name = os.path.basename(outfile).split('.')[0]
#    track_header = ['track',
#                    'type=bigWig',
#                    'name=%s' % track_name,
#                    'description=%s' % track_name,
#                    'bigDataUrl=http://%s/%s/%s' % (ip_address, dest_path, os.path.basename(outfile)),
#                    'visibility=full']
#    print '# Track header for file:', outfile
#    print ' '.join(track_header)
#    # try installing file
#    dest_file = os.path.join(os.path.sep, dest_volume, dest_path, os.path.basename(outfile))
#    can_install = os.access(os.path.dirname(dest_file), os.W_OK)
#    if can_install:    
#        logger.debug("Moving file %s to destination %s" % (outfile, dest_file))
#        shutil.copyfile(outfile, dest_file)
#    else:
#        print '# Make sure to move the output file to the directory:', os.path.dirname(dest_file)
#    return can_install, ' '.join(track_header)
#    if retcode != 0:
#        logger.error("bedToBigBed returned error code %d" % retcode)
#        return False, None    
#    # print track information
#    ip_address = '141.214.4.250'
#    dest_path = 'bigwig/tracks/chipseq'
#    dest_volume = 'archive10'
#    track_name = os.path.basename(outfile).split('.')[0]    
#    track_header = ['track',
#                    'type=bigBed',
#                    'name=%s' % track_name,
#                    'description=%s' % track_name,
#                    'bigDataUrl=http://%s/%s/%s' % (ip_address, dest_path, os.path.basename(outfile)),
#                    'visibility=full']
#    print '# Track header for file:', outfile
#    print ' '.join(track_header)
#    # try installing file
#    dest_file = os.path.join(os.path.sep, dest_volume, dest_path, os.path.basename(outfile))
#    can_install = os.access(os.path.dirname(dest_file), os.W_OK)
#    if can_install:
#        logger.debug("Moving file %s to destination %s" % (outfile, dest_file))
#        shutil.copyfile(outfile, dest_file)
#    else:
#        print '# Make sure to move the output file to the directory:', os.path.dirname(dest_file)
#    return can_install, ' '.join(track_header)