'''
Created on Nov 20, 2009

@author: mkiyer
'''
import logging
import os
import shutil
import copy
import collections
import subprocess

import numpy as np
import pysam

import veggie.app.alignment.picard as picard
from veggie.io.wiggle import WiggleFileWriter

def replace_sam_header(insamfile, outsamfile, headerfile):
    """Rewrite a SAM file with the header taken from `headerfile`.

    The replacement header is copied verbatim to `outsamfile` and its @SQ
    reference names are collected.  Reads from `insamfile` are then copied
    over, skipping the original header; reads aligned to a reference absent
    from the new header are dropped (unmapped reads, rname '*', are kept).

    insamfile -- input SAM file whose header is being replaced
    outsamfile -- output SAM file path (overwritten)
    headerfile -- SAM file (or header-only file) providing the new header

    Returns outsamfile.
    """
    logger = logging.getLogger(__name__)
    references = set()  # set for O(1) per-read membership tests
    rejected_reads = 0
    with open(outsamfile, "w") as outsam:
        # copy the replacement header verbatim, collecting @SQ names
        with open(headerfile) as headersam:
            for header_line in headersam:
                header_fields = header_line.strip().split('\t')
                if header_fields[0] == '@SQ':
                    fields = dict([field.split(':', 1) for field in header_fields[1:]])
                    assert 'SN' in fields
                    references.add(fields['SN'])
                # write header directly to outfile
                outsam.write(header_line)
        # now copy the SAM file without its header
        with open(insamfile, "r") as insam:
            for line in insam:
                if line.startswith("@"):
                    continue
                # third tab-delimited column is the reference name (RNAME)
                rname = line.split('\t', 3)[2]
                if rname == '*' or rname in references:
                    outsam.write(line)
                else:
                    rejected_reads += 1
    logger.debug('replace_sam_header: removed %d reads from non-matching references' % (rejected_reads))
    return outsamfile

def process_tophat_sam(insamfile, outbamfile, headerfile, rmdup=True, tmp_path=None, keep_tmp=False):
    """Convert a tophat SAM file into a sorted, indexed BAM file.

    Steps: replace the SAM header with `headerfile` (dropping reads aligned
    to references absent from it), merge/sort to BAM with Picard, optionally
    remove duplicates with Picard MarkDuplicates, index with samtools, and
    copy the BAM and its index to `outbamfile` / `outbamfile + '.bai'`.

    insamfile -- input tophat SAM file
    outbamfile -- final BAM file path
    headerfile -- SAM file providing the replacement header
    rmdup -- run Picard MarkDuplicates when True
    tmp_path -- directory for intermediate files; NOTE(review): a None value
        would break os.path.join -- confirm callers always supply a directory
    keep_tmp -- when True, intermediate files are not removed

    Returns outbamfile.  Raises PicardError or SamtoolsError on tool failure.
    """
    logger = logging.getLogger(__name__)
    tmpfiles = []
    # all intermediates share the input file's base name
    prefix = os.path.splitext(os.path.basename(insamfile))[0]

    # replace the tophat SAM header and filter reads from bad references
    myheadersam = os.path.join(tmp_path, prefix + '.header.sam')
    tmpfiles.append(myheadersam)
    logger.debug("Filtering references from SAM %s -> SAM %s" % (insamfile, myheadersam))
    replace_sam_header(insamfile, myheadersam, headerfile=headerfile)
    nextinfile = myheadersam

    # merge SAM -> BAM
    mymergebam = os.path.join(tmp_path, prefix + '.merge.bam')
    tmpfiles.append(mymergebam)
    logger.debug("Merging SAM %s -> BAM %s" % (nextinfile, mymergebam))
    merge_app = picard.MergeSamFiles()
    retcode, resultpaths = merge_app.run([nextinfile], mymergebam)
    if retcode != 0:
        raise PicardError("picard merge error")
    nextinfile = mymergebam

    # remove duplicates
    if rmdup:
        mydupbam = os.path.join(tmp_path, prefix + '.rmdup.bam')
        tmpfiles.append(mydupbam)
        logger.debug("Removing duplicated SAM %s -> BAM %s" % (nextinfile, mydupbam))
        markdup_app = picard.MarkDuplicates()
        retcode, resultpaths = markdup_app.run(nextinfile, mydupbam)
        if retcode != 0:
            raise PicardError("picard mark duplicates error")
        nextinfile = mydupbam

    # index BAM file for samtools
    mybam = nextinfile
    mybai = mybam + '.bai'
    logger.debug("Indexing Sorting BAM %s" % (nextinfile))
    # check the exit status; previously a failed index went unnoticed and
    # the copy below would fail (or ship a stale .bai)
    retcode = subprocess.call(['samtools', 'index', nextinfile])
    if retcode != 0:
        raise SamtoolsError("samtools index error")
    tmpfiles.append(mybai)

    # copy to final output file
    shutil.copyfile(mybam, outbamfile)
    shutil.copyfile(mybai, outbamfile + '.bai')

    # remove temporary files
    if not keep_tmp:
        for tmpfile in tmpfiles:
            logger.debug("Removing %s" % tmpfile)
            os.remove(tmpfile)
    return outbamfile

def prepend_sam_header(insamfile, outsamfile, template):
    """Create `outsamfile` by prepending the header of `template` to a
    header-less SAM file.

    insamfile -- SAM file with no valid header (checked via pysam)
    outsamfile -- output SAM file path (overwritten)
    template -- SAM file whose header (in fact, whole content) is prepended;
        presumably a header-only file -- TODO confirm with callers

    Returns outsamfile.  Raises ValueError if `insamfile` already has
    references in its header or if `template` does not exist.
    """
    samfile = pysam.Samfile(insamfile, "r")
    nreferences = samfile.nreferences
    samfile.close()
    if nreferences > 0:
        raise ValueError("passed SAM file %s with valid header to" 
                         " prepend_sam_header method" % insamfile)
    if not os.path.exists(template):
        raise ValueError("template SAM file not found: %s" % template)
    # concatenate the template file with the input sam file; use 'with' so
    # all handles are closed promptly (the originals were leaked), and
    # binary mode to avoid newline translation
    with open(outsamfile, 'wb') as destination:
        with open(template, 'rb') as source:
            shutil.copyfileobj(source, destination)
        with open(insamfile, 'rb') as source:
            shutil.copyfileobj(source, destination)
    return outsamfile

def filter_references(insamfile, outsamfile, references):
    """Filter a SAM file, keeping only the given references.

    @SQ header lines for references not in `references` are removed, and
    reads aligned to those references are dropped (unmapped reads, rname
    '*', are kept).

    insamfile -- input SAM file
    outsamfile -- output SAM file path (not created when references is None)
    references -- iterable of reference names to keep, or None for a no-op

    Returns outsamfile, or None when references is None.

    Fixes over the previous version: an empty input file raised NameError,
    and a header-only file re-processed its last header line as a read.
    """
    if references is None:
        return
    logger = logging.getLogger(__name__)
    references = set(references)  # O(1) per-line membership tests
    rejected_reads = 0
    with open(insamfile, "r") as insam, open(outsamfile, "w") as outsam:
        for line in insam:
            if line.startswith("@"):
                header_fields = line.strip().split('\t')
                if header_fields[0] == '@SQ':
                    fields = dict([field.split(':', 1) for field in header_fields[1:]])
                    assert 'SN' in fields
                    if fields['SN'] not in references:
                        logger.debug('filter_references: rejected reference %s' % fields['SN'])
                        continue
                outsam.write(line)
            else:
                # third tab-delimited column is the reference name (RNAME)
                rname = line.split('\t', 3)[2]
                if rname == '*' or rname in references:
                    outsam.write(line)
                else:
                    rejected_reads += 1
    logger.debug('filter_references: removed %d reads from non-matching references' % (rejected_reads))
    return outsamfile




def generate_pileup_chunks(read_iterator, 
                           start, end, 
                           unique_only=True, 
                           merge_strands=False,
                           fragment_length=-1,
                           dtype=np.uint32,
                           max_rlen=2048,
                           chunk_size=8192):
    '''
    Generator yielding per-base read coverage over [start, end) in
    fixed-size windows.

    Yields (chunk_start, chunk_end, chunk_data) tuples where chunk_start
    and chunk_end are offsets relative to `start` and chunk_data is a numpy
    array of coverage counts for that window.  Windows with no coverage are
    silently skipped, so callers must start from a zeroed array.

    read_iterator -- iterator of pysam AlignedRead-like objects (only .pos,
        .rlen, .is_duplicate, .is_reverse are used); MUST be sorted by
        position (asserted below)
    start, end -- region boundaries, end > start (asserted)
    unique_only -- skip reads flagged as PCR/optical duplicates
    merge_strands -- shift reverse-strand read starts back by
        (fragment_length - read length) so both strands pile up over the
        estimated fragment interval
    fragment_length -- effective length each read covers; if <= 0 it is
        taken from the read length (see NOTE below)
    dtype -- numpy dtype of the coverage array
    max_rlen -- upper bound on a single read's length (asserted per read)
    chunk_size -- nominal width of each yielded window; must be >= max_rlen

    don't use this function with RNA-seq data because it does not pileup
    spliced reads properly
    '''
    assert chunk_size >= max_rlen
    assert end > start 
    # figure out the boundaries of the first chunk
    chunk_bounds = (start,
                    min(start + chunk_size, end))
    # allocate an array to store the largest possible chunk, plus max_rlen
    # of overhang so a read starting near the chunk end can spill past it
    chunk_data = np.zeros((chunk_bounds[1] - chunk_bounds[0] + max_rlen,), dtype=dtype)
    # chunk_dirty tracks whether the current window holds any coverage;
    # clean windows are advanced past without being yielded
    chunk_dirty = False
    # reads must be sorted, so implement a check for this
    prev_read_pos = -1
    # iterate through reads
    for read in read_iterator:
        # ignore duplicate reads
        if unique_only and read.is_duplicate:
            continue            
        # get attributes from AlignedRead object
        read_start = read.pos
        read_length = read.rlen
        # ensure reads are sorted
        assert read_start >= prev_read_pos
        prev_read_pos = read_start
        # ensure reads aren't too long
        assert read_length <= max_rlen
        # adjust fragment length if too small (can't be smaller than read length)
        # NOTE(review): this rebinds fragment_length for ALL subsequent reads
        # based on the first read processed -- confirm that is intended
        if fragment_length <= 0:
            fragment_length = read_length
        # shift the reverse strand reads if the merge_strands option is enabled
        if merge_strands is True:            
            if read.is_reverse:
                read_start = max(0, read_start + read_length - fragment_length)
        # now that negative strand tags are shifted, modify the effective read 
        # length to the user specified a DNA fragment length
        read_length = fragment_length        
        # only consider reads that align within the desired region; reads are
        # sorted, so the first read past `end` terminates the scan
        if read_start >= end:
            break
        if (read_start + read_length) > start:
            # if the read starts after the end of the current chunk, need to write the 
            # chunk and shift to the next chunk
            while read_start >= chunk_bounds[1]:
                if chunk_dirty:
                    # yield chunk (coordinates relative to `start`)
                    yield (chunk_bounds[0]-start), (chunk_bounds[1]-start), chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
                    # add chunk to array
                    #arr[chunk_bounds[0]-start:chunk_bounds[1]-start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
                    # shift end of chunk to beginning of next chunk and clear rest of array
                    chunk_data[0:max_rlen] = chunk_data[-max_rlen:]
                    chunk_data[max_rlen:] = 0
                    # check if chunk no longer dirty
                    chunk_dirty = chunk_data[0:max_rlen].any()
                # get next chunk
                chunk_bounds = (chunk_bounds[0] + chunk_size,
                                min(chunk_bounds[1] + chunk_size, end))
            # add coverage from the current read (clipped to chunk start)
            chunk_data[max(0, read_start - chunk_bounds[0]):read_start + read_length - chunk_bounds[0]] += 1
            chunk_dirty = True
    # flush last chunks: overhang carried in the first max_rlen cells may
    # keep producing dirty windows after the read loop ends
    while chunk_dirty and (chunk_bounds[0] < end):
        # yield chunk
        yield (chunk_bounds[0]-start), (chunk_bounds[1]-start), chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
        # shift end of chunk to beginning of next chunk and clear rest of array
        chunk_data[0:max_rlen] = chunk_data[-max_rlen:]
        chunk_data[max_rlen:] = 0
        # check if chunk no longer dirty
        chunk_dirty = chunk_data[0:max_rlen].any()
        # get next chunk
        chunk_bounds = (chunk_bounds[0] + chunk_size,
                        min(chunk_bounds[1] + chunk_size, end))        
    # delete chunk array
    del chunk_data

def pileup_reads(read_iterator, 
                 start, end, 
                 dtype=np.uint32,
                 **kwargs):
    """Accumulate per-base read coverage over [start, end) into one array.

    Coverage chunks produced by generate_pileup_chunks are summed into a
    single array of length (end - start).  Extra keyword arguments are
    forwarded to generate_pileup_chunks.
    """
    assert end > start
    coverage = np.zeros(end - start, dtype=dtype)
    chunks = generate_pileup_chunks(read_iterator, start, end, dtype=dtype, **kwargs)
    for chunk_start, chunk_end, chunk_data in chunks:
        # chunk coordinates are already relative to `start`
        coverage[chunk_start:chunk_end] += chunk_data
    return coverage

def bam_to_wiggle(inbamfile, wigfile, 
                  unique_only=False,
                  merge_strands=False,
                  fragment_length=-1,
                  norm=False):
    """Convert BAM read coverage to a compressed wiggle file.

    inbamfile -- indexed BAM file
    wigfile -- output wiggle file path
    unique_only -- skip duplicate-flagged reads during pileup
    merge_strands -- shift reverse-strand reads onto the forward strand
    fragment_length -- effective fragment length passed to
        generate_pileup_chunks
    norm -- scale coverage to reads-per-billion-bases using the dominant
        read length (or fragment_length, whichever is larger)
    """
    logger = logging.getLogger(__name__)
    bamfile = pysam.Samfile(inbamfile, 'rb')

    # count reads and get other info from BAM file
    reads = 0
    read_lengths = collections.defaultdict(lambda: 0)
    for read in bamfile.fetch():
        # only consider mapped reads for now
        if read.is_unmapped:
            continue
        reads += 1
        read_lengths[read.rlen] += 1
    # find normalization factor
    if norm == True:
        # find the most common read length
        best_read_length, best_count = 0, 0
        # .items() instead of py2-only .iteritems() (works on both)
        for read_length, count in read_lengths.items():
            if count > best_count:
                best_count = count
                best_read_length = read_length
        # a BAM file with no mapped reads cannot be normalized (this also
        # guards the division below against ZeroDivisionError)
        assert reads > 0
        assert best_read_length > 0
        # normalize by read length and number of mapped reads
        norm_factor = (1.0e9) / (max(best_read_length, fragment_length) * reads)
    else:
        norm_factor = 1.0

    refs = bamfile.references
    lengths = bamfile.lengths
    wigglewriter = WiggleFileWriter(wigfile, compress=True, span=10)
    # convert each chromosome to wiggle
    for ref, length in zip(refs, lengths):
        # pileup the reads chunks at a time        
        for pileupchunk in generate_pileup_chunks(bamfile.fetch(ref), 
                                                  start=0, 
                                                  # TODO: some wiggle writing error with length going past limit
                                                  end=length - max(0, fragment_length), 
                                                  unique_only=unique_only, 
                                                  merge_strands=merge_strands,
                                                  fragment_length=fragment_length,
                                                  chunk_size=1048576):
            chunk_start, chunk_end, chunk_data = pileupchunk
            if norm == True:
                # multiply out-of-place: chunk_data has an unsigned integer
                # dtype, so the previous in-place '*=' with a float factor
                # truncated the normalized values (or raises a numpy
                # casting error under same_kind rules)
                chunk_data = chunk_data * norm_factor
            #wigglewriter.write_variable_step(ref, chunk_start, chunk_end, chunk_data)
            wigglewriter.write_span(ref, chunk_start, chunk_end, chunk_data)
        logger.debug("BAM %s -> WIG %s chromsome %s finished" % (inbamfile, wigfile, ref))    
    # wiggle file done
    wigglewriter.close()
    # done with BAM file
    bamfile.close()

class SamtoolsError(Exception):
    """Raised when an invocation of samtools fails."""

    def __init__(self, value):
        # keep the offending value/message available for inspection
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)

class PicardError(Exception):
    '''exception raised in case of an error incurred while running a Picard tool.'''
    # (the previous docstring said "samtools library" -- a copy-paste of
    # SamtoolsError's docstring)
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)



def filter_reads(inbamfile, outbamfile,
                 unique_only=True,
                 references=None):
    """Filter a BAM file, removing duplicate reads and/or reads aligned to
    undesired references.

    inbamfile -- input BAM file
    outbamfile -- output BAM file (not created when there is nothing to do)
    unique_only -- drop reads flagged as duplicates
    references -- iterable of reference names to keep, or None to keep all

    Returns outbamfile, or None when no filtering was requested.

    Bug fix: when references are pruned from the output header, reference
    ids (tids) shift.  Previously reads were written with their OLD tid
    against the pruned @SQ list, mislabeling their reference in the output.
    Reads (and mate references) are now remapped to the new tids.
    """
    if (unique_only is False and references is None):
        # nothing to filter; note that outbamfile is not created
        return
    logger = logging.getLogger(__name__)
    samfile = pysam.Samfile(inbamfile, "rb")
    tid_map = None
    if references is not None:
        # convert to set to do fast lookup
        references = set(references)
        rejected_references = set()
        # create a new header that prunes out undesired references
        new_header = copy.deepcopy(samfile.header)
        new_header['SQ'] = []
        # map old tid -> new tid; @SQ order defines tid numbering, so
        # enumerate over the original header in order
        tid_map = {}
        for old_tid, field_dict in enumerate(samfile.header['SQ']):
            if (('SN' in field_dict) and 
                (field_dict['SN'] not in references)):
                rejected_references.add(field_dict['SN'])
                logger.debug('filter_reads: rejected reference %s' % (field_dict['SN']))
                continue
            tid_map[old_tid] = len(new_header['SQ'])
            new_header['SQ'].append(field_dict)
        filtered = pysam.Samfile(outbamfile, "wb", header=new_header)
    else:
        filtered = pysam.Samfile(outbamfile, "wb", template=samfile)

    rejected_reads = 0
    duplicate_reads = 0
    for read in samfile.fetch():
        if read.is_duplicate and unique_only:
            duplicate_reads += 1
            continue
        if references is not None:
            if samfile.getrname(read.rname) in rejected_references:
                rejected_reads += 1
                continue
            # remap reference ids to match the pruned output header
            read.rname = tid_map[read.rname]
            if read.mrnm >= 0:
                # mate reference may itself have been pruned; mark the
                # mate unmapped (-1) in that case
                read.mrnm = tid_map.get(read.mrnm, -1)
        filtered.write(read)
    filtered.close()
    samfile.close()
    logger.debug('filter_reads: removed %d duplicate reads' % (duplicate_reads))
    logger.debug('filter_reads: rejected %d reads from undesired references' % (rejected_reads))
    return outbamfile


def prepare_bam(insamfile, 
                outsamfile=None,
                unique_only=True,
                template=None,
                references=None,
                tmp_path=None,
                keep_tmp=False):
    '''
    this function converts a samfile into a sorted bamfile with duplicate reads marked

    Pipeline: (1) repair a missing SAM header by prepending `template`,
    (2) merge/sort to BAM with Picard MergeSamFiles, (3) mark duplicates
    with Picard MarkDuplicates and index, (4) optionally filter duplicate
    reads and/or undesired references and re-index.  The final BAM path is
    derived from `outsamfile` (or `insamfile` when outsamfile is None) with
    a '.bam' extension; if the final BAM and index already exist the
    function returns immediately.

    insamfile -- input SAM file
    outsamfile -- optional basis for the output file names
    unique_only -- remove duplicate-flagged reads in the filter step
    template -- SAM file whose header is prepended when the input has none
    references -- optional collection of reference names to keep
    tmp_path -- directory for intermediate files (defaults to the output
        file's directory when None or nonexistent)
    keep_tmp -- keep intermediates and copy (rather than move) the results

    Returns the path of the final BAM file.  Raises PicardError on a failed
    Picard step and ValueError when the header is broken and no template
    is given.
    '''
    logger = logging.getLogger(__name__)
    logging_prefix='prepare_bam'
    tmp_files = set([])

    # set file names
    if outsamfile is None:
        outsamfile_prefix = os.path.basename(os.path.splitext(insamfile)[0])
        final_bamfile = os.path.splitext(insamfile)[0] + '.bam'
        final_index_file = os.path.splitext(final_bamfile)[0] + '.bai'    
    else:
        outsamfile_prefix = os.path.basename(os.path.splitext(outsamfile)[0])
        final_bamfile = os.path.splitext(outsamfile)[0] + '.bam'
        final_index_file = os.path.splitext(final_bamfile)[0] + '.bai'    
    # set path for tmp files
    if (tmp_path is None) or (not os.path.exists(tmp_path)):
        tmp_path = os.path.dirname(final_bamfile)
        logger.debug("setting tmp_path to %s" % (tmp_path))
    # check if the final result exists
    if (os.path.exists(final_bamfile) and 
        os.path.exists(final_index_file)):
        logger.debug('%s: final prepare files %s,%s exists' % (logging_prefix, 
                                                               final_bamfile,
                                                               final_index_file))
        return final_bamfile
    # after each step rename the output; result_files is always
    # (data file, index file or None)
    result_files = (insamfile, None)

    # check references first: a SAM file with zero references has no
    # usable header and must be repaired before Picard can process it
    mysamfile = pysam.Samfile(result_files[0], "r")
    nreferences = mysamfile.nreferences
    mysamfile.close()
    if nreferences == 0:
        logger.debug("%s: SAM file has no header" % (logging_prefix))
        if template is None:
            logger.debug("%s: no template header specified, cannot repair SAM header" % (logging_prefix))
            raise ValueError("template SAM file needed to fix broken header, but none found")
        # fix SAM header by prepending template header
        logger.debug("%s: prepending header and copying SAM file" % (logging_prefix))
        header_samfile = os.path.join(tmp_path, outsamfile_prefix + '.header.sam')
        tmp_files.add(header_samfile)
        header_samfile = prepend_sam_header(result_files[0], header_samfile, template)
        # reset the current output
        result_files = (header_samfile, None)

    # merge step (Picard MergeSamFiles also sorts and converts SAM -> BAM)
    merge_bamfile = os.path.join(tmp_path, outsamfile_prefix + '.merge.bam')
    tmp_files.add(merge_bamfile)
    merge_app = picard.MergeSamFiles()
    retcode, resultpaths = merge_app.run([result_files[0]], merge_bamfile)
    logger.debug('%s: picard merge returned %d' % (logging_prefix, retcode))
    if retcode != 0: raise PicardError("picard merge error")
    # set results
    result_files = (merge_bamfile, None)

    # mark duplicates step
    markdup_bamfile = os.path.join(tmp_path, outsamfile_prefix + '.markdup.bam')    
    tmp_files.add(markdup_bamfile)
    markdup_app = picard.MarkDuplicates()
    retcode, resultpaths = markdup_app.run(result_files[0], markdup_bamfile)
    logger.debug('%s: picard mark duplicates returned %d' % (logging_prefix, retcode))
    if retcode != 0: raise PicardError("picard mark duplicates error")    
    # index the BAM file (skipped when an index is already present)
    markdup_index_file = markdup_bamfile + '.bai'
    tmp_files.add(markdup_index_file)
    if os.path.exists(markdup_index_file):
        logger.debug('%s: index file %s exists' % (logging_prefix, markdup_index_file))
    else:
        logger.debug('%s: indexing file %s' % (logging_prefix, markdup_bamfile))
        pysam.index(markdup_bamfile)
    # set results; from here on result_files[1] is always a real index file
    result_files = (markdup_bamfile, markdup_index_file)
    
    # remove marked duplicates and/or remove duplicate reads
    if unique_only or (references is not None):
        filter_bamfile = os.path.splitext(result_files[0])[0] + '.filter.bam'
        tmp_files.add(filter_bamfile)
        if os.path.exists(filter_bamfile):
            logger.debug("%s: filtered reads file %s exists" % (logging_prefix,
                                                                filter_bamfile))
        else:
            logger.debug("%s: filtering reads" % (logging_prefix))
            filter_reads(result_files[0], filter_bamfile, unique_only, references)            
        # index the BAM file
        filter_index_file = filter_bamfile + '.bai'
        tmp_files.add(filter_index_file)
        if os.path.exists(filter_index_file):
            logger.debug('%s: index file %s exists' % (logging_prefix, filter_index_file))
        else:
            logger.debug('%s: indexing file %s' % (logging_prefix, filter_bamfile))
            pysam.index(filter_bamfile)
        result_files = (filter_bamfile, filter_index_file)
    
    if not keep_tmp:
        logger.debug('%s: removing temporary files' % (logging_prefix))
        # remove temporary files and rename output; the final results are
        # excluded from the removal set first
        tmp_files.difference_update(set(result_files))
        for f in tmp_files:
            os.remove(f)
        logger.debug('%s: renaming results files' % (logging_prefix))  
        shutil.move(result_files[0], final_bamfile)
        shutil.move(result_files[1], final_index_file)
    else:
        # keep intermediates: copy instead of move
        shutil.copyfile(result_files[0], final_bamfile)
        shutil.copyfile(result_files[1], final_index_file)
    return final_bamfile

def prepare_bam_many(insamfiles, 
                     bamfile,
                     unique_only=True,
                     template=None,
                     references=None,
                     tmp_path=None,
                     keep_tmp=False):
    '''
    this function converts a list of input samfiles into a sorted bamfile with 
    duplicate reads marked

    Pipeline: repair any input SAM files that lack a header (requires
    `template`), merge all inputs into one sorted BAM with Picard
    MergeSamFiles, mark duplicates with Picard MarkDuplicates and index,
    then optionally filter duplicate reads and/or undesired references.
    If the final BAM and index already exist the function returns
    immediately.

    insamfiles -- list of input SAM files
    bamfile -- final output BAM file path
    unique_only -- remove duplicate-flagged reads in the filter step
    template -- SAM file whose header is prepended to header-less inputs
    references -- optional collection of reference names to keep
    tmp_path -- directory for intermediate files (defaults to the output
        file's directory when None or nonexistent)
    keep_tmp -- keep intermediates and copy (rather than move) the results

    Returns `bamfile`.  Raises PicardError on a failed Picard step and
    ValueError when a header is broken and no template is given.
    '''
    logger = logging.getLogger(__name__)
    # was copy-pasted as 'prepare_bam', which mislabeled all log messages
    logging_prefix='prepare_bam_many'
    tmp_files = set([])

    # set file names
    bamfile_prefix = os.path.basename(os.path.splitext(bamfile)[0])
    bamfile_index = os.path.splitext(bamfile)[0] + '.bai'    
    # set path for tmp files
    if (tmp_path is None) or (not os.path.exists(tmp_path)):
        tmp_path = os.path.dirname(bamfile)
        logger.debug("setting tmp_path to %s" % (tmp_path))
    # check if the final result exists
    if (os.path.exists(bamfile) and 
        os.path.exists(bamfile_index)):
        logger.debug('%s: final prepare files %s,%s exists' % (logging_prefix, 
                                                               bamfile,
                                                               bamfile_index))
        return bamfile
    # setup result files
    result_files = insamfiles

    # check references first: inputs with zero references have no usable
    # header and must be repaired before Picard can process them
    new_result_files = []
    for file_index, insamfile in enumerate(result_files):
        mysamfile = pysam.Samfile(insamfile, "r")
        nreferences = mysamfile.nreferences
        mysamfile.close()
        if nreferences == 0:
            logger.debug("%s: SAM file has no header" % (logging_prefix))
            if template is None:
                logger.debug("%s: no template header specified, cannot repair SAM header" % (logging_prefix))
                raise ValueError("template SAM file needed to fix broken header, but none found")
            # fix SAM header by prepending template header; include the input
            # index in the tmp file name -- previously every header-less
            # input shared ONE '.header.sam' name, so each repair clobbered
            # the previous one
            logger.debug("%s: prepending header and copying SAM file" % (logging_prefix))
            header_samfile = os.path.join(tmp_path, '%s.%d.header.sam' % (bamfile_prefix, file_index))
            tmp_files.add(header_samfile)
            header_samfile = prepend_sam_header(insamfile, header_samfile, template)
            # add to results
            new_result_files.append(header_samfile)
        else:
            new_result_files.append(insamfile)
    # new output
    result_files = new_result_files

    # merge step (a stray no-op 'subprocess.call' statement was removed here)
    # TODO: incorporate index information in config file
    merge_bamfile = os.path.join(tmp_path, bamfile_prefix + '.merge.bam')
    tmp_files.add(merge_bamfile)
    merge_app = picard.MergeSamFiles()
    retcode, resultpaths = merge_app.run(result_files, merge_bamfile)
    logger.debug('%s: picard merge returned %d' % (logging_prefix, retcode))
    if retcode != 0: raise PicardError("picard merge error")
    # set results; from here on result_files is (data file, index file)
    result_files = (merge_bamfile, None)

    # mark duplicates step
    markdup_bamfile = os.path.join(tmp_path, bamfile_prefix + '.markdup.bam')    
    tmp_files.add(markdup_bamfile)
    markdup_app = picard.MarkDuplicates()
    retcode, resultpaths = markdup_app.run(result_files[0], markdup_bamfile)
    logger.debug('%s: picard mark duplicates returned %d' % (logging_prefix, retcode))
    if retcode != 0: raise PicardError("picard mark duplicates error")    
    # index the BAM file (skipped when an index is already present)
    markdup_index_file = markdup_bamfile + '.bai'
    tmp_files.add(markdup_index_file)
    if os.path.exists(markdup_index_file):
        logger.debug('%s: index file %s exists' % (logging_prefix, markdup_index_file))
    else:
        logger.debug('%s: indexing file %s' % (logging_prefix, markdup_bamfile))
        pysam.index(markdup_bamfile)
    # set results
    result_files = (markdup_bamfile, markdup_index_file)
    
    # remove marked duplicates and/or remove duplicate reads
    if unique_only or (references is not None):
        filter_bamfile = os.path.splitext(result_files[0])[0] + '.filter.bam'
        tmp_files.add(filter_bamfile)
        if os.path.exists(filter_bamfile):
            logger.debug("%s: filtered reads file %s exists" % (logging_prefix,
                                                                filter_bamfile))
        else:
            logger.debug("%s: filtering reads" % (logging_prefix))
            filter_reads(result_files[0], filter_bamfile, unique_only, references)            
        # index the BAM file
        filter_index_file = filter_bamfile + '.bai'
        tmp_files.add(filter_index_file)
        if os.path.exists(filter_index_file):
            logger.debug('%s: index file %s exists' % (logging_prefix, filter_index_file))
        else:
            logger.debug('%s: indexing file %s' % (logging_prefix, filter_bamfile))
            pysam.index(filter_bamfile)
        result_files = (filter_bamfile, filter_index_file)
    
    if not keep_tmp:
        logger.debug('%s: removing temporary files' % (logging_prefix))
        # remove temporary files (excluding the final results) and rename output
        tmp_files.difference_update(set(result_files))
        for f in tmp_files:
            os.remove(f)
        logger.debug('%s: renaming results files' % (logging_prefix))  
        shutil.move(result_files[0], bamfile)
        shutil.move(result_files[1], bamfile_index)
    else:
        # keep intermediates: copy instead of move
        shutil.copyfile(result_files[0], bamfile)
        shutil.copyfile(result_files[1], bamfile_index)
    return bamfile
