'''
Created on Sep 2, 2009

@author: mkiyer
'''

from array import array
from veggie.align.alignment import maps_to_genome
from veggie.genome.chrom import get_chrom_names, get_chrom_length
import h5py
import logging
import numpy as np
import os
import sys

class FixedWidthReadTrack(object):
    '''
    Stores the 5' start positions of fixed-width reads, organized by
    chromosome and strand, and generates base-pair coverage maps from them.
    '''
    # array typecode used for position and coverage arrays (unsigned int)
    DTYPE = 'I'
    
    def __init__(self, read_length):
        '''
        read_length: fixed width (in bases) of every read in this track
        '''
        self.read_length = read_length
        # chrom -> [plus-strand array, minus-strand array] of 5' positions
        self.reads = {}
        # chrom -> [plus, minus] arrays of per-position read counts
        self.read_cov = {}
        self.strands = 2
        # total number of reads added (including duplicates)
        self.total = 0
        # number of distinct (position, strand) reads; set by merge_nonunique
        self.total_unique = 0
        # True once reads are sorted and duplicate positions collapsed
        self.well_merged = False
    
    def add(self, chrom, fiveendpos, strand):
        '''
        Add a range to the list according to the sequence name.
        
        chrom: chromosome name
        fiveendpos: 5' position of start of the read
        strand: 0 - plus, 1 - minus
        '''
        if chrom not in self.reads:
            # one array for (+) strand and one for (-) strand
            self.reads[chrom] = [array(self.DTYPE), array(self.DTYPE)]
            self.read_cov[chrom] = [array(self.DTYPE), array(self.DTYPE)]
        self.reads[chrom][strand].append(fiveendpos)
        self.read_cov[chrom][strand].append(1)
        self.total += 1

    def get_chrom_names(self):
        '''
        Return all the chromosome names stored in this track object,
        sorted alphabetically.
        '''
        return sorted(self.reads)
            
    def merge_nonunique(self):
        '''
        Merges all the non-unique reads together and compresses the 
        read arrays.  This also calculates the number of 
        unique/non-unique reads.
        
        Based on MACS code:
        http://liulab.dfci.harvard.edu/MACS/
        Copyright (c) 2007 Tao Liu <taoliu@jimmy.harvard.edu>
        '''
        self.total = 0
        self.total_unique = 0
        # chromosome (values are replaced in-place; key set is unchanged)
        for chrom in self.reads:
            # strand
            for strand in range(self.strands):
                strand_reads = sorted(self.reads[chrom][strand])
                if len(strand_reads) < 1:
                    logging.warning("NO records for chromosome %s, strand %d!" % (chrom, strand))                    
                    new_strand_reads = []
                    new_strand_cov = []
                else:
                    new_strand_reads = array(self.DTYPE, [strand_reads[0]])
                    new_strand_cov = array(self.DTYPE, [1])                    
                    new_strand_index = 0
                    # iterate through reads, eliminating reads with the same start location
                    for chrom_start in strand_reads[1:]:
                        if chrom_start == new_strand_reads[new_strand_index]:
                            try:
                                new_strand_cov[new_strand_index] += 1
                            except OverflowError:
                                # NOTE(review): the 65535 cap suggests a 16-bit
                                # typecode ('H') was once used; with 'I' this
                                # only triggers at the 32-bit limit
                                logging.warning("> 65535 + strand tags mapped to position %d on chromosome %s!" % (chrom_start, chrom))
                                new_strand_cov[new_strand_index] = 65535
                        else:
                            new_strand_reads.append(chrom_start)
                            new_strand_cov.append(1)
                            new_strand_index += 1
                    # add to total reads
                    self.total_unique += len(new_strand_reads)
                    self.total += sum(new_strand_cov)
                # replace old data 
                self.reads[chrom][strand] = new_strand_reads
                self.read_cov[chrom][strand] = new_strand_cov
        self.well_merged = True

    def merge_strands(self):
        '''
        combine reads from plus and minus strands together
        
        - total_unique will be computed considering reads at the same 
          position but different strands to be different (so up to 2
          reads can occur at each position, one on each strand)
        
        note: this method does not shift the minus strand tags when merging

        Based on MACS code:
        http://liulab.dfci.harvard.edu/MACS/
        Copyright (c) 2007 Tao Liu <taoliu@jimmy.harvard.edu>        
        '''
        # reads must be sorted and merged for this method to work
        if not self.well_merged:
            self.merge_nonunique()
        # total_unique is no longer meaningful after strand merging
        self.total_unique = None
        self.total = 0

        for chrom in self.reads:
            (plus_reads, minus_reads) = self.reads[chrom]
            (plus_cov, minus_cov) = self.read_cov[chrom]
            merged_reads = array(self.DTYPE, [])
            merged_cov = array(self.DTYPE, [])
            # iterate through sorted plus and minus tags and create
            # single sorted strand
            ip = 0
            im = 0
            lenp = len(plus_reads)
            lenm = len(minus_reads)
            while ip < lenp and im < lenm:
                if plus_reads[ip] < minus_reads[im]:
                    merged_reads.append(plus_reads[ip])
                    merged_cov.append(plus_cov[ip])
                    ip += 1
                else:
                    merged_reads.append(minus_reads[im])
                    merged_cov.append(minus_cov[im])
                    im += 1
            if im < lenm:
                # add rest of minus tags
                merged_reads.extend(minus_reads[im:])
                merged_cov.extend(minus_cov[im:])
            if ip < lenp:
                # add rest of plus tags
                merged_reads.extend(plus_reads[ip:])
                merged_cov.extend(plus_cov[ip:])           
            # convert to a single-stranded track
            self.strands = 1
            self.reads[chrom] = [merged_reads, array(self.DTYPE, [])]
            self.read_cov[chrom] = [merged_cov, array(self.DTYPE, [])]
            # TODO: this is not a well-merged track, because duplicate
            # reads on different strands are not merged into one position
            # reset coverage counts
            self.total += len(merged_reads)   

    def gen_coverage_range_hdf5(self,
                                subgroup,
                                chrom,
                                chrom_start = 0,
                                chrom_end = None,
                                dtype=np.float64,
                                **kwargs):
        '''
        subgroup: h5py HDF5 file/subgroup to add data to

        the rest of the arguments are the same as gen_coverage_chrom

        kwargs:
        -------
        compression: h5py compression to use, default 'lzf' (see h5py manual)        
        rest of kwargs are passed to gen_coverage_chrom
        '''        
        default_compression = 'lzf'

        # check chrom_start, chrom_end parameters
        if chrom_end is None:
            # set to end of chromosome
            chrom_end = get_chrom_length(chrom)
        if chrom_start >= chrom_end:            
            logging.error("invalid range: chrom_start %d >= chrom_end %d" % (chrom_start, chrom_end))
            raise ValueError("invalid range: chrom_start %d >= chrom_end %d" % (chrom_start, chrom_end))

        # get hdf5 dataset if already exists, otherwise create it
        # using require_dataset provides some additional checking
        # to make sure the dataset is compatible
        # TODO: enclose in try,except block
        dset_len = chrom_end - chrom_start
        compression = kwargs.get('compression', default_compression)
        dset = subgroup.require_dataset(chrom,
                                        (dset_len,),
                                        dtype=dtype,
                                        compression=compression)
        self.gen_coverage_range(chrom, chrom_start, chrom_end, arr=dset,
                                **kwargs)


    def gen_coverage_range(self,
                           chrom,
                           chrom_start=0,
                           chrom_end=None,
                           unique_only=True,
                           read_length_norm=True,
                           reads_per_million=True,
                           arr=None,
                           dtype=np.float64,
                           **kwargs):
        '''
        Generate a coverage map from the reads in this track

        chrom: chromosome to process
        
        optional args:
        --------------
        chrom_start, chrom_end: only calculate coverage within this interval
        unique_only: disregard non-unique reads (True/False)        
        read_length_norm: normalize coverage by read length (True/False)
        reads_per_million: normalize coverage by number of reads in library
        arr: array to store coverage data, must have shape
             equal to (chrom_end - chrom_start,).  coverage will be
             stored in the format matching the array 'dtype' attribute
        dtype: datatype of coverage array.  only used if 'arr' is None

        kwargs:
        -------        
        chunk_size: size of array to use as cache in memory (default 1e7)        
        '''
        default_chunk_size = 1e7

        # sort and merge non-unique reads
        if not self.well_merged:
            self.merge_nonunique()

        # check chrom_start, chrom_end parameters
        if chrom_end is None:
            # set to end of chromosome
            chrom_end = get_chrom_length(chrom)
        if chrom_start >= chrom_end:            
            logging.error("invalid range: chrom_start %d >= chrom_end %d" % (chrom_start, chrom_end))
            raise ValueError("invalid range: chrom_start %d >= chrom_end %d" % (chrom_start, chrom_end))

        # allocate array if not passed in
        if arr is None:
            arr = np.zeros((chrom_end - chrom_start,), dtype=dtype)
        else:
            assert arr.shape == (chrom_end - chrom_start,)

        # normalize to read length
        if read_length_norm:
            increment = 1.0 / float(self.read_length)
        else:
            increment = 1
        # normalize to reads per million reads in library
        if reads_per_million:
            if unique_only:
                increment *= (1e6 / float(self.total_unique))
            else:
                increment *= (1e6 / float(self.total))    
        
        # get references to arrays and properties
        chunk_size = kwargs.get('chunk_size', default_chunk_size)

        # process each strand separately
        for strand in range(self.strands):
            chrom_reads = self.reads[chrom][strand]
            chrom_cov = self.read_cov[chrom][strand]
            # call function to pileup coverage        
            _process_reads_cached(arr, chrom_reads, chrom_cov, 
                                  chrom_start, chrom_end, 
                                  self.read_length, increment, 
                                  unique_only=unique_only, 
                                  chunk_size=chunk_size)
        return arr

def _process_reads_cached(arr, chrom_reads, chrom_cov, 
                          chrom_start, chrom_end, 
                          read_length, increment,
                          unique_only=True, 
                          chunk_size=1e7):
    assert chunk_size > read_length
    # figure out the boundaries of the first chunk
    chunk_bounds = (chrom_start,
                    min(chrom_start + chunk_size, chrom_end))
    # allocate an array to store the largest possible chunk
    chunk_data = np.zeros((chunk_bounds[1] - chunk_bounds[0] + read_length,), dtype=arr.dtype)
    chunk_dirty = False

    # "fast-forward" to find index at chrom_start
    read_index = 0
    while read_index < len(chrom_reads):
        read_start = chrom_reads[read_index]
        if read_start > chrom_start:
            # found the first read after the chrom_start, so bail out of 
            # this loop
            break
        if (read_start + read_length) > chrom_start:
            # this read begins outside the desired range but ends
            # inside it, so capture this coverage
            read_cov = increment if unique_only else (chrom_cov[read_index] * increment)
            chunk_data[0:read_start + read_length - chunk_bounds[0]] += read_cov
            chunk_dirty = True
        read_index += 1
    
    # iterate through reads to build the coverage data
    for read_index in xrange(read_index, len(chrom_reads)):
        read_start = chrom_reads[read_index]
        if read_start >= chrom_end:
            break
        
        while read_start >= chunk_bounds[1]:
            if chunk_dirty:
                # add chunk to hdf5 dataset        
                arr[chunk_bounds[0]-chrom_start:chunk_bounds[1]-chrom_start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
                # shift end of chunk to beginning of next chunk and clear rest of array
                chunk_data[0:read_length] = chunk_data[-read_length:]
                chunk_data[read_length:] = 0
                # check if chunk no longer dirty
                chunk_dirty = chunk_data[0:read_length].any()
            # get next chunk
            chunk_bounds = (chunk_bounds[0] + chunk_size,
                            min(chunk_bounds[1] + chunk_size, chrom_end))
        # add coverage from read
        read_cov = increment if unique_only else (chrom_cov[read_index] * increment)
        chunk_data[read_start - chunk_bounds[0]:read_start + read_length - chunk_bounds[0]] += read_cov
        chunk_dirty = True
    # flush last chunk
    if chunk_dirty:
        arr[chunk_bounds[0]-chrom_start:chunk_bounds[1]-chrom_start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
    # delete chunk array
    del chunk_data

def _process_reads_cached_old(arr, chrom_reads, chrom_start, chrom_end, read_length, increment, chunk_size=1e7):
    assert chunk_size > read_length
    # figure out the boundaries of the first chunk
    chunk_bounds = (chrom_start,
                    min(chrom_start + chunk_size, chrom_end))
    # allocate an array to store the largest possible chunk
    chunk_data = np.zeros((chunk_bounds[1] - chunk_bounds[0] + read_length,), dtype=arr.dtype)
    chunk_dirty = False

    # "fast-forward" to find index at chrom_start
    read_index = 0
    while read_index < len(chrom_reads):
        read_start = chrom_reads[read_index]
        if read_start > chrom_start:
            # found the first read after the chrom_start, so bail out of 
            # this loop
            break
        if (read_start + read_length) > chrom_start:
            # this read begins outside the desired range but ends
            # inside it, so capture this coverage
            chunk_data[0:read_start + read_length - chunk_bounds[0]] += increment
            chunk_dirty = True
        read_index += 1
    
    # iterate through reads to build the coverage data
    for read_index in xrange(read_index, len(chrom_reads)):
        read_start = chrom_reads[read_index]
        if read_start >= chrom_end:
            break
        
        while read_start >= chunk_bounds[1]:
            if chunk_dirty:
                # add chunk to hdf5 dataset        
                arr[chunk_bounds[0]-chrom_start:chunk_bounds[1]-chrom_start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
                # shift end of chunk to beginning of next chunk and clear rest of array
                chunk_data[0:read_length] = chunk_data[-read_length:]
                chunk_data[read_length:] = 0
                # check if chunk no longer dirty
                chunk_dirty = chunk_data[0:read_length].any()
            # get next chunk
            chunk_bounds = (chunk_bounds[0] + chunk_size,
                            min(chunk_bounds[1] + chunk_size, chrom_end))
        # add coverage from read
        chunk_data[read_start - chunk_bounds[0]:read_start + read_length - chunk_bounds[0]] += increment
        chunk_dirty = True
    # flush last chunk
    if chunk_dirty:
        arr[chunk_bounds[0]-chrom_start:chunk_bounds[1]-chrom_start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
    # delete chunk array
    del chunk_data


def _process_reads_cached_nochunk(arr, chrom_reads, chrom_start, chrom_end, read_length, increment):
    covarray = np.zeros((chrom_end - chrom_start,), dtype=arr.dtype)
    # "fast-forward" to find index at chrom_start
    read_index = 0
    while read_index < len(chrom_reads):
        read_start = chrom_reads[read_index]
        if read_start <= chrom_start:
            if (read_start + read_length) > chrom_start:
                # this read begins outside the desired range but ends
                # inside it, so capture this coverage                
                covarray[0:read_start + read_length - chrom_start] += increment
        else:
            # found the true chrom_start, so bail out of this loop and start
            # a more efficient one without the bounds check
            break
        read_index += 1

    # iterate through reads to build the coverage data
    for read_index in xrange(read_index, len(chrom_reads)):
        read_start = chrom_reads[read_index]
        if read_start >= chrom_end:
            break
        start = read_start - chrom_start
        end = min(read_start + read_length - chrom_start, chrom_end)
        covarray[start:end] += increment
    # add covarray to input array 
    arr[:] += covarray
    # delete covarray
    del covarray 

def gen_fw_read_track_from_alignments(reader, read_length=None, track=None):    
    '''
    Build a FixedWidthReadTrack from an iterator of alignments.

    reader: iterable yielding (chrom, chrom_start, chrom_end, strand) tuples
    read_length: expected read length; when None it is taken from 'track'
                 (if given) or inferred from the first alignment's span
    track: existing FixedWidthReadTrack to append to; a new one is created
           when None

    Returns the populated track ('track' unchanged if 'reader' is empty).
    Raises ValueError on a read-length mismatch or an unrecognized strand.
    '''
    seqlen = None
    linenum = 0
    chunknum = 0
    chunksize = 100000

    for alignment in reader:
        # count lines for status updates
        linenum += 1
        if linenum == chunksize:
            chunknum += 1
            logging.debug("    %d" % (chunknum * chunksize))
            linenum = 0
        # unpack alignment
        chrom, chrom_start, chrom_end, strand = alignment
        # initial setup performed on the first alignment seen
        if seqlen is None:
            seqlen = chrom_end - chrom_start
        if read_length is None:
            read_length = track.read_length if track is not None else seqlen
        if track is None:
            track = FixedWidthReadTrack(read_length)
        elif read_length != track.read_length:
            logging.critical("read_length=%d mismatches dataset=%d" % 
                             (read_length, track.read_length))
            raise ValueError("read_length=%d mismatches dataset=%d" %
                             (read_length, track.read_length))
        # skip reads that do not map to the genome or are mitochondrial
        if not maps_to_genome(chrom):
            continue
        if chrom == 'chrM':
            continue
        if strand in ('F', '+'):
            track.add(chrom, chrom_start, 0)
        elif strand in ('R', '-'):
            # store the shifted 5' position so minus-strand reads line up
            # with the fixed read_length; clamp at the chromosome start
            shiftedpos = max(0, chrom_start + seqlen - read_length)
            track.add(chrom, shiftedpos, 1)
        else:
            logging.critical("strand is not 'F' or 'R'")
            raise ValueError("strand is not 'F' or 'R'")
    return track