'''
Created on Nov 12, 2009

@author: mkiyer
'''
import logging
import numpy as np
import pysam
import os
import sys
import collections
import operator
import tables
from veggie.db.sample import get_sampledb_instance
import veggie.db.sample.sampledb3 as sdb

def pileup2array(pileup_iterator, arr,
                 dtype=np.uint32):
    """Copy per-base pileup depths from a pileup iterator into `arr`.

    Contiguous runs of pileup columns are buffered in a compact
    ``array.array`` and written to `arr` one run at a time, so sparse
    coverage does not cost one array assignment per base.

    Parameters
    ----------
    pileup_iterator : iterator of pileup columns (must expose `.pos`
        and `.n`), sorted by position -- e.g. pysam's `Samfile.pileup()`
    arr : writable array-like indexed by reference position
        (e.g. a PyTables CArray or a numpy array)
    dtype : numpy dtype used when converting each buffered chunk
    """
    import array
    start = None
    end = None
    chunk_arr = array.array('I')
    for pileupcolumn in pileup_iterator:
        pos = pileupcolumn.pos
        if start is None:
            # first column seen
            start, end = pos, pos
        elif end != pos:
            # gap in coverage: flush the finished run, start a new one
            arr[start:end] = np.array(chunk_arr, dtype=dtype)
            chunk_arr = array.array('I')
            start, end = pos, pos
        end += 1
        chunk_arr.append(pileupcolumn.n)
    # flush any remaining buffered run
    if len(chunk_arr) > 0:
        arr[start:(start + len(chunk_arr))] = np.array(chunk_arr, dtype=dtype)

class Junction(tables.IsDescription):
    # PyTables row schema for a splice-junction table (one table per
    # chromosome under a library group's 'junctions' node); rows are
    # written by PileupDB.insert_sam from reads2junctions output.
    start = tables.UInt32Col()   # junction start on the reference (from pysam read.pos arithmetic)
    end = tables.UInt32Col()     # junction end (start + skip length)
    strand = tables.StringCol(1, dflt='.')  # strand from the 'XS' tag; '.' when unknown
    n = tables.UInt32Col()       # number of reads supporting this junction

def reads2junctions(read_iterator):
    """Count splice junctions implied by spliced read alignments.

    CIGAR 'N' (skip) operations are treated as splice junctions.  Reads
    are assumed to be coordinate sorted, so identical junctions arrive
    consecutively and can be counted in a single pass.

    Returns a sorted list of ((start, end, strand), count) tuples,
    where strand is taken from the read's 'XS' alignment tag.
    """
    # CIGAR operation codes from the SAM format specification
    MATCH = 0
    INSERTION = 1
    DELETION = 2
    SKIP = 3
    SOFTCLIP = 4
    HARDCLIP = 5
    PADDING = 6
    SEQMATCH = 7
    SEQMISMATCH = 8
    # operations that consume reference bases (SAM spec: M, D, N, =, X).
    # bugfix: the old code advanced the reference offset for insertions
    # and clips (which consume only the query) and never advanced it
    # across the skip itself, misplacing any second junction in a read.
    ref_ops = (MATCH, DELETION, SKIP, SEQMATCH, SEQMISMATCH)
    juncs = collections.defaultdict(lambda: 0)
    jstart, jend, jstrand, jcount = None, None, None, 0
    for read in read_iterator:
        pos = 0  # offset from read.pos on the reference
        # read the individual fields of the cigar string
        for op, length in read.cigar:
            # skips in the alignment are considered splice junctions
            if op == SKIP:
                start = read.pos + pos
                end = start + length
                strand = read.opt('XS')
                # bugfix: compare against None explicitly -- a junction
                # starting at reference position 0 is falsy, and the old
                # 'if not jstart' check lost the preceding run's counts
                if jstart is None:
                    jstart, jend, jstrand = start, end, strand
                elif (jstart != start) or (jend != end):
                    # junction changed: flush the previous run's count
                    juncs[(jstart, jend, jstrand)] += jcount
                    jstart, jend, jstrand, jcount = start, end, strand, 0
                jcount += 1
            if op in ref_ops:
                pos += length
    # add any remaining junctions to the list
    if jcount > 0:
        juncs[(jstart, jend, jstrand)] += jcount
    return sorted(juncs.items())

def fetch_junctions(group, chrom, istart, iend, norm=True):
    """Return junctions on `chrom` within [istart, iend) for one library group.

    Returns a list of (start, end, strand, n) tuples.  When norm is True,
    counts are scaled by PileupDB.norm_factor / (reads * read_length)
    using the group's stored attributes.

    NOTE: the where() condition string references the local names
    `istart`/`iend` -- PyTables resolves them from this frame's locals,
    so they must not be renamed.
    """
    table = group.junctions._f_getChild(chrom)
    if norm:
        reads = group._v_attrs['reads']
        read_length = group._v_attrs['read_length'] 

    res = []
    for row in table.where('((start >= istart) & (end < iend))'):
        if norm:
            n = row['n'] * (PileupDB.norm_factor / (reads * read_length))
        else:
            n = row['n']
        res.append((row['start'], row['end'], row['strand'], n))
    return res
#    res = [(row['start'], row['end'], row['strand'], row['n']) for row in table
#           if (row['start'] >= istart) and (row['end'] < iend)]            

def calc_coverage(group, chrom, start, end, norm=True, rpkm=False):
    """Coverage over chrom[start:end] for one library group.

    Returns a numpy array of per-base depth.  When norm is True the
    depths are scaled by PileupDB.norm_reads / (reads * read_length)
    using the group's stored attributes.  When rpkm is True the array is
    collapsed to a single interval-length-normalized value (norm is
    forced on, since RPKM requires read-count normalization).
    """
    if rpkm:
        norm = True
    arr = group.coverage._f_getChild(chrom)[start:end]
    if norm:
        reads = group._v_attrs['reads']
        read_length = group._v_attrs['read_length']
        arr = arr * (PileupDB.norm_reads / (reads * read_length))
    if rpkm:
        # collapse to one value, normalized by the interval length
        arr = (PileupDB.norm_read_length / (end - start)) * np.sum(arr)
    return arr

class PileupDBQuery(object):
    """Result object returned by PileupDB.query().

    Holds parallel lists describing the matched library groups:
      samples       -- sample name for each matched group (may repeat
                       when a sample has several libraries)
      groups        -- library (group) name for each matched group
      h5groups      -- the PyTables group nodes themselves
      sample_lookup -- sample name -> list of indexes into h5groups
    """

    def __init__(self):
        self.samples = None
        self.groups = None
        self.h5groups = None
        self.sample_lookup = None

    def to_rpkm(self, arr):
        """Collapse a reads-normalized coverage array to one RPKM value."""
        return PileupDB.norm_read_length * np.average(arr)

    def itersamples(self):
        """Iterate over (sample_name, group index list) pairs."""
        return self.sample_lookup.iteritems()

    def get_sample_indexes(self, sample):
        """Return the h5group indexes belonging to `sample`."""
        return self.sample_lookup[sample]

    def coverage(self, chrom, start, end, norm=True, rpkm=False):
        """Per-library coverage over chrom[start:end].

        Returns a (num_groups, end-start) array of depths, or a 1-D
        array of per-library RPKM values when rpkm is True.
        """
        if rpkm:
            result = np.zeros(len(self.h5groups), dtype=np.float)
        else:
            result = np.zeros((len(self.h5groups), end-start), dtype=np.float)
        for i, group in enumerate(self.h5groups):
            result[i] = calc_coverage(group, chrom, start, end, norm=norm, rpkm=rpkm)
        return result

    def sample_coverage(self, chrom, start, end, norm=True, rpkm=False):
        """Per-sample coverage: a sample's libraries are combined.

        Returns one row of depths (or one RPKM value when rpkm is True)
        per distinct sample name.
        """
        finished = set()
        nsamples = len(self.sample_lookup)
        if rpkm:
            result = np.zeros(nsamples, dtype=np.float)
        else:
            result = np.zeros((nsamples, end-start), dtype=np.float)
        i = 0
        for sample_name in self.samples:
            # self.samples has one entry per library, so samples with
            # several libraries appear more than once.  bugfix: the old
            # code never added to 'finished', recomputing duplicates and
            # running 'i' past the end of 'result'
            if sample_name in finished:
                continue
            finished.add(sample_name)
            result[i] = self._calc_sample_coverage(sample_name, chrom, start, end, norm, rpkm)
            i += 1
        return result

    def _calc_sample_coverage(self, sample_name, chrom, start, end, norm=True, rpkm=False):
        """Sum read-length-normalized coverage over a sample's libraries."""
        indexes = self.sample_lookup[sample_name]
        sample_arr = np.zeros(end-start, dtype=np.float)
        sample_reads = 0
        for index in indexes:
            h5group = self.h5groups[index]
            read_length = h5group._v_attrs['read_length']
            # normalize each library by its read length before summing
            group_arr = (h5group.coverage._f_getChild(chrom)[start:end] / float(read_length))
            sample_arr += group_arr
            sample_reads += h5group._v_attrs['reads']
        if norm or rpkm:
            sample_arr = sample_arr * (PileupDB.norm_reads / float(sample_reads))
            if rpkm:
                # collapse to one interval-length-normalized value
                sample_arr = (PileupDB.norm_read_length / (end - start)) * np.sum(sample_arr)
        return sample_arr

    def junctions(self, chrom, start, end, norm=True):
        """Fetch junctions within [start, end) for every library group."""
        result = []
        for group in self.h5groups:
            result.append(fetch_junctions(group, chrom, start, end, norm=norm))
        return result

    def iterjunctions(self, norm=True):
        """Yield every stored junction as
        (library, chrom, start, end, strand, count)."""
        for h5group in self.h5groups:
            norm_factor = 1.0
            if norm:
                reads = h5group._v_attrs['reads']
                read_length = h5group._v_attrs['read_length']
                norm_factor = (PileupDB.norm_factor / float(reads * read_length))
            for chrom_tbl in h5group.junctions:
                for row in chrom_tbl:
                    yield (h5group._v_name, chrom_tbl._v_name, row['start'], row['end'], row['strand'], row['n'] * norm_factor)


class PileupQuery(object):
    """Query coverage and splice junctions across per-library HDF5 files.

    Unlike PileupDBQuery, which wraps groups inside a single database
    file, this class opens one '<library_id>.phd' PyTables file per
    library and builds index arrays so coverage lookups by library or by
    sample are fast.
    """

    # normalization factors for coverage calculation
    norm_reads = 1.0e6
    norm_interval_length = 1.0e3
    norm_rpkm = norm_reads * norm_interval_length

    def __init__(self, sample_names, phdb_path, sample_library_map):
        """Open the .phd files for the requested samples.

        sample_names: iterable of sample names (query order preserved)
        phdb_path: directory containing '<library_id>.phd' files
        sample_library_map: maps sample name -> list of library ids
        """
        # init variables
        self.h5files = []                 # open tables.File handles
        self.h5groups = []                # one group node per library
        self.sample_h5group_map = {}      # sample name -> group nodes
        self.library_id_h5group_map = {}  # library id -> group node
        self.library_id_index_map = {}    # library id -> index into h5groups
        self.sample_indexes_map = {}      # sample name -> np.array of indexes
        self.index_sample_names = []      # index -> sample name
        self.ordered_sample_names = []    # distinct sample names in query order
        self.ordered_sample_indexes = []  # parallel list of index arrays
        self.h5group_reads = []           # per-group total read counts
        self.h5group_read_lengths = []    # per-group read lengths
        # build query
        self._init_query(sample_names, phdb_path, sample_library_map)

    def close(self):
        """Close every open HDF5 file."""
        for h5file in self.h5files:
            h5file.close()

    # simple lookup helpers
    def get_library_id(self, index):
        return self.h5groups[index]._v_name
    def get_library_index(self, library_id):
        return self.library_id_index_map[library_id]
    def get_sample_name(self, index):
        return self.index_sample_names[index]
    def get_sample_indexes(self, sample_name):
        return self.sample_indexes_map[sample_name]
    def itersamples(self):
        return self.sample_indexes_map.iteritems()

    def _init_query(self, sample_names, phdb_path, sample_library_map):
        """Open the per-library files and build all lookup structures."""
        logger = logging.getLogger(self.__class__.__name__)
        sample_indexes_map = collections.defaultdict(lambda: [])
        sample_h5group_map = collections.defaultdict(lambda: [])
        h5group_reads = []
        h5group_read_lengths = []
        current_index = 0
        for sample_name in sample_names:
            # get the libraries associated with this sample
            if sample_name not in sample_library_map:
                logger.error('sample %s not found' % sample_name)
                continue
            library_ids = sample_library_map[sample_name]
            for library_id in library_ids:
                # locate phdb file
                h5file = self._open_library_phd_file(library_id, phdb_path)
                if h5file is None:
                    logger.error('library %s not found' % library_id)
                    continue
                if not (('/' + library_id) in h5file):
                    logger.error('library %s not found in database' % library_id)
                    h5file.close()
                else:
                    h5group = h5file.getNode(h5file.root, library_id)
                    # add file
                    self.h5files.append(h5file)
                    self.h5groups.append(h5group)
                    # add lookup information
                    self.index_sample_names.append(sample_name)
                    if ((len(self.ordered_sample_names) == 0) or 
                        self.ordered_sample_names[-1] != sample_name):
                        self.ordered_sample_names.append(sample_name)
                    # library id lookup
                    self.library_id_h5group_map[library_id] = h5group
                    self.library_id_index_map[library_id] = current_index
                    # sample name lookup
                    sample_h5group_map[sample_name].append(h5group)
                    sample_indexes_map[sample_name].append(current_index)
                    # group attributes
                    h5group_reads.append(h5group._v_attrs['reads'])
                    h5group_read_lengths.append(h5group._v_attrs['read_length'])
                    current_index += 1
        # convert defaultdict to dict
        self.sample_h5group_map = dict(sample_h5group_map)
        self.sample_indexes_map = dict(sample_indexes_map)
        # optimize sample name -> index lookups using index arrays
        for sample_name in self.ordered_sample_names:
            # convert indexes to numpy array
            sample_indexes = np.array(self.sample_indexes_map[sample_name])
            # store as numpy array
            self.sample_indexes_map[sample_name] = sample_indexes
            self.ordered_sample_indexes.append(sample_indexes)
        # convert python list to numpy array for fast indexing
        self.h5group_reads = np.array(h5group_reads)
        self.h5group_read_lengths = np.array(h5group_read_lengths)

    def _open_library_phd_file(self, library_id, phdb_path):
        """Open '<phdb_path>/<library_id>.phd' read-only, or return None
        if the file does not exist.  Raises OSError for non-HDF5 files."""
        filename = os.path.join(phdb_path, library_id + '.phd')
        h5file = None
        if os.path.exists(filename):
            if not tables.isHDF5File(filename):
                raise OSError('file %s is not an HDF5 file' % filename)
            if not tables.isPyTablesFile(filename):
                raise OSError('file %s is not a PyTables file' % filename)
            h5file = tables.openFile(filename, 'r')
        return h5file

    @staticmethod
    def to_rpkm(arr):
        """Collapse a reads-normalized coverage array to one RPKM value."""
        return PileupQuery.norm_interval_length * np.average(arr)

    @staticmethod
    def to_rpkm_2d(arr2d):
        '''rows of 2D-array are libraries/samples and columns are genomic positions'''
        return PileupQuery.norm_interval_length * np.average(arr2d, axis=1)

    @staticmethod
    def _calc_coverage(h5group, chrom, start, end, norm):
        """Raw (optionally reads*read_length normalized) coverage slice."""
        arr = h5group.coverage._f_getChild(chrom)[start:end]
        if norm:
            reads = h5group._v_attrs['reads']
            read_length = h5group._v_attrs['read_length']
            arr = arr * (PileupQuery.norm_reads / (reads * read_length))
        return arr

    @staticmethod
    def _calc_coverage_norm_read_length(h5group, read_length, chrom, start, end):
        """Coverage slice normalized only by read length."""
        arr = (h5group.coverage._f_getChild(chrom)[start:end] / float(read_length))
        return arr

    @staticmethod
    def _calc_coverage_norm_interval_length(h5group, chrom, start, end):
        """Average coverage normalized by read length and interval length."""
        read_length = h5group._v_attrs['read_length']
        norm_factor = PileupQuery.norm_interval_length / (read_length)
        return norm_factor * np.average(h5group.coverage._f_getChild(chrom)[start:end])

    @staticmethod
    def _calc_coverage_rpkm(h5group, chrom, start, end):
        """Single RPKM value for one library over the interval."""
        reads = h5group._v_attrs['reads']
        read_length = h5group._v_attrs['read_length']
        rpkm_norm = PileupQuery.norm_rpkm / (reads * read_length)
        return rpkm_norm * np.average(h5group.coverage._f_getChild(chrom)[start:end])

    def coverage(self, chrom, start, end, norm=True):
        """Coverage map for each library"""
        result = np.empty((len(self.h5groups), end-start), dtype=np.float)
        for i, h5group in enumerate(self.h5groups):
            result[i] = self._calc_coverage(h5group, chrom, start, end, norm)
        return result

    def batch_coverage(self, intervals, arr2d, norm=True):
        '''Length-normalized coverage: fills arr2d[interval, library]
        with per-library RPKM values for each (chrom, start, end).'''
        for j, h5group in enumerate(self.h5groups):
            for i, interval in enumerate(intervals):
                chrom, start, end = interval
                arr = self._calc_coverage(h5group, chrom, start, end, norm)
                rpkm = self.to_rpkm(arr)
                arr2d[i,j] = rpkm

    def _calc_sample_coverage(self, indexes, chrom, start, end, norm):
        """Sum read-length-normalized coverage over one sample's groups."""
        sample_arr = np.zeros((end - start), dtype=np.float)
        for index in indexes:
            sample_arr += self._calc_coverage_norm_read_length(self.h5groups[index], 
                                                               self.h5group_read_lengths[index],
                                                               chrom, start, end)
        if norm:
            # normalize by the sample's combined read count
            sample_reads = np.sum(self.h5group_reads[indexes])
            sample_arr = sample_arr * (PileupQuery.norm_reads / float(sample_reads))
        return sample_arr

    def sample_coverage(self, chrom, start, end, norm=True):
        """Coverage map for each sample"""
        # get library coverage with read length normalization
        sample_arrs = np.empty((len(self.ordered_sample_names), end-start), dtype=np.float)
        for i, indexes in enumerate(self.ordered_sample_indexes):
            sample_arrs[i] = self._calc_sample_coverage(indexes, chrom, start, end, norm)
        return sample_arrs

    def batch_sample_coverage(self, intervals, arr2d, norm=True):
        """Fill arr2d[interval, sample] with per-sample RPKM values."""
        for j, indexes in enumerate(self.ordered_sample_indexes):
            for i, interval in enumerate(intervals):
                chrom, start, end = interval
                sample_arr = self._calc_sample_coverage(indexes, chrom, start, end, norm)
                rpkm = self.to_rpkm(sample_arr)
                arr2d[i,j] = rpkm
        return arr2d

    def batch_sample_coverage2(self, intervals, arr2d, norm=True):
        """Alternative batch computation that walks each group once,
        summing per-interval averages before the final normalization."""
        for j, indexes in enumerate(self.ordered_sample_indexes):
            # get the coverage for all intervals one individual group at a time
            h5group_arrays = np.empty((len(intervals), len(indexes)), dtype=np.float)
            for k, index in enumerate(indexes):
                h5group = self.h5groups[index]
                # compute coverage for all intervals
                for i, interval in enumerate(intervals):
                    chrom, start, end = interval
                    cov = self._calc_coverage_norm_interval_length(h5group, chrom, start, end)
                    h5group_arrays[i,k] = cov
            # now perform the final summation and normalization
            if norm:
                sample_reads = np.sum(self.h5group_reads[indexes])
                norm_factor = PileupQuery.norm_reads / float(sample_reads)
            else:
                norm_factor = 1.0
            arr2d[:,j] = norm_factor * np.sum(h5group_arrays, axis=1)
        return arr2d

    def junctions(self, chrom, start, end, norm=True):
        """Fetch junctions within [start, end) for every library group."""
        result = []
        for group in self.h5groups:
            result.append(fetch_junctions(group, chrom, start, end, norm=norm))
        return result

    def iterjunctions(self, norm=True):
        """Yield every stored junction as
        (library, chrom, start, end, strand, count)."""
        for h5group in self.h5groups:
            norm_factor = 1.0
            if norm:
                reads = h5group._v_attrs['reads']
                read_length = h5group._v_attrs['read_length']
                # bugfix: this class has no 'norm_factor' attribute (that
                # lives on PileupDB); norm_rpkm is the equivalent
                # reads-by-length normalization constant here
                norm_factor = (PileupQuery.norm_rpkm / float(reads * read_length))
            for chrom_tbl in h5group.junctions:
                for row in chrom_tbl:
                    yield (h5group._v_name, chrom_tbl._v_name, row['start'], row['end'], row['strand'], row['n'] * norm_factor)


class PileupDB(object):
    """Single-file HDF5 database of per-library read pileups.

    Each library is a top-level group containing:
      coverage/<chrom>  -- chunked array of per-base read depth
      junctions/<chrom> -- table of splice junctions (Junction schema)
    plus group attributes: reads, read_length, read_length_variable,
    read_length_distribution and junctions (total junction read count).
    """

    # default datatype
    default_numpy_dtype = np.uint32
    default_atom = tables.UInt32Atom()
    # default compression library to use
    default_complevel = 1
    default_complib = 'lzo'
    default_filters = tables.Filters(complevel=default_complevel, 
                                     complib=default_complib,
                                     shuffle=True)
    # normalization factors for coverage calculation
    norm_reads = 1.0e6
    norm_read_length = 1.0e3
    norm_factor = norm_reads * norm_read_length

    def __init__(self, filename, mode='a', sdbi_path=None):
        '''
        mode
        ----
        'r'  Read-only; no data can be modified.
        'w'  Write; a new file is created (an existing file with the same name would be deleted).
        'a'  Append; an existing file is opened for reading and writing, and if the file does not exist it is created.
        'r+' It is similar to 'a', but the file must already exist.

        sdbi_path: optional path to a SampleDB snapshot used by query().
        '''
        # bugfix: compare strings by value; 'is not' tests object
        # identity and only worked by accident of string interning
        if mode != 'w':
            if os.path.exists(filename):
                if not tables.isHDF5File(filename):
                    raise OSError('file %s is not an HDF5 file' % filename)
                if not tables.isPyTablesFile(filename):
                    raise OSError('file %s is not a PyTables file' % filename)
        self.h5file = tables.openFile(filename, mode)
        self.sdbi_path = sdbi_path

    def close(self):
        """Close the underlying HDF5 file."""
        self.h5file.close()

    def query_coverage(self, group_name, chrom, start, end, norm=True, rpkm=False):
        """Coverage for a single library group (see calc_coverage)."""
        return calc_coverage(self.h5file.getNode(self.h5file.root, group_name), 
                             chrom, start, end, norm, rpkm)

    def query(self, sample_names, qc_filter=True):
        """Build a PileupDBQuery over the groups matching `sample_names`.

        Sample names are resolved to library ids via the SampleDB
        snapshot at self.sdbi_path; QC-failed libraries are skipped
        unless qc_filter is False.
        """
        samples, groups, h5groups = self._make_query_from_samples(sample_names, qc_filter=qc_filter)
        pileupquery = PileupDBQuery()
        pileupquery.samples = samples
        pileupquery.groups = groups
        pileupquery.h5groups = h5groups
        # map each sample name to the indexes of its library groups
        sample_lookup = collections.defaultdict(lambda: [])
        for i, s in enumerate(pileupquery.samples):
            sample_lookup[s].append(i)
        pileupquery.sample_lookup = sample_lookup
        return pileupquery

    def _make_query_from_samples(self, samples, qc_filter=True):
        """Resolve sample names to parallel lists of
        (sample_names, group_names, h5groups) present in this file."""
        h5groups = []
        sample_names = []
        group_names = []
        logger = logging.getLogger(self.__class__.__name__)
        # load the current sampledb snapshot
        logging.debug("Loading SampleDB instance at %s" %
                      os.path.abspath(self.sdbi_path))
        sdbi = sdb.SampleDBInstance.load(self.sdbi_path)
        for sample_name in samples:
            # get the libraries associated with this sample
            libraries = sdbi.get_libraries_by_sample_name(sample_name, best=True)
            for library in libraries:
                if library.qc_status == False:
                    if qc_filter:
                        # QC-failed library excluded from the query
                        continue
                if ('/' + library.id) in self.h5file:
                    sample_names.append(sample_name)
                    group_names.append(library.id)
                    h5groups.append(self.h5file.getNode(self.h5file.root, library.id))
                else:
                    logger.error('library %s not found in database' % library.id)
        return sample_names, group_names, h5groups

    def insert_sam(self, inbamfile,
                   group_name=None,
                   references=None):
        """Import an indexed BAM file into the database.

        Builds per-chromosome coverage arrays and junction tables under
        `group_name` (the file root when None), then records read-count
        and read-length attributes on the group.  Returns the group.
        """
        logger = logging.getLogger(self.__class__.__name__)
        # handle input parameters
        if group_name is None:
            parent_group = self.h5file.root
        elif ('/' + group_name) in self.h5file:
            # bugfix: group_name is a string, so use %s (the old %d
            # raised TypeError), and address the node relative to the
            # root as done everywhere else in this class
            logger.warning('h5file group %s already exists, data may be overwritten' % group_name)
            parent_group = self.h5file.getNode(self.h5file.root, group_name)
        else:
            parent_group = self.h5file.createGroup(self.h5file.root, group_name)
        # create groups for the two data types
        cov_group = self.h5file.createGroup(parent_group, 'coverage')
        junc_group = self.h5file.createGroup(parent_group, 'junctions')
        # open the bam file and read the header
        bamfile = pysam.Samfile(inbamfile, 'rb')
        refs = bamfile.references
        lengths = bamfile.lengths
        # bugfix: total the junction reads across all references -- the
        # old code reset the counter per reference, so the 'junctions'
        # attribute only reflected the last chromosome
        total_junc_reads = 0
        # create the individual chunked arrays
        for ref, length in zip(refs, lengths):
            logger.debug("adding ref %s to database" % (ref))
            # TODO: experiment with chunkshape
            ca = self.h5file.createCArray(cov_group, ref, 
                                          atom=self.default_atom, 
                                          shape=(length,), 
                                          filters=self.default_filters, 
                                          chunkshape=None)
            # now insert the reads from the BAM file into the chunked array
            pileup2array(bamfile.pileup(ref), ca,
                         dtype=ca.atom.dtype)
            # get the splice junction reads from the bamfile
            juncs = reads2junctions(bamfile.fetch(ref))
            # add the splice reads to a table
            junc_tbl = self.h5file.createTable(junc_group, ref, Junction, 
                                               expectedrows=len(juncs))
            for (start, end, strand), count in juncs:
                total_junc_reads += count
                junc_row = junc_tbl.row
                junc_row['start'] = start
                junc_row['end'] = end
                junc_row['strand'] = strand
                junc_row['n'] = count
                junc_row.append()
            junc_tbl.flush()
        # count reads and collect the read length distribution
        reads = 0
        read_lengths = collections.defaultdict(lambda: 0)
        for read in bamfile.fetch():
            reads += 1
            read_lengths[read.rlen] += 1
        # done with BAM file
        bamfile.close()
        # set attributes for group
        parent_group._v_attrs.reads = reads
        parent_group._v_attrs.read_length_variable = len(read_lengths) > 1
        # set to the most abundant read length in the case of variable read length runs
        parent_group._v_attrs.read_length = int(sorted(read_lengths.items(), key=operator.itemgetter(1), reverse=True)[0][0])
        parent_group._v_attrs.read_length_distribution = dict(read_lengths)
        # total number of junction-supporting reads
        parent_group._v_attrs.junctions = total_junc_reads
        return parent_group


if __name__ == '__main__':
    # smoke test: build a pileup database from the BAM file given on the
    # command line
    logging.basicConfig(level=logging.DEBUG)
    db = PileupDB('utta.phd', 'w')
    db.insert_sam(sys.argv[1], group_name="huda")
    db.close()

#def pileup_reads(arr, read_iterator, 
#                 chrom_start, chrom_end, 
#                 unique_only=True, 
#                 dtype=np.uint32,
#                 max_rlen=2048,
#                 chunk_size=8192):
#    '''
#    don't use this function because it does not pileup spliced reads properly
#    '''
##    assert chunk_size > max_rlen
#    assert chrom_end > chrom_start
#    # figure out the boundaries of the first chunk
#    chunk_bounds = (chrom_start,
#                    min(chrom_start + chunk_size, chrom_end))
#    # allocate an array to store the largest possible chunk
#    chunk_data = np.zeros((chunk_bounds[1] - chunk_bounds[0] + max_rlen,), dtype=dtype)
#    chunk_dirty = False
#    # iterate through reads
#    for read in read_iterator:
#        # ignore duplicate reads
#        if unique_only and read.is_duplicate:
#            continue            
#        # get attributes from AlignedRead object
#        read_start = read.pos
#        read_length = read.rlen
#        assert read_length < max_rlen
#        # only consider reads that align within the desired region
#        if read_start >= chrom_end:
#            break
#        if (read_start + read_length) > chrom_start:
#            # if the read starts after the end of the current chunk, need to write the 
#            # chunk and shift to the next chunk
#            while read_start >= chunk_bounds[1]:
#                if chunk_dirty:
#                    # add chunk to hdf5 dataset        
#                    arr[chunk_bounds[0]-chrom_start:chunk_bounds[1]-chrom_start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
#                    # shift end of chunk to beginning of next chunk and clear rest of array
#                    chunk_data[0:max_rlen] = chunk_data[-max_rlen:]
#                    chunk_data[max_rlen:] = 0
#                    # check if chunk no longer dirty
#                    chunk_dirty = chunk_data[0:max_rlen].any()
#                # get next chunk
#                chunk_bounds = (chunk_bounds[0] + chunk_size,
#                                min(chunk_bounds[1] + chunk_size, chrom_end))
#            # add coverage from the current read
#            chunk_data[np.max(0, read_start - chunk_bounds[0]):read_start + read_length - chunk_bounds[0]] += 1
#            chunk_dirty = True
#    # flush last chunk
#    if chunk_dirty:
#        arr[chunk_bounds[0]-chrom_start:chunk_bounds[1]-chrom_start] += chunk_data[0:(chunk_bounds[1]-chunk_bounds[0])]
#    # delete chunk array
#    del chunk_data

#def picard_create_sequence_dict():
#    from veggie.app.alignment import sw_root
#    picard_root = os.path.join(sw_root, 'picard', 'current')
#    java_cmd = ['java', '-Xmx2g', '-jar', ]
#    dict_args = ['REFERENCE=%s' % ('/lab/mkiyer/refdb/alignment/references/bowtie/hg18_illumina/hg18_illumina.fa'),
#                 'OUTPUT=hg18_illumina.dict']
#    dict_cmd = []
#    dict_cmd.extend(java_cmd)
#    dict_cmd.append(os.path.join(picard_root, 'CreateSequenceDictionary.jar'))
#    dict_cmd.extend(dict_args)
#    retcode = subprocess.call(dict_cmd)
#    if retcode != 0:
#        raise PicardError("Picard <%s> reported an error" % (' '.join(dict_cmd)))
    
#    def _prepare_bam(self, library_id, samfile_name,
#                     unique_only=True):
#        bamfile = picard_prepare_bam(samfile_name)
#        # index the sorted BAM file
#        index_file = bamfile + '.bai'
#        if os.path.exists(index_file):
#            logging.debug('%s: indexed BAM file %s exists' % (library_id, index_file))
#        else:
#            logging.debug('%s: indexing BAM file' % (library_id))
#            pysam.index(bamfile)
#        return bamfile
#        
#    def _samtools_prepare_bam(self, library_id, samfile_name,
#                              unique_only=True, 
#                              reflistfile_name=None):
#        # SAMtools requires a reference list file in order to convert
#        # a SAM file to BAM format.  The reference information may be
#        # stored in the SAM header, in which case a reference list file
#        # is not necessary
#        if reflistfile_name is None:
#            # open the SAM file with pysam
#            samfile = pysam.Samfile(samfile_name, 'r')            
#            if len(samfile.references) == 0:
#                logging.error("%s: No references found in SAM file.." % (library_id))
#                return None
#            refs = samfile.references
#            lengths = samfile.lengths
#            # we are done with the samfile now
#            samfile.close()
#            # create a file to use as the reflist for SAMtools
#            reflistfile_name = os.path.splitext(samfile_name)[0] + '.ref_list'
#            reflistfile = open(reflistfile_name, 'w')
#            # write the reference lengths 
#            for ref, length in zip(refs, lengths):
#                # add this reference to the reflist file
#                reflistfile.write('%s\t%d\n' % (ref, length))
#            reflistfile.close()
#        
#        # convert the SAM file to BAM format in order to sort, remove
#        # duplicate reads, and call pileup
#        bamfile_name = os.path.splitext(samfile_name)[0] + '.bam'
#        if os.path.exists(bamfile_name):
#            logging.debug('%s: BAM file exists' % (library_id))
#        else:
#            # convert SAM to BAM
#            logging.debug('%s: converting SAM to BAM' % (library_id))
#            # TODO: collect stdout
#            retcode = subprocess.call(["samtools", "import", reflistfile_name, samfile_name, bamfile_name])
#            if retcode != 0:
#                raise SamtoolsError("%s: <samtools import> reported an error" % (library_id))
#            #try:
#            #    pysam.samimport(reflistfile_name, samfile_name, bamfile_name)
#            #except pysam.SamtoolsError:
#            #    logging.error('%s: Samtools reported error %s' % (library_id, 'hi'))
#        
#        # sort the BAM file
#        sorted_bamfile_prefix = os.path.splitext(bamfile_name)[0] + '.sorted'
#        if os.path.exists(sorted_bamfile_prefix + '.bam'):
#            logging.debug("%s: sorted BAM file exists" % (library_id))
#        else:
#            logging.debug('%s: sorting BAM file' % (library_id))
#            retcode = subprocess.call(["samtools", "sort", bamfile_name, sorted_bamfile_prefix])
#            if retcode != 0:
#                raise SamtoolsError("%s: <samtools sort> reported an error" % (library_id))
#        # for some reason the sort function automatically adds '.bam'
#        current_bamfile_name = sorted_bamfile_prefix + '.bam'
#        
#        # run 'fixmate' to correct insert size and mate information
#        fixmate_bamfile_name = os.path.splitext(current_bamfile_name)[0] + '.fixmate.bam'
#        if os.path.exists(fixmate_bamfile_name):
#            logging.debug("%s: fixmate BAM file exists" % (library_id))
#        else:
#            logging.debug('%s: running fixmate on BAM file' % (library_id))
#            retcode = subprocess.call(["samtools", "fixmate", current_bamfile_name, fixmate_bamfile_name])
#            if retcode != 0:
#                raise SamtoolsError("%s: <samtools fixmate> reported an error" % (library_id))
#        current_bamfile_name = fixmate_bamfile_name
#        
#        # remove duplicate reads (if user sets this parameter)
#        if unique_only:
#            unique_bamfile_name = os.path.splitext(bamfile_name)[0] + '.unique.bam'
#            if os.path.exists(unique_bamfile_name):
#                logging.debug("%s: unique sorted BAM file exists" % (library_id))
#            else:
#                logging.debug('%s: removing unique reads' % (library_id))
#                # TODO: use Picard here because TopHat does not have the proper information
#                retcode = subprocess.call(["samtools", "rmdup", current_bamfile_name, unique_bamfile_name])
#                if retcode != 0:
#                    raise SamtoolsError("%s: <samtools rmdup> reported an error" % (library_id))                
#            current_bamfile_name = unique_bamfile_name
#            
#        # index the sorted BAM file
#        if os.path.exists(current_bamfile_name + '.bai'):
#            logging.debug('%s: indexed BAM file exists' % (library_id))
#        else:
#            logging.debug('%s: indexing BAM file' % (library_id))
#            pysam.index(current_bamfile_name)
#        # we now have prepared a BAM file for analysis
#        return current_bamfile_name
    
#    def _insert_pileup(self, h5group, bamfile_name):
#        print 'starting process'
#        p = subprocess.Popen(['samtools', 'pileup', bamfile_name], shell=False, 
#                             stdin=subprocess.PIPE, 
#                             stdout=subprocess.PIPE, 
#                             stderr=subprocess.PIPE,
#                             close_fds=True)
#        debug_every = 100000    
#        for linenum, line in enumerate(p.stdout):
#            if linenum % debug_every == 0:
#                logging.debug('\t%d pileup columns processed' % linenum)
#            fields = line.strip().split('\t')
#            ref = fields[0]
#            pos = int(fields[1]) - 1
#            n = int(fields[3])
#            h5group._f_getChild(ref)[pos] = n
#        retcode = p.wait()

#    import veggie.app.alignment.bowtie as bowtie
#    reflist = bowtie.Bowtie.get_indexes_path() + bowtie.Bowtie.get_index('hg18') + '.fa.fai'
#    if not os.path.exists(reflist):
#        print 'cannot find reflist'
#        sys.exit(0)
#import logging
#import h5py
#
#from veggie.io.bedgraph import read_bedgraph
#
#
#def bedgraph_to_coverage(analysis_id, bedgraph_file, path_to_hdf5,
#                         unique_only=True, 
#                         dtype=np.float,
#                         compression='lzf'):
#    # reopen the hdf5 file
#    h5file = h5py.File(path_to_hdf5)
#    # create hdf5 subgroup for this sample name
#    logging.debug("        creating analysis group %s" % analysis_id)
#    # create subgroup for this analysis
#    subgroup = h5file.create_group(analysis_id)
#    # create the individual datasets for this hdf5 file
#    for chrom in get_chrom_names():        
#        dset_len = get_chrom_length(chrom)
#        dset = subgroup.require_dataset(chrom,
#                                        (dset_len,),
#                                        dtype=dtype,
#                                        compression=compression)            
#    # read bedgraph file and write contents to hdf5
#    fhd = open(bedgraph_file)    
#    for res in read_bedgraph(fhd):
#        chrom, start, end, value = res
#        subgroup[chrom][start:end] = value
#    fhd.close()
#    # store attributes for this analysis
#    logging.debug("        adding attributes")
#    subgroup.attrs['read_length'] = track.read_length
#    read_total = track.total_unique if unique_only else track.total
#    subgroup.attrs['read_total'] = read_total       
#    subgroup.attrs['unique_only'] = unique_only
#    subgroup.attrs['unique_reads'] = track.total_unique
#    subgroup.attrs['total_reads'] = track.total
#    
#    for k,v in subgroup.attrs.iteritems():
#        logging.debug('            %s: %s' % (k, v))
#    # finished this analysis
#    del track
#    h5file.close()