#
#    R library extension of Rsamtools
#    Copyright (C) 2012  Nuno A. Fonseca (nuno dot fonseca at gmail dot com)
#
#    This library is free software; you can redistribute it and/or
#    modify it under the terms of the GNU Library General Public
#    License as published by the Free Software Foundation; either
#    version 2 of the License, or (at your option) any later version.
#
#    This library is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#    Library General Public License for more details.
#
#    You should have received a copy of the GNU Library General Public
#    License along with this library; if not, write to the Free Software
#    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA
#*/
# $Date$
# $Id$
library(Rsamtools)
library(parallel)

# Public functions start with em.

if(packageVersion("Rsamtools") < "1.6.4")
  message("It is recommended that you use the latest version of 'Rsamtools' package.")

#options("cores"=multicore:::detectCores())

###################
# Predefined queries.
# Each query is a plain list with three slots:
#   where - optional post-scan filter specification (query engine below)
#   count - optional count specification
#   param - the Rsamtools ScanBamParam passed to scanBam/countBam
# Query Name, special op, scanBamParam
# unique?
# multimaps
# NOTE(review): every flag below sets isNotPassingQualityControls=TRUE;
# in Rsamtools that selects reads FAILING quality control -- confirm this
# polarity is intended for queries named "Valid entries" etc.
# TODO: use em.query to create the instances
queries <- list()
queries[["All entries"]]    <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=NA)))
queries[["Valid entries"]]  <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE)))
queries[["Alignments"]]     <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE)))
queries[["Unmapped"]]       <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=TRUE,isNotPassingQualityControls=TRUE)))
queries[["Not duplicated"]] <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isDuplicate=FALSE)))
queries[["Duplicated"]]     <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isDuplicate=TRUE)))
queries[["Alignments with Simple Cigar"]] <- list(where=NULL,count=NULL,param=ScanBamParam(simpleCigar=TRUE,flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE)))

# primary(reads)+not primary=Alignments
# NOTE(review): the two queries below pass the flag positionally (no 'flag=');
# this works because 'flag' is ScanBamParam's first formal argument.
queries[["Primary"]]        <- list(where=NULL,count=NULL,param=ScanBamParam(scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isNotPrimaryRead=FALSE)))
queries[["Not primary"]]    <- list(where=NULL,count=NULL,param=ScanBamParam(scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isNotPrimaryRead=TRUE)))

####
# Paired alignment
# Alignments with paired fragments
queries[["Paired"]]        <- list(where=NULL,count=NULL, param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isPaired=TRUE)))
# Alignments with both paired fragments mapped
queries[["Paired ok"]]     <- list(where=NULL,count=NULL, param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isPaired=TRUE,hasUnmappedMate=FALSE)))

# Alignments with only one of the fragments mapped
queries[["Unmapped mate"]] <- list(where=NULL,count=NULL, param=ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE,isNotPassingQualityControls=TRUE,isPaired=TRUE,hasUnmappedMate=TRUE)))

# First / second mate of a pair (mapped or not)
queries[["Paired 1st"]] <- list(where=NULL,count=NULL, param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE,isPaired=TRUE,isFirstMateRead=TRUE,isSecondMateRead=FALSE)))
queries[["Paired 2nd"]] <- list(where=NULL,count=NULL, param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE,isPaired=TRUE,isFirstMateRead=FALSE,isSecondMateRead=TRUE)))

# First / second mate, restricted to mapped reads
queries[["Paired 1st mapped"]] <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE,isPaired=TRUE,isFirstMateRead=TRUE,isSecondMateRead=FALSE,isUnmappedQuery=FALSE)))
queries[["Paired 2nd mapped"]] <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE,isPaired=TRUE,isFirstMateRead=FALSE,isSecondMateRead=TRUE,isUnmappedQuery=FALSE)))

# First / second mate, restricted to properly paired mapped reads
queries[["Paired 1st properly"]] <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE,isPaired=TRUE,isFirstMateRead=TRUE,isSecondMateRead=FALSE,isUnmappedQuery=FALSE,isProperPair=TRUE)))
queries[["Paired 2nd properly"]] <- list(where=NULL,count=NULL,param=ScanBamParam(flag=scanBamFlag(isNotPassingQualityControls=TRUE,isPaired=TRUE,isFirstMateRead=FALSE,isSecondMateRead=TRUE,isUnmappedQuery=FALSE,isProperPair=TRUE)))

# Usage examples:
#q<-getQueryParam("all")
#q<-getQueryParam("paired ok")
#q<-setQueryParamOp(q,list("unique"))
#q3<-setQueryParamOp(q,list("unique",c("grep","NM","[01]")))
#q3<-setQueryParamOp(q,list("unique","spliced",c("grep","NM","[01]")))
#qe.preprocess(q)
#qe.preprocess(q3)
#################################################################################
#################################################################################

# TRUE when 'field' is one of the core BAM fields known to
# scanBamWhat() (as opposed to an optional tag such as "NH" or "NM").
is.what.field <- function(field) {
  n.hits <- sum(field == scanBamWhat())
  if (n.hits > 0) {
    return(TRUE)
  }
  FALSE
}

# Extract the field name referenced by a 'count' specification.
# A count spec is either a single field name (e.g. "NH") or
# c("distinct", <field>).  "*" and a bare "distinct" carry no
# field, in which case NULL is returned.
get.count.field <- function(count) {
  if (is.null(count) || length(count) < 1) {
    return(NULL)
  }
  field <- NULL
  if (length(count) == 2) {
    if (count[1] == "distinct" && count[2] != "*") {
      field <- count[2]
    }
  } else if (length(count) == 1) {
    if (count[1] != "distinct" && count[1] != "*") {
      field <- count[1]
    }
  }
  field
}
#
# A query is valid when it is a list whose 'param' slot holds a
# ScanBamParam object.
# Fix: use inherits() instead of comparing class()[1] literally,
# so S4 subclasses of ScanBamParam are also accepted.
em.valid.query <- function(query) {
  if (!is.list(query)) {
    return(FALSE)
  }
  sb.param <- em.getQueryScanBamParam(query)
  if (!is.object(sb.param) || !inherits(sb.param, "ScanBamParam")) {
    return(FALSE)
  }
  TRUE
}
###################################################
# Names of the available predefined queries (see 'queries' above).
em.getQueryNames <- function() {
  names(queries)
}

# ===========================
# Builds an em.query "object" (a list with slots where/count/param),
# optionally based on a predefined query ('name', matched first
# exactly case-insensitively, then as a fixed substring) or on an
# existing 'query'.
# tag/which/what/flags extend the underlying ScanBamParam; 'count'
# sets the count specification (its field is added to what/tag);
# 'select' lists extra fields to retrieve; 'where' sets the filter.
# Returns NULL when 'name' does not identify exactly one query.
em.query <- function(name=NULL,query=NULL,tag=NULL,which=NULL,what=NULL,flags=NULL,count=NULL,where=NULL,select=NULL) {
  
  # start from an existing query, a fresh one, or a predefined one
  if (!is.null(query)) {
    q <- query
  } else if (is.null(name)) {
    q <- list(where=where,count=count,param=ScanBamParam())
  } else {
    # exact (case-insensitive) match first, then fixed substring match
    idx <- grep(paste("^",name,"$",sep=""),names(queries),ignore.case=TRUE)
    if (length(idx)!=1) {
      idx <- grep(name,names(queries),fixed = TRUE)
      if (length(idx)!=1 ) { return(NULL);}    
    }
    q <- queries[[idx]]
  }
  # select
  
  # Count - add field to selection
  if ( !is.null(count)) {
    q <- em.queryCount(q,count)
    # add the count field to the tag/what list
    field <- get.count.field(count)
    
    if ( !is.null(field) ) {
      if ( !is.what.field(field) ) {
        tag <- unique(append(tag,field))
      } else {
        what <- unique(append(what,field))
      }
    }
  }
  # route each selected field to either the tag or the what list
  for (field in select) {
    if ( !is.what.field(field) ) {
        tag <- unique(append(tag,field))
      } else {
        what <- unique(append(what,field))
      }
  }
  # extend the ScanBamParam only when something was requested
  if ( !is.null(tag) || !is.null(which) || !is.null(what) || !is.null(flags)) {
    sb.param <- em.getQueryScanBamParam(q)
    if ( !is.null(tag))   { bamTag(sb.param) <- unique(append(bamTag(sb.param),tag)) }
    if ( !is.null(which)) { bamWhich(sb.param) <- which }
    if ( !is.null(what))  { bamWhat(sb.param) <- unique(append(bamWhat(sb.param),what)) }
    if ( !is.null(flags)) { bamFlag(sb.param) <- bamFlagAND(bamFlag(sb.param,asInteger=TRUE),flags) }
    q <- em.setQueryScanBamParam(q,sb.param)
  }
  if ( !is.null(where)) { q <- em.setWhere(q,where) }
  return(q)
}


# --- query "object" accessors --------------------------------------
# A query is a plain list with slots: where (filter spec list),
# count (count spec) and param (ScanBamParam).

# ScanBamParam stored in the query.
em.getQueryScanBamParam <- function(query) {
  query[["param"]]
}

# Replace the query's ScanBamParam; returns the updated query.
em.setQueryScanBamParam <- function(query, param) {
  query[["param"]] <- param
  query
}

# 'where' filter specification of the query.
em.queryWhere <- function(query) {
  query[["where"]]
}

# Set the 'where' filter; 'where' must be a list of filter specs.
em.setWhere <- function(query, where) {
  if (!is.list(where)) {
    stop(sprintf("list expected and got a '%s'", typeof(where)))
  }
  # TODO: check if where is valid
  query[["where"]] <- where
  query
}

# Set the 'count' specification; returns the updated query.
em.queryCount <- function(query, count) {
  # TODO: validate count
  query[["count"]] <- count
  query
}

# 'count' specification of the query.
em.getqueryCount <- function(query) {
  query[["count"]]
}

###################################################
# A Simple Query engine

# supported where: unique, multimap, multialign, spliced, op,field/tag,value
# op=grep,=,>,<,>=,<=

# TRUE when 'where' is one of the supported filter operators.
is.valid.where <- function(where) {
  n.hits <- sum(valid.where() == where)
  if (n.hits > 0) {
    return(TRUE)
  }
  FALSE
}

# Operators accepted in a 'where' filter specification.
valid.where <- function() {
  c("grep", "==", "=", "gt", "ge", "lt", "le", "eq",
    ">", "<", ">=", "<=", "!=")
}
# Apply a comparison filter to vector 'v' and return a logical mask.
# Equality/inequality operators compare values as-is; ordering
# operators coerce the filter value to numeric first.
# NULL input is passed through; unknown operators yield NA.
qe.apply.where <- function(filter.op, filter.value, v) {

  if (is.null(v)) {
    return(v)
  }
  if (!is.vector(v)) {
    stop(sprintf("vector expected and got a '%s'", typeof(v)))
  }

  # empty switch arms fall through to the next one
  switch(filter.op,
         "!=" = v != filter.value,
         "eq" = ,
         "="  = ,
         "==" = v == filter.value,
         "gt" = ,
         ">"  = v > as.numeric(filter.value),
         "ge" = ,
         ">=" = v >= as.numeric(filter.value),
         "lt" = ,
         "<"  = v < as.numeric(filter.value),
         "le" = ,
         "<=" = v <= as.numeric(filter.value),
         NA
         )
}

# Preprocess a query before scanning: inspect the 'where' filter
# specification and extend the query's ScanBamParam so every
# field/tag the filter needs (and the flags implied by aliases such
# as "unique") is actually retrieved by scanBam.
# A query without 'where' is returned unchanged.
# op(grep, comp)-the variable should be explicitly included in the query through what or tag
qe.preprocess <- function(query) {
  where <- em.queryWhere(query)

  if ( is.null(where) ) { return(query) }
  param <- em.getQueryScanBamParam(query)  
  # where should be a list
  if ( !is.list(where) ) { stop(sprintf("list expected and got a '%s'",typeof(where))) }
  n.where <- length(where)
  for (i in c(1:n.where) ) {
    c.where <- where[[i]]
    if (length(c.where)>1) {
      # c(op, field[, value]): make sure 'field' will be retrieved
      if ( is.valid.where(c.where[1]) ) {
        field    <- c.where[2]
        if (is.what.field(field)) {
          bamWhat(param) <- unique(append(bamWhat(param),field))
        } else {# field from a tag
          bamTag(param) <- unique(append(bamTag(param),field))
        }
      }
    } else {
      # single-keyword aliases
      if ( c.where=="spliced" ) {
          bamWhat(param) <- unique(append(bamWhat(param),"cigar"))
      }
      if ( c.where=="multialign" ) {
        # if primary flag is on then we get the number of reads that align to multiple locations
        bamTag(param)  <- unique(append(bamTag(param),"NH"))
        bamFlag(param) <- bamFlagAND(bamFlag(param,asInteger=T),scanBamFlag(isUnmappedQuery=FALSE))
      }
      if ( c.where=="unique" || c.where=="multimap" ) {
        # unique - reads with a single alignment
        # multimap - reads that map to multiple locations
        # Uniquely mapped=primary reads that have only one alignment in the bam file (NH)
        # multimaps NH>1
        bamFlag(param) <- bamFlagAND(bamFlag(param,asInteger=T),scanBamFlag(isUnmappedQuery=FALSE,isNotPrimaryRead=FALSE))
        bamTag(param) <- unique(append(bamTag(param),"NH"))
      }
    }
  }
  query <- em.setQueryScanBamParam(query,param)
  return(query)
}
#
# Apply the query's 'where' filters to a raw scanBam result and,
# when a 'count' specification is present, reduce the result to a
# count (optionally a table of counts per distinct value).
# res.tmp is the list returned by scanBam (one element per range);
# only res.tmp[[1]] is filtered here.
# NOTE(review): when a where clause matches no known alias/operator,
# the 'sel' mask left over from the previous iteration is reused --
# confirm this is intended.
qe.parse.results <- function(query,res.tmp) {
  where <- em.queryWhere(query)
  if ( !is.null(where) ) {   
     # where should be a list
    if ( !is.list(where) ) { stop(sprintf("list expected and got a '%s'",typeof(where))) }
    n.where <- length(where)
    field <- ''
    where.value <-'' 
    for (i in c(1:n.where) ) {
      c.where <- where[[i]]
      # multi-element spec: c(op, field, value)
      if(length(c.where)>1) {
        where.value <- c.where[3]
        field <- c.where[2]
        c.where <- c.where[1]
      }
      #######
      # Alias: spliced reads have an N (skip) in the cigar
      if ( c.where=="spliced" ) {
        c.where <- "grep"
        field <- "cigar"
        where.value <- "N"
      }
      if ( c.where=="unique" ) {
        # Uniquely mapped=primary reads that have only one alignment in the bam file (NH)
        #"c("=","NH",1)
        sel <- res.tmp[[1]][["tag"]][["NH"]]==1
      }
      if ( c.where=="multimap" || c.where=="multialign") {
      # multimaps NH>1
      # if primary flag is on then we get the number of reads that align to multiple locations
        #"c(">","NH",1)
        sel <- res.tmp[[1]][["tag"]][["NH"]]>1
      }
      ################################
      # Grep and comparison operations
      if ( c.where == "grep" ) {
        if (is.what.field(field)) {
          sel <- grepl(where.value,x=res.tmp[[1]][[field]])
        } else {# tag field
          sel <- grepl(where.value,x=res.tmp[[1]][["tag"]][[field]])
        }      
      } else if ( is.valid.where(c.where) ) {
        if (is.what.field(field)) {
          sel <- qe.apply.where(c.where,where.value,res.tmp[[1]][[field]])
        } else {# tag field 
          sel <- qe.apply.where(c.where,where.value,res.tmp[[1]][["tag"]][[field]])
        }            
      }
      # apply the selection to all lists (tags included)
      sel.len <- length(sel)    
      if (sel.len>0) {
        for (n in names(res.tmp[[1]])) {
          if (n=="tag") {
            for (tagn in names(res.tmp[[1]]$tag)) {
              res.tmp[[1]][["tag"]][[tagn]] <- qe.res.apply.filter(res.tmp[[1]][["tag"]][[tagn]],sel,sel.len)
            }
          } else {
            res.tmp[[1]][[n]] <- qe.res.apply.filter(res.tmp[[1]][[n]],sel,sel.len)
          }
        }
      }
    }
  }
  # we can now do the counting
  # count distinct (var) = counts for each var value
  count <- query[["count"]]
  if ( is.null(count) || length(count)<1) { return(res.tmp) }
  if ( length(count) == 2 && count[1]=="distinct" && count[2]!="*") {
    return(list(count=resultlist2count(res.tmp,count[2],distinct=TRUE)))
  }
  return(list(count=resultlist2count(res.tmp,count[1])))
}

# Subset 'v' by the logical mask 'sel', but only when the lengths
# line up; vectors of a different length are returned untouched.
qe.res.apply.filter <- function(v, sel, sel.len) {
  if (length(v) == sel.len) {
    return(v[sel])
  }
  v
}
# End query engine
#--------------------------------------------------------------------

#####################################################################
# Query engine count/scan sequential functions

# Run countBam for 'query' on a single BAM file.
# Note: counting only uses the ScanBamParam, so the query's 'where'
# filters are not applied here ('where' should be NULL).
countBam.qe <- function(file, query, index = file, ...) {
  # TODO: check if file exists
  if (!em.valid.query(query)) {
    stop(paste("Invalid query ", query, " while processing", file, sep = ""))
  }
  prepped <- qe.preprocess(query)
  countBam(file = file, index = index,
           param = em.getQueryScanBamParam(prepped), ...)
}

# Run scanBam for 'query' on a single BAM file and post-process the
# result ('where' filtering plus optional counting).
scanBam.qe <- function(file, query, index = file, ...) {
  if (!em.valid.query(query)) {
    stop(paste("Invalid query ", query, " while processing", file, sep = ""))
  }
  prepped <- qe.preprocess(query)
  raw <- scanBam(file = file, index = index,
                 param = em.getQueryScanBamParam(prepped), ...)
  qe.parse.results(prepped, raw)
}
# End  Query engine count/scan sequential functions
#-----------------------------------------------------
# End  Query engine count/scan sequential functions
#-----------------------------------------------------

######################################################
# Parallel version of scan/countBam.qe

#
# Parallel scanBam: each reference sequence ('seqs', defaulting to
# every reference in the BAM header) is scanned by an mclapply worker
# and the per-reference results are merged (summed when the query
# carries a count specification).
scanBam.qe.p <- function(file,query,index=file,seqs=NULL,...) {
  if(!em.valid.query(query)) {   stop(paste("Invalid query ",query," while processing",file,sep="")) }
  if ( is.null(seqs) ) {
    seqs <- em.refsFromBam(file)
  }
  # worker: restrict the query to one full-length reference, then scan it
  em.scanBam.qe2 <- function(seq,file,index,query,refsLen,...) {
    param <- em.getQueryScanBamParam(query)
    bamWhich(param) <- GRanges(seq,IRanges(1,refsLen[seq]))
    query <- em.setQueryScanBamParam(query,param)
    r<-scanBam.qe(file=file,index=index,query=query,...)
    return(r)
  }
  # TODO: fix this: if bamWich defined then do not call em.scanBam.qe2
  refsLen <- em.refsLengthFromBam(file)

  v<-mclapply(seqs,em.scanBam.qe2,file=file,index=index,query=query,refsLen=refsLen,...)

  
  if (!is.null(em.getqueryCount(query))) {    
    return(mergeSumLists(v))
  }
  # merge results
  return(mergeLists(v))
}


##################
# countBam wrapper
# parallel version of countBam
# Parallel countBam: counts each reference sequence separately via
# mclapply, then folds the per-reference rows into a single summary
# row (records/nucleotides summed, range columns set to NA).
countBam.qe.p <- function(file,query,index=file,seqs=NULL,...) {

  if(!em.valid.query(query)) {   stop(paste("Invalid query ",query," while processing",file,sep="")) }
  if ( is.null(seqs) ) {
    seqs <- em.refsFromBam(file)
  }
  # worker: restrict the query to one full-length reference, then count it
  em.countBam.qe2 <- function(seq,file,index,query,refsLen,...) {
    param <- em.getQueryScanBamParam(query)
    bamWhich(param) <- GRanges(seq,IRanges(1,refsLen[seq]))
    query <- em.setQueryScanBamParam(query,param)
    #print(query)
    return(countBam.qe(file=file,index=index,query=query,...))
  }
  refsLen <- em.refsLengthFromBam(file)
  v<-mclapply(seqs,em.countBam.qe2,file=file,index=index,query=query,refsLen=refsLen,...)
  #v<-mclapply(seqs,em.countBam.qe2,file=file,index=index,query=query,...)
  # one row per reference; collapse the totals into row 1
  m <- data.frame(matrix(unlist(v),nrow=length(seqs),byrow=T))
  colnames(m) <- names(v[[1]])
  m[1,"records"]<-sum(m[,"records"])
  m[1,"nucleotides"]<-sum(m[,"nucleotides"])
  # the per-reference range columns are meaningless for the total
  m[1,1:4]<-NA
  # fix filename
  m[1,"file"] <- as.character(v[[1]]$file)
  return(as.vector(m[1,]))
}

################################
# Alias (public interface)

# Count the records matching 'query' in a BAM file (parallelized over
# reference sequences).  A NULL query counts everything.
em.countBam <- function(file, query, index = file, seqs = NULL, ...) {
  if (is.null(query)) {
    query <- em.query()
  }
  if (!em.valid.query(query)) {
    stop(paste("Invalid query ", query, " while processing", file, sep = ""))
  }
  bai <- paste(index, ".bai", sep = "")
  if (!file.exists(file) || !file.exists(bai)) {
    stop(paste("File ", file, " and/or index file ", index, ".bai missing", sep = ""))
  }
  countBam.qe.p(file, query, index, seqs = seqs, ...)
}

# Scan the records matching 'query' from a BAM file (parallelized over
# reference sequences).  A NULL query scans everything.
# Fixes: the 'index' argument is now forwarded to scanBam.qe.p (it
# was previously dropped, so a non-default index was silently
# ignored, unlike in em.countBam), and the unreachable return(NULL)
# after stop() was removed.
em.scanBam <- function(file, query, index = file, seqs = NULL, ...) {
  if (is.null(query)) {
    query <- em.query()
  }
  if (!em.valid.query(query)) {
    stop(paste("Invalid query ", query, " while processing", file, sep = ""))
  }
  if (!file.exists(file) || !file.exists(paste(index, ".bai", sep = ""))) {
    stop(paste("File ", file, " and/or index file ", index, ".bai missing", sep = ""))
  }
  return(scanBam.qe.p(file = file, query = query, index = index, seqs = seqs, ...))
}

# End Parallel version of scan/countBam.qe
#---------------------------------------------


#############################
# Utils
#
#

# Count elements in a (possibly nested) scanBam result list.
# 'field' selects which named vector to count ("*" means the first
# non-list vector found); with distinct=TRUE a table of value
# frequencies is returned instead of a length.
# Returns NULL when no matching vector is found.
# Fixes: iterate with seq_along() so an empty unnamed list no longer
# raises a subscript error, and take only the FIRST grep hit (a
# multi-element index would silently trigger recursive [[ ]]
# indexing and count the wrong element).
resultlist2count <- function(res, field = "*", distinct = FALSE) {

  # length or value-frequency table of a single vector
  do.count <- function(res, distinct) {
    if (distinct == TRUE) {
      return(table(res))
    }
    length(res)
  }

  if (!is.list(res)) {
    return(NULL)
  }
  n <- names(res)
  if (is.null(n)) {
    # no names to match: recurse until something counts
    for (i in seq_along(res)) {
      r <- resultlist2count(res[[i]], field, distinct)
      if (!is.null(r)) {
        return(r)
      }
    }
  } else {
    # named lookup first (fixed substring match)
    if (field != "*") {
      idx <- grep(field, n, fixed = TRUE)
      if (length(idx) > 0) {
        return(do.count(res[[idx[1]]], distinct))
      }
    }
    # lookup failed or field=="*"
    for (idx in n) {
      if (field == "*" && !is.list(res[[idx]])) {
        return(do.count(res[[idx]], distinct))
      }
      r <- resultlist2count(res[[idx]], field, distinct)
      if (!is.null(r)) {
        return(r)
      }
    }
  }
  NULL
}

#
# Merge a list of scanBam result lists element-wise into a single one.
# Fix: with a single element the element itself is now returned
# (previously the enclosing list 'l' was returned, so the result
# shape differed from the multi-element case and from
# mergeSumLists(), breaking downstream indexing like result[[1]]$isize).
mergeLists <- function(l) {
  n <- length(l)
  if (n == 1) {
    return(l[[1]])
  }
  r <- l[[1]]
  r.names <- names(r)
  for (i in c(2:n)) {
    r <- mergeList(l[[i]], r)
  }
  names(r) <- r.names
  return(r)
}

# Recursively merge lSrc into lDst: lists are merged slot-by-slot
# (by position, keeping lDst's names) and plain vectors are
# concatenated (lDst first).  Anything else is returned unchanged.
mergeList <- function(lSrc, lDst) {
  merge.at <- function(i, lDst, lSrc) {
    mergeList(lSrc[[i]], lDst[[i]])
  }
  if (is.list(lDst)) {
    if (length(lSrc) == 0) {
      return(lSrc)
    }
    nms <- names(lDst)
    merged <- mclapply(c(1:length(nms)), merge.at, lDst, lSrc)
    names(merged) <- nms
    return(merged)
  }
  if (is.vector(lDst)) {
    # assume that lSrc is also a vector
    # TODO: assert!
    return(append(lDst, lSrc))
  }
  # neither a list nor a vector: hand lDst back untouched
  lDst
}

#
# Sum a list of (possibly nested) count structures into a single one,
# folding with sumList() and keeping the names of the first element.
mergeSumLists <- function(l) {
  n <- length(l)
  if (n == 1) {
    return(l[[1]])
  }
  acc <- l[[1]]
  acc.names <- names(acc)
  for (i in c(2:n)) {
    acc <- sumList(l[[i]], acc)
  }
  names(acc) <- acc.names
  acc
}

# Recursively add two count structures: lists are summed slot-by-slot
# (by name when names exist, by position otherwise), named vectors
# are summed per name over the union of both names, and plain
# numeric values are added directly.  An empty/NULL side yields the
# other side unchanged.
sumList <- function(lSrc,lDst) {
  sumList2 <- function(i,lDst,lSrc)  { return (sumList(lSrc[[i]],lDst[[i]]))}
  if(is.null(lSrc) || !length(lSrc)) { return(lDst);}
  if(is.null(lDst) || !length(lDst)) { return(lSrc);}
  if(is.list(lDst) && length(lDst)>=1) {
      n <- names(lDst)
      if (length(n)>=1) { # use names
        lDst <- mclapply(n,sumList2,lDst,lSrc)
      } else {
        lDst <- mclapply(c(1:length(lDst)),sumList2,lDst,lSrc)
      }
      names(lDst) <- n
  } else if (length(names(lDst))>0) {
    # assume that lSrc is also a vector
    # TODO: assert!
    # per-name sum over the union of both vectors' names
    unique.names<-unique(append(names(lDst),names(lSrc)))
    msum<-function(x,v) {sum(v[names(v)==x])}
    lDst<-unlist(mclapply(unique.names,msum,append(lDst,lSrc)))
    names(lDst) <- unique.names   
  } else if (length(lDst)>=1) {
    # unnamed scalars/vectors: element-wise addition
    lDst <- lSrc+lDst
  }
  #else if (!is.null(lSrc)) {
  #  lDst <- lSrc+lDst }
  return(lDst)
}

# Return a parallel list holding table() of every non-list element
# of 'l'; sub-lists are processed recursively up to 'level' levels
# deep (deeper sub-lists are dropped).
list2table <- function(l, level = 3) {
  lt <- list()
  for (n in names(l)) {
    if (!is.list(l[[n]])) {
      lt[[n]] <- table(l[[n]])
    } else if (level > 1) {
      lt[[n]] <- list2table(l[[n]], level - 1)
    }
  }
  lt
}
#
# Count the elements of 'v' that satisfy the comparison
# 'filter.where' against 'filter.value'.
# Unknown operators yield NA.
# Fix: the ">=" branch previously used a strict ">" comparison.
countInVector <- function(filter.where, filter.value, v) {

  if (!is.vector(v)) {
    stop(sprintf("vector expected and got a '%s'", typeof(v)))
  }
  #if (!is.numeric(filter.value)) { stop(sprintf("expected numeric value and got '%s'",filter.value)) }

  # empty switch arms fall through to the next one
  switch(filter.where,
         "eq" = ,
         "="  = sum(v == filter.value),
         "gt" = ,
         ">"  = sum(v > filter.value),
         "ge" = ,
         ">=" = sum(v >= filter.value),
         "lt" = ,
         "<"  = sum(v < filter.value),
         "le" = ,
         "<=" = sum(v <= filter.value),
         NA
         )
}

#############
# Simple logging helpers

# info messages are emitted only while pverbose is non-zero
pverbose <- 1

# status message, suppressed when pverbose is 0
pinfo <- function(...) {
  if (pverbose) {
    message(paste("[INFO] ",...,"\n",sep=""))
  }
}

# recoverable problem
pwarning <- function(...) {
  warning(paste("[WARNING] ",...,"\n",sep=""))
}

# error report; note this raises an R warning, not an R error
perror <- function(...) {
  warning(paste("[ERROR] ",...,"\n",sep=""))
}

##############################
# Reference sequence names declared in the BAM header,
# or NULL when 'bam.file' is not a readable file name.
em.refsFromBam <- function(bam.file) {
  if (!is.character(bam.file) || !file.exists(bam.file)) {
    return(NULL)
  }
  header <- scanBamHeader(bam.file)[[1]]
  names(header$targets)
}

# Named vector of reference sequence lengths from the BAM header,
# or NULL when 'bam.file' is not a readable file name.
em.refsLengthFromBam <- function(bam.file) {
  if (!is.character(bam.file) || !file.exists(bam.file)) {
    return(NULL)
  }
  header <- scanBamHeader(bam.file)[[1]]
  header$targets
}

#################################################################
# Parallel count versions of some Rsamtools functions


###################
# Parallel counting
# Compute the per-file counts data frame for each of several BAM files
# in parallel; returns a list with one entry per file.
em.bams.counts.df <- function(bams.files) {
  stopifnot(file.exists(bams.files))
  mclapply(bams.files, em.bam.counts.df)
}


#################################################################
# Single bam file
#
#
# Prefix used when locating/creating the .bai index of a BAM file;
# currently simply the BAM file name itself.
bam.index.prefix <- function(bam.file) {
  bam.file
}

# Ensure a .bai index exists for 'bam.file', creating one with
# indexBam() when missing; returns the index file name.
em.bam.index.file <- function(bam.file) {
  stopifnot(is.character(bam.file))

  bai.file <- paste(bam.file,".bai",sep="")
  if (file.exists(bai.file)) {
    pinfo("Cached index file ",bai.file," for ",bam.file,"...")
  } else {
    pinfo("Creating index file ",bai.file," for ",bam.file,"...")
    bai.file <- indexBam(bam.file)
    pinfo("DONE")
  }
  bai.file
}

# + bam.file
# Mapper "flavour" string derived from the @PG header record:
# program ID plus major version (e.g. "TopHat-1").
# NA when the file has no usable @PG record.
em.bam.flavour <- function(bam.file) {
  data <- bam.flavour.data(bam.file)
  if (length(data) == 1) {
    # bam.flavour.data() found no @PG record
    return(NA)
  }
  major <- sub("\\..*", "", data["VN"])
  paste(data["ID"], major, sep = "-")
}

# Flavours "supported"
em.flavours <- function() {
  c("tophat", "bowtie")
}
#
# Parse the @PG header record of a BAM file into a named character
# vector (e.g. ID/PN/VN/CL -> values); NA when no @PG record exists.
# Fixes: removes an unused local initialization (v <- c(1:2)) and
# splits each field only at the FIRST ':' so values containing
# colons (e.g. the CL command line) no longer corrupt the result.
bam.flavour.data <- function(bam.file) {

  stopifnot(is.character(bam.file))

  header <- scanBamHeader(bam.file)
  pg <- header[[1]]$text$`@PG`
  if (is.null(pg)) { return(NA) }
  # each entry is "KEY:value"; split at the first colon only
  keys <- sub(":.*$", "", pg)
  values <- sub("^[^:]*:", "", pg)
  m <- values
  names(m) <- keys
  m
}
#
# ASCII-serialize an R object (e.g. a ScanBamParam) into a character
# string suitable for storing or printing.
serialize.param <- function(param) {
  rawToChar(serialize(param, NULL, ascii = TRUE))
}

# Inverse of serialize.param(): rebuild the object from its
# ASCII-serialized string form.
unserialize.param <- function(param) {
  stopifnot(is.character(param))
  unserialize(charToRaw(param))
}

####################################
# Append 'vals.v' as a new bottom row of data frame 'df',
# preserving the column names.
add2dataframe <- function(df, vals.v) {
  col.names <- colnames(df)
  df <- rbind(df, rep(NA, ncol(df)))
  colnames(df) <- col.names
  df[nrow(df), ] <- vals.v
  df
}


# Depth-first search for the first non-empty numeric vector inside a
# (possibly nested) list; returns the sum of its values, or NULL
# when none is found.
# Fix: iterate with seq_along() -- 1:length(l) yields c(1, 0) for an
# empty list and raised a subscript error.
sum.vector.list2sum <- function(l) {
  if (is.null(l)) {
    return(NULL)
  }
  if (is.list(l)) {
    for (i in seq_along(l)) {
      r <- sum.vector.list2sum(l[[i]])
      if (!is.null(r)) {
        return(r)
      }
    }
  } else if (length(l) > 0 && is.numeric(l)) {
    return(sum(l))
  }
  NULL
}


# Run 'query' against a BAM file and append the row
# c(name, count, nucleotides, plus-count, minus-count) to 'df'.
# Queries without a 'where' filter use the fast countBam path (which
# also yields nucleotide counts); filtered queries must go through
# scanBam and get NA nucleotides.
# With STRAND=TRUE the query is re-run restricted to the minus
# strand and the plus-strand count is derived as total - minus.
addQueryResults2df <- function(df,name,query,bam.file,bai.prefix,STRAND=FALSE) {
  pinfo(name)
  where <- em.queryWhere(query)
  if (is.null(where)) {
    all <- em.countBam(file=bam.file,index=bai.prefix,query=query)
    nucl <- all$nucleotides
    all  <- all$records
  } else {    
    all <- em.scanBam(file=bam.file,index=bai.prefix,query=query)
    if (is.null(all)||length(all[[1]])==0) { all <- 0} else { all <- all$count }
    nucl <- NA
    
  }

  if (STRAND) {
     # minus strand
    paramo <- em.getQueryScanBamParam(query)
    bamFlag(paramo) <- bamFlagAND(bamFlag(paramo,asInteger=TRUE),scanBamFlag(isMinusStrand=TRUE))
    query <- em.setQueryScanBamParam(query,paramo)
    if (is.null(where)) {
      m <- em.countBam(file=bam.file,index=bai.prefix,query=query)
      m <- m$records
    } else {
      m <- em.scanBam(file=bam.file,index=bai.prefix,query=query)
      if (is.null(m)||length(m[[1]])==0) { m <- 0} else { m <- m$count }
    }
    # plus strand = total minus the minus-strand count
    p <- all-m
  } else {
    p <- 0
    m <- 0
  }
  #print(c(name,all$records,all$nucleotides,p,m,serialize.param(paramo)))
  #pinfo(df,name,all,nucl,p,m)
  add2dataframe(df,c(name,all,nucl,p,m))
}

#
# returns a vector with the insert sizes
# Vector of insert sizes (isize) for the paired reads selected by
# 'query'; defaults to the predefined "Paired" query.
em.bam.pe.isize <- function(bam.file, query = NULL, ...) {

  stopifnot(file.exists(bam.file))
  if (is.null(query)) {
    query <- em.query(name = "Paired", what = "isize")
  } else {
    # make sure the supplied query actually retrieves isize
    sb.param <- em.getQueryScanBamParam(query)
    bamWhat(sb.param) <- unique(append(bamWhat(sb.param), "isize"))
    query <- em.setQueryScanBamParam(query, sb.param)
  }

  # counting must be off so the raw isize vector is returned
  query <- em.queryCount(query, NULL)
  em.scanBam(bam.file, query)[[1]]$isize
}


###################
# +bam file
# -returns a dataframe with counts
# optionally provide the total number of reads (some bam files don't include the unmapped reads)
# Build, for one BAM file, a data frame of read/alignment statistics
# with one row per label and columns
# Label / Count / Nucleotides / Count plus / Count minus.
# With use.cache=TRUE a previously written <bam>.counts.csv is reused.
# 'num.reads' (optional) is the total number of reads and is used to
# derive the unmapped count when the BAM omits unmapped reads.
em.bam.counts.df <- function(bam.file,use.cache=FALSE,num.reads=NA) {

  stopifnot(is.character(bam.file))  
  # TODO: check files timestamp?
  csv.file <- paste(bam.file,".counts.csv",sep="")
  cnames <- c("Label","Count","Nucleotides","Count plus","Count minus")
  
  if ( use.cache && file.exists(csv.file) ) {    
    pinfo("Cached counts file ",csv.file,"...")
    df <- as.data.frame(read.table(csv.file))
    colnames(df) <- cnames
    return(df[,-1])
  } 
  ################
  #SAM/BAM flavour...i.e., program that produced it
  flavour <- em.bam.flavour(bam.file)
  
  # create the index file (if necessary)
  bai.prefix <- bam.index.prefix(bam.file)
  em.bam.index.file(bam.file)
  
  # Data frame to store the  data
  df<-data.frame(matrix(ncol=5,nrow=0))
  colnames(df) <- cnames

  ################
  # All entries
  # g1.strand flags which of the g1 queries also get per-strand counts
  g1 <- c("All entries","Valid entries","Unmapped","Not duplicated","Duplicated","Alignments","Alignments with Simple Cigar",
         "Primary","Not primary","Paired")
  g1.strand <- c(FALSE,FALSE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE)
  for ( i in c(1:length(g1))) {
    q <- g1[i]
    df <- addQueryResults2df(df,q,query=em.query(name=q),bam.file,bai.prefix,STRAND=g1.strand[i])
  }
  
  # Paired
  g2 <- c("Paired ok","Unmapped mate",
              "Paired 1st","Paired 1st mapped","Paired 1st properly",
              "Paired 2nd","Paired 2nd mapped","Paired 2nd properly")

  # last row so far is "Paired": skip the pair queries for single-end data
  if ( df[nrow(df),"Count"]==0 ) {
    v <- rep(NA,ncol(df))
    for (l in g2) {
      v[1] <- l
      df <- add2dataframe(df,v)
    }
  } else {
    for ( i in c(1:length(g2))) {
      q <- g2[i]
      df <- addQueryResults2df(df,q,em.query(q),bam.file,bai.prefix,STRAND=TRUE)
    }
  }
  ##################
  # isNotPrimaryRead
  # A non-primary
  # read might result when portions of a read aligns to multiple
  #        locations or when we have multimaps


  ##########
  # Uniquely mapped
  # 0x100 = 256 (10) = isPrimaryRead flag
  # Uniquely mapped=primary reads that have only one alignment in the bam file (NH)
  # 
  #q<-em.query(name="Primary",where=list("unique"),count="*")
  #df <- addQueryResults2df(df,"Reads",query=q,bam.file,bai.prefix,STRAND=TRUE)
  
  #q<-em.setWhere(em.query("Alignments"),list("multialign"))
  #df <- addQueryResults2df(df,"Alignments (multi)",q,bam.file,bai.prefix,STRAND=TRUE)
  
  q<-em.query(name="Primary",where=list("multimap"),count="*")
  df <- addQueryResults2df(df,"Multimap reads",query=q,bam.file,bai.prefix,STRAND=TRUE)

  q<-em.query(name="Primary",where=list("unique"),count="*")
  df <- addQueryResults2df(df,"Uniquely mapped reads",query=q,bam.file,bai.prefix,STRAND=TRUE)


  q<-em.query(name="Alignments",where=list("spliced"),count="*")
  df <- addQueryResults2df(df,"Alignments (spliced)",q,bam.file,bai.prefix,STRAND=TRUE)

  q<-em.query(name="Alignments",where=list("unique","spliced"),count="*")
  df <- addQueryResults2df(df,"Spliced reads",q,bam.file,bai.prefix,STRAND=TRUE)

  # Multimaps
  # One needs to  distinguish between multimaps and reads that have multiple entries because they were partitioned (because map to multiple reference sequences)
  # Multimaps: unique secondary reads that appear in the bam file more than once and are not spliced
  # how to exclude the spliced reads?

  # Hits
  # H0-perfect hits
  # H1-1 difference hit
  # H2-2 difference hit


  # TODO: rewrite
  # Some aligner do not set H0, H1, H2
  # ex. tophat1
  #if ( df[nrow(df),"Count"]==df[nrow(df)-1,"Count"]) {
  
  # Mismatch (NM tag) histograms per strand, then summed
  q<-em.query("Alignments",tag=c("NM"),flags=scanBamFlag(isMinusStrand=TRUE),count=c("distinct","NM"))
  m <- em.scanBam(bam.file,query=q)[[1]]
  q<-em.query("Alignments",tag=c("NM"),flags=scanBamFlag(isMinusStrand=FALSE),count=c("distinct","NM"))
  p <- em.scanBam(bam.file,query=q)[[1]]

  pinfo("Errors")
  # Some mappers may not include the NM  field
  if (length(m)==0) { m<-0 }
  if (length(p)==0) { p<-0 }
  all <- sumList(p,m)
  df <- add2dataframe(df,c("Alignments (perfect)",all["0"],NA,p["0"],m["0"]))
  df <- add2dataframe(df,c("Alignments (1-difference)",all["1"],NA,p["1"],m["1"]))  
  df <- add2dataframe(df,c("Alignments (2-difference)",all["2"],NA,p["2"],m["2"]))
  
  df <- as.data.frame(df)
  rownames(df) <- as.character(df[,1])
  #######################################
  # Mapper specific
  if ( length(grep("^tophat",flavour,ignore.case=T))>0) {
    l <- tophat.get.unmapped.reads(bam.file)
    df["Unmapped",] <- c("Unmapped",l[1],NA,l[2],l[3])
  }
  #######################################
  # Update unmapped based on the value give in num.reads
  if ( !is.na(num.reads) ) {
    # TODO: distinguish between left and right unaligned reads
    stopifnot(is.numeric(num.reads))
    unmapped <- num.reads-as.numeric(df["Primary","Count"])
    df["Unmapped",] <- c("Unmapped",unmapped,NA,0,0)
  }
  # persist and re-read so the returned frame matches the cached form
  write.table(df,csv.file)

  df <- as.data.frame(read.table(csv.file))
  colnames(df) <- cnames
  return(df[,-1])
}

# TODO: take into account the flavour
# Summary statistics table built from `samtools flagstat` output
# (requires the samtools binary on the PATH; output is cached in
# <bam>.stats.sum.csv).  The "%" column is recomputed as percentages
# relative to the total / paired-in-sequencing counts.
# TODO: take into account the flavour
em.bam.sum.stats.table <- function(bam.file) {

  stopifnot(is.character(bam.file))

  csv.file <- paste(bam.file,".stats.sum.csv",sep="")
  if ( file.exists(csv.file) ) {    
    pinfo("Cached summary stats file ",csv.file,"...")      
  } else {
    pinfo("Generating summary stats file ",csv.file,"...")      
    # flagstat lines look like "N + M label"; turn them into CSV
    system(paste("samtools flagstat ",bam.file," | sed 's/ + /,/' | sed 's/ /,/' > ",csv.file,sep=""))
    pinfo("DONE")
  }
  stats <- read.csv(csv.file,header=FALSE)
  # note not passed is ignored
  colnames(stats) <- c("passed","%","flag")
  # give a warning if there are values greater than 0
  if ( sum(stats[,"%"])>0 ) {
    pwarning("bam.sum.stats.table: Found values greater than 0 in Ignored column\n",bam.file)
    print(stats)
  }
  # strip trailing parenthesised details from the flag labels
  stats[,"flag"] <- gsub(" \\(.*$","",stats[,"flag"])
  # fix: keep the mapQ qualifier on the last (duplicated) label
  stats[nrow(stats),"flag"] <- paste(stats[nrow(stats),"flag"]," (maqQ>=5)",sep="")
  rownames(stats)<-stats[,"flag"]
  stats <- stats[,-3]
  # percentages
  stats["in total","%"] <- NA
  stats["duplicates","%"] <- stats["duplicates","passed"]/stats["in total","passed"]
  stats["mapped","%"] <- stats["mapped","passed"]/stats["in total","passed"]
  stats["paired in sequencing","%"] <- NA
  stats["read1","%"] <- stats["read1","passed"]/stats["paired in sequencing","passed"]
  stats["read2","%"] <- stats["read2","passed"]/stats["paired in sequencing","passed"]
  stats["properly paired","%"] <- stats["properly paired","passed"]/stats["paired in sequencing","passed"]
  stats["with itself and mate mapped","%"] <- stats["with itself and mate mapped","passed"]/stats["paired in sequencing","passed"]
  stats["singletons","%"] <- stats["singletons","passed"]/stats["paired in sequencing","passed"]
  stats["with mate mapped to a different chr","%"] <- stats["with mate mapped to a different chr","passed"]/stats["paired in sequencing","passed"]
  stats["with mate mapped to a different chr (maqQ>=5)","%"] <- stats["with mate mapped to a different chr (maqQ>=5)","passed"]/stats["paired in sequencing","passed"]
  stats[,"%"] <- stats[,"%"]*100
  # Include special cases
  # unmapped reads
  
  stats
}

#
# Number of reads mapped for each reference sequence
em.bam.readsperseq.table <- function(bam.file) {
  # Number of reads mapped to each reference sequence, via 'samtools idxstats'.
  #
  # bam.file: path to an indexed BAM file (a .bai index must exist).
  # Returns a data.frame with columns Reference, RefLen, Mapped, NotMapped
  # (one row per reference sequence, plus idxstats' trailing '*' row).

  stopifnot(is.character(bam.file))

  csv.file <- paste(bam.file,".stats.mapped.sum.csv",sep="")
  pinfo("Generating summary stats file ",csv.file,"...")
  # tr turns the tab-separated idxstats output into CSV
  cmd <- paste("samtools idxstats ",bam.file," | tr '\t' ',' > ",csv.file,sep="")
  status <- system(cmd)
  if ( status != 0 ) {
    # remove the partial/empty file so read.csv does not fail confusingly
    if ( file.exists(csv.file) ) {
      file.remove(csv.file)
    }
    stop("samtools idxstats failed for ",bam.file,call.=FALSE)
  }
  pinfo("DONE")
  stats <- read.csv(csv.file,header=FALSE)
  colnames(stats) <- c("Reference","RefLen","Mapped","NotMapped")
  stats
}


###########################
# Mapper specific
tophat.get.unmapped.reads <- function(bam.file) {
  # Count the unmapped reads reported by tophat alongside a BAM file.
  #
  # bam.file: path to the tophat BAM file.
  # Returns a numeric vector c(total, left, right).

  left <- tophat.get.unmapped(bam.file,side="left")
  right <- tophat.get.unmapped(bam.file,side="right")
  # A missing unmapped file yields NULL; treat a missing left file as zero,
  # otherwise c(NULL,NULL,0) would collapse into a malformed length-1 vector
  # and break callers that index the result with l[1], l[2], l[3].
  if ( is.null(left) ) {
    left <- 0
  }
  if ( is.null(right) ) {
    # single-end data: all unmapped reads come from the "left" file
    return(c(left,left,0))
  }
  return(c(left+right,left,right))
}
tophat.get.unmapped <- function(bam.file,side="left") {
  # Count the reads in tophat's unmapped_{left,right}.fq.z file.
  #
  # bam.file: path to the tophat BAM file; the unmapped fastq is searched
  #           first under <dir>/<prefix>/ and then next to the BAM file.
  # side: "left" for the left mate, anything else for the right mate.
  # Returns the number of reads (fastq lines / 4), or NULL when no
  # unmapped file is found.

  dir <- dirname(bam.file)
  file.prefix <- sub(".(pe|se).*$","",basename(bam.file))
  # pick the unmapped fastq name for the requested mate
  if (side=="left") {
    un <- "unmapped_left.fq.z"
  } else {
    un <- "unmapped_right.fq.z"
  }
  tent.file <- paste(dir,file.prefix,un,sep="/")
  if ( !file_test("-f",tent.file) ) {
    # fall back to the same directory as the bam.file
    tent.file <- paste(dir,un,sep="/")
    if ( !file_test("-f",tent.file) ) {
      return(NULL)
    }
  }
  # counting lines with zcat|wc is faster than reading the fastq into R
  cmd <- paste("zcat ",tent.file," | wc -l",sep="")
  total.lines <- system(cmd,intern=TRUE)
  unmapped.reads <- as.integer(total.lines)/4
  return(unmapped.reads)
}
# End tophat specific code
#########################################
# bam.files <- c("inst/extdata/toy.bam","inst/extdata/se.tophat.bam","inst/extdata/pe.tophat.bam","inst/extdata/pe2.tophat.bam")

######################################################################################
# TODO: flavour specific queries
# Bowtie1
# NM:i:<N> Aligned read has an edit distance of <N>.
# CM:i:<N> Aligned read has an edit distance of <N> in colorspace. This field is present in addition to the NM field in -C/--color mode, but is omitted otherwise.
# MD:Z:<S> For aligned reads, <S> is a string representation of the mismatched reference bases in the alignment. See SAM format specification for details. For colorspace alignments, <S> describes the decoded nucleotide alignment, not the colorspace alignment.
# XA:i:<N> Aligned read belongs to stratum <N>. See Strata for definition.
# XM:i:<N> For a read with no reported alignments, <N> is 0 if the read had no alignments. If -m was specified and the read's alignments were suppressed because the -m ceiling was exceeded, <N> equals the -m ceiling + 1, to indicate that there were at least that many valid alignments (but all were suppressed). In -M mode, if the alignment was randomly selected because the -M ceiling was exceeded, <N> equals the -M ceiling + 1, to indicate that there were at least that many valid alignments (of which one was reported at random).

# Bowtie2
#AS:i:<N>  Alignment score. Can be negative. Can be greater than 0 in --local mode (but not in --end-to-end mode). Only present if SAM record is for an aligned read.
# XS:i:<N> Alignment score for second-best alignment. Can be negative. Can be greater than 0 in --local mode (but not in --end-to-end mode). Only present if the SAM record is for an aligned read and more than one alignment was found for the read.
# YS:i:<N> Alignment score for opposite mate in the paired-end alignment. Only present if the SAM record is for a read that aligned as part of a paired-end alignment.
# XN:i:<N> The number of ambiguous bases in the reference covering this alignment. Only present if SAM record is for an aligned read.
# XM:i:<N> The number of mismatches in the alignment. Only present if SAM record is for an aligned read.
# XO:i:<N> The number of gap opens, for both read and reference gaps, in the alignment. Only present if SAM record is for an aligned read.
# XG:i:<N>  The number of gap extensions, for both read and reference gaps, in the alignment. Only present if SAM record is for an aligned read.
#NM:i:<N> The edit distance; that is, the minimal number of one-nucleotide edits (substitutions, insertions and deletions) needed to transform the read string into the reference string. Only present if SAM record is for an aligned read.
#YF:Z:<N> String indicating reason why the read was filtered out. See also: Filtering. Only appears for reads that were filtered out.
# MD:Z:<S> A string representation of the mismatched reference bases in the alignment. See SAM format specification for details. Only present if SAM record is for an aligned read.

# TopHat2
# Reads can be aligned to potential fusion transcripts if the --fusion-search option is specified. The fusion alignments are reported in SAM format using custom fields XF and XP (see the output format)

# MapSplice
# IH/HI are used to indicate when a read is aligned to multiple places



## #############################
## #
## select2samfile <- function(bam.file,obamfile,query,index=paste(bam.file,".bai",sep="")) {
##   require(rbamtools)
##   bam.file<-"/home/nf/Research/myR/src/myBAM/examples/toy.bam"
##   index<-paste(bam.file,".bai",sep="")
##   obamfile <- "/tmp/lixo.bam"
##   query <- em.query(name="Primary",where=list("unique"))


##   query <- qe.preprocess(query)
##   scanBamParam <- em.getQueryScanBamParam(query)
  
##   reader<-bamReader(bam.file)
##   isOpen(reader)
    
##   loadIndex(reader,index)
##   if(!index.initialized(reader)) {
##     # error
##   }

##   writer<-bamWriter(reader,obamfile)
  

##     # return TRUE or false
##   flagsOK <- function(align,scanBamParam) {
##     #align.flags<-flag(align)
##     #query <- q
##     query.flags <- bamFlag(scanBamParam)
##     for ( flag in names(query.flags) ) {
##       flag.value <- query.flags[flag]
##       if (!is.na(flag.value)) {
##         switch(flag,
##                "isPaired"          = r <- (paired(align)==flag.value),
##                "isProperPair"      = r <- (properPair(align)==flag.value),
##                "isUnmappedQuery"   = r <- (unmapped(align)==flag.value),
##                "hasUnmappedMate"   = r <- (mateUnmapped(align)==flag.value),
##                "isMinusStrand"     = r <- (reverseStrand(align)==flag.value),
##                "isMateMinusStrand" = r <- (mateReverseStrand(align)==flag.value),
##                "isFirstMateRead"   = r <- (firstInPair(align)==flag.value),
##                "isSecondMateRead"  = r <- (secondInPair(align)==flag.value),
##                "isNotPrimaryRead"  = r <- (secondaryAlign(align)==flag.value),
##                # there is a bug in Rsamtools Rsamtools_1.8.4 where isNotPassingQualityControls is always set to FALSE
##                #"isNotPassingQualityControls" = r <- (failedQC(align)==(!flag.value)),
##                "isNotPassingQualityControls" = r <- TRUE,
##                "isDuplicate"       = r <- (pcrORopt_duplicate(align)==flag.value)
##                )
##         if ( r==FALSE ) {
##           pinfo("Failed ",flag," ",flag.value)
##           return (FALSE)
##         }
##       }
##     }
##     return(TRUE)
##   }

##   rbamtools.getCigarString <- function(align) {
##     v <- c()
##     a <- cigarData(align)
##     for (i in c(1:length(a[[1]]))) { v<-append(v,paste(a[[1]][i],a[[2]][i],sep="")) }
##     return(paste(v,sep="",collapse=""))
##   }
##   #################################################
##   # Find the appropriate refID
##   #
##   seqname2refId <- function(reader,seqname) {
##       #  rfd: data.frame The returned data.frame contains three columns: For
##       #  each reference sequence, the corresponding row contains the
##       #   Reference-ID (1st column, refID), the Reference name (2nd column,
##       #   refName) and the length of the Reference sequence (3rd column,
##       #   refLength).
##       rdf<-getRefData(reader)
##       loc <- rdf[,"SN"]==seqname
##       if (sum(loc)==0) { return(NA) }
##       Ids <- rdf[,"ID"]
##       return(Ids[loc])
##     }
##   refId2seqname <- function(reader,refid) {
##       #  rfd: data.frame The returned data.frame contains three columns: For
##       #  each reference sequence, the corresponding row contains the
##       #   Reference-ID (1st column, refID), the Reference name (2nd column,
##       #   refName) and the length of the Reference sequence (3rd column,
##       #   refLength).
##       rdf<-getRefData(reader)
##       loc <- rdf[,"ID"]==refid
##       if (sum(loc)==0) { return(NA) }
##       Names <- rdf[,"SN"]
##       return(Names[loc])
##   }
##   seqname2Length <- function(reader,seqname) {
##       #  rfd: data.frame The returned data.frame contains three columns: For
##       #  each reference sequence, the corresponding row contains the
##       #   Reference-ID (1st column, refID), the Reference name (2nd column,
##       #   refName) and the length of the Reference sequence (3rd column,
##       #   refLength).
##       rdf<-getRefData(reader)
##       loc <- rdf[,"SN"]==seqname
##       if (sum(loc)==0) { return(NA) }
##       Len <- rdf[,"LN"]
##       return(Len[loc])
##   }
##   #bamRange(reader, coords)
##   #The method returns a list of bamAlign’s from which overlap with the specified region
##   align<-getNextAlign(reader)

##   ###################
##   # TODO: Which
##   # filter by coords
##   if (whichDefined) {
##     seqname <- "ref"; startpos <- 0; endpos <- 1e07;
##     refId <- seqname2refId(reader,seqname)
##     maxRefLen <- seqname2Length(reader,seqname)
##     coords <- as.integer(c(refId,startpos,min(endpos,maxRefLen)))
##     range <- bamRange(reader, coords)
##     obj <- range
##     pinfo("Range size:",size(range))
##   } else {
##     obj <- reader
##   }

##   align<-getNextAlign(obj)
##   while ( !is.null(align)) ) {
##   #TODO
##   #SimpleCigarOK(align,scanBamParam) &&
##   #     bamReverseComplementOK(align,scanBamParam) &&
##   # 
##   # matePosition(align)
##   # cigarData
##   # Flags  
##     if ( flagsOK(align,scanBamParamn) ) {
##     # Apply the selection operation
##       pinfo("OK")
##     }
##     align<-getNextAlign(obj)
##   }
##   #scanBamWhat()
##   switch(what,
##          "qname" = val <- name(align),
##          "flag"  = val <- flag(align),
##          "rname" = val <- refId2seqname(reader,name(align)),
##          "strand"= val <- (if(reverseStrand(align)) { "-" } else { "+" })
##          "pos"   = val <- position(align),
##          "qwidth"= val <- width(readBases(align)),
##          "mapq"  = val <- mapQuality(align),
##          "cigar" = val <- rbamtools.getCigarString(align),
##          "mrnm"  = val <- NA,#TODO
##          "mpos"  = val <- matePosition(align),
##          "isize" = val <- insertSize(align),
##          "seq"   = val <- readBases(align),
##          "qual"  = val <- qualities(align)
##          )



##   #bamSave(writer,bamAlign/bamRange)
## #  coords<-as.integer(c(0,0,1874))
## #writer<-bamWriter(reader,"test.bam")
## #bamSave(writer,align)
## #  bamSave(writer,range)
##   #bamRange(reader, coords)
##   coords<-as.integer(c(0,0,1874))
##   range<-bamRange(reader,coords)
##   align<-getNextAlign(range)

  
##   bamClose(reader)
##   bamClose(writer)
##   isOpen(reader)

## }
# End 
#------------------------------------------
