#
# Copyright (c) 2010, Davide Cittaro
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

from bx.binned_array import BinnedArray
import bx.wiggle
import bx.bbi.bigwig_file
from optparse import OptionParser
import sys, os, pysam
import numpy as np
import matplotlib.pyplot as plt
import gzip
import struct
import ConfigParser
import subprocess
import tempfile
from NumAnalysis import summaryStatistics
from Helpers import timestring, Debug


def removePipelineSteps(options, Steps):
  """Strip the given step characters from options.pipeline, warning for each one found.

  Steps is iterated character by character; only the first occurrence of
  each step is removed, matching the original slice-based behavior.
  """
  for step in Steps:
    if options.pipeline.find(step) < 0:
      continue
    Debug(1, "Warning: step", step, "has been removed from your pipeline")
    # drop only the first occurrence, as the slice-splice version did
    options.pipeline = options.pipeline.replace(step, '', 1)


def prepareOptions():
  """Build and return the OptionParser carrying every command line option.

  Only parser construction happens here; all validation and
  normalization of the parsed values is done later in parseOptions().
  """
  usage= "%prog <-i file> [options]"
  description = "A tool to analyze genomic enrichment data inspired by DSP techniques"
  theParser = OptionParser(version="%prog 0.8.8", description=description,usage=usage)

  # input files and their formats
  theParser.add_option("-i","--sigA",dest="sigA",type="string",help="The first signal file (ChIP)", default=None)
  theParser.add_option("-c","--sigB",dest="sigB",type="string",help="The second signal file (Control)", default=None)
  # signal processing pipeline description (one letter per step, applied in order)
  theParser.add_option("--pl",dest="pipeline",type="string",help="The signal analysis pipeline string. Each step can be [A]utocorrelation, [W]avelet denoise, [G]aussian smoothing, [F]IR smoothing, [X]-correlate, con[V]olve, [E]qualize, [N]ormalize, s[C]ale, [S]ubtract, [R]ratio, [L]og2ratio, [T]hresholding, [Z]ero min. Example: FETN will perform FFT, equalization, thresholding and normalization in this order", default="NFTZ")
  theParser.add_option("--winfun",dest="windowingFunction",type="string",help="Windowing function for profile output [mean|med|max]", default="mean")
  theParser.add_option("--fir", dest="firName", type="string", help="The FIR window name", default="flat")
  theParser.add_option("--threshold", dest="threshMode", type="string", help="The thresholding method [otsu | mad | <float>]", default='otsu')
  theParser.add_option("-n","--name",dest="name",type="string",help="The experiment name", default="default")
  theParser.add_option("-W","--wavelet",dest="wavelet",type="string",help="The wavelet to use. This should be defined in pywt library", default="db5")
  theParser.add_option("-C","--corr", dest="correlate",action="store_true",help="Perform correlation analysis between two samples",default=False)
  theParser.add_option("--formatA", dest="formatA",type="string",help="File format for the first file [wig|sam|bed|bar|bw]", default="sam")
  theParser.add_option("--formatB", dest="formatB",type="string",help="File format for the second file [wig|sam|bed|bar|bw]", default=None)
  theParser.add_option("--profile", dest="profileFormat",type="string",help="File format for signal profile output [bdg|bw]", default="bw")
  theParser.add_option("--csize", dest="chromsize",type="string",help="Chromosome sizes table", default=None)
  # window/step sizes used throughout the analysis
  theParser.add_option("-e","--ewin",dest="expWindow",type="int",help="Expected size of the signal",default=10000)
  theParser.add_option("-s","--profstep",dest="profStep",type="int",help="Step size for profile output file",default=100)
  theParser.add_option("-w","--wstep",dest="wstep",type="int",help="Step size for windowing functions", default=100)
  theParser.add_option("-r","--ratio",dest="peakRatio",type="float",help="Ratio of the max. peak height to split peaks", default=0.4)
  theParser.add_option("-l","--baseline",type="string",dest="baseline",help="Baseline function [min|mad|hist|no]", default="min")
  # chromosome selection
  theParser.add_option("--ichrom",type="string", dest="ichrom",help="Include these chromosomes (comma separated)")
  theParser.add_option("--echrom",type="string", dest="echrom",help="Exclude these chromosomes (comma separated)")
  theParser.add_option("--allchr", dest="allchr",action="store_true", help="Include random and hap chromosomes (comma separated)", default=False)
  # peak calling
  theParser.add_option("-p","--peaks",action="store_true",dest="findPeaks",help="Search for peaks and write the BED file", default=False)
  theParser.add_option("-K",type="float",dest="peakInterspersion",help="Peak interspersion factor", default=3)
  # read filtering (bam input)
  theParser.add_option("-q","--quality",dest="mapQuality",type="float",help="Minimum mapping quality (for bam files only)", default=0)
  theParser.add_option("--downA",dest="downSampleA",type="float",help="Downsampling ratio for signal A (works for interval files)", default=1.0)
  theParser.add_option("--downB",dest="downSampleB",type="float",help="Downsampling ratio for signal B (works for interval files)", default=1.0)
  theParser.add_option("--nodup",action="store_true",dest="nodup",help="Naive duplicate removal (works for sorted interval files)", default=False)
  theParser.add_option("--scale",dest="scale", action="store_true", help="Scale data to an human readable range", default=False)
  # peak statistics / modelling
  theParser.add_option("--pvalue",type="float",dest="pvaluefilter",help="filter peaks above the threshold", default=1.0)
  theParser.add_option("--modelfeat",type="string",dest="modelfeat",help="Peak feature to score for p-value [area|length|height]", default="area")
  theParser.add_option("--dist",action="store_true",dest="distType",help="Infer best distribution for data", default=False)
  # output switches
  theParser.add_option("--nosig",action="store_true",dest="nosig",help="Don't perform signal analysis", default=False)
  theParser.add_option("--noprofile",action="store_true",dest="noprof",help="Don't save a profile file", default=False)
  theParser.add_option("--save",action="store_true",dest="npysave",help="Save chromosome data into npy files", default=False)
  theParser.add_option("--stats",action="store_true",dest="savestats",help="Save basic statistics for processed signals", default=False)
  return theParser


def fakeOptions():
  """Return an options object with defaults, for interactive (e.g. ipython) use.

  Parsing an empty argument list fills every option with its default,
  sparing the user a full command line; a few values are overridden
  here and callers are expected to tweak whatever else they need.
  """
  parser = prepareOptions()
  options, _ = parser.parse_args([])
  options.wstep = 100
  options.ichrom = ['chr1', 'chr19']
  options.echrom = []
  return options
  
  
def parseOptions(theParser):
  """Parse the command line and normalize/validate every option in place.

  Exits with the parser help when no ChIP file is given, or with code
  101 on fatal inconsistencies.  Side effects on the returned options
  include: derived output file names (bedfile, profile), the offset
  caches used by the interval readers, the upper-cased pipeline string,
  and lower-cased/canonicalized format, feature, FIR and threshold
  names.
  """
  (options,args) = theParser.parse_args()

  # the first signal file (ChIP) is mandatory
  if not options.sigA:
    theParser.print_help()
    sys.exit(1)

  # version string is extracted from the parser ("%prog 0.8.8" -> "0.8.8")
  options.version = theParser.get_version().split()[1]
  options.bedfile = options.name+".peaks"
  if options.profileFormat == 'bdg':
    options.profile = options.name+".bdg.gz"
  elif options.profileFormat == 'bw':
    # unfortunately wigToBigWig looks at file extension to recognize
    # gzipped files, so I have to add it to the temporary file name
    options.profile = options.name+".bigwig.tmp.gz"
  else:
    Debug(1, "Profile output should be either bedgraph or bigwig")
    sys.exit(101)

  options.lastoffsetA = 0  # This will be used by wig, bed and bar readers
  options.telldictA = {}   # per-chromosome byte offset cache for signal A
  options.lastoffsetB = 0
  options.telldictB = {}

  # include/exclude chromosome lists arrive as comma separated strings
  if options.ichrom:
    options.ichrom = options.ichrom.split(',')
  else:
    options.ichrom = []
  if options.echrom:
    options.echrom = options.echrom.split(',')
  else:
    options.echrom = []

  if options.baseline.lower() == 'no':
    options.baseline = False

  #capitalize the pipeline steps
  options.pipeline = options.pipeline.upper()

  # check that options that reduce to a single dataset are chosen one at time
  reducingSteps = 0
  for step in options.pipeline:
    if step in 'SLRXVM':
      reducingSteps += 1
  if reducingSteps > 1:
    Debug(1, "You specified", reducingSteps, "steps that reduce the number of signals, only one allowed")
    sys.exit(101)

  if not options.sigB:
    # remove subtract from pipeline if control is not present
    removePipelineSteps(options, 'SLRXVM')

  #controls on format
  if not options.formatB:
    # there's only a format here...
    options.formatB = options.formatA
  options.formatA = options.formatA.lower()
  options.formatB = options.formatB.lower()

  #bam and sam are synonims
  if options.formatA == 'bam':
    options.formatA = 'sam'
  if options.formatB == 'bam':
    options.formatB = 'sam'

  # also bigwig and bw
  if options.formatA == 'bigwig':
    options.formatA = 'bw'
  if options.formatB == 'bigwig':
    options.formatB = 'bw'

  if not options.chromsize and True not in [x == y for x in [options.formatA, options.formatB] for y in ['sam', 'bw']]:
    # If there's no chromSize file and no other format is indexed, raise the error
    Debug(1, "Please, provide a table with chromosome sizes")
    sys.exit(101)


  if options.pvaluefilter < 1:
    # If searching peaks with p-value, ensure that
    # peak searching is enabled...
    options.findPeaks = True

  options.peakInterspersion = np.abs(options.peakInterspersion)
  if not options.wstep:
    options.wstep = options.expWindow / (options.peakInterspersion + 1)

  # this flag will tell if negative peaks are interesting. It will be set True
  # or false by each pipeline step. By default is true as we expect to analyze
  # only positive parts of single signals
  options.zerodata = True

  # explicitly keep track of the number of signals under investigation
  options.onesignal = True
  if options.sigB:
    options.onesignal = False

  if options.correlate == True:
    # correlation mode needs two signals and disables peak-related outputs
    if not options.sigB:
      Debug(1, "Please, provide another file")
      sys.exit(101)
    removePipelineSteps(options, 'SLR')
    # also remove some steps that are pretty much useless here
    options.findPeaks = False
    options.savestats = False
    options.noprof = True
    options.zerodata = False


  # some checks on negative values...
  # (mapQuality + 1 allows a quality of 0 but rejects negative qualities)
  for value in [options.pvaluefilter, options.peakRatio, options.wstep, options.profStep, options.expWindow, options.mapQuality + 1]:
    if value <= 0:
      Debug(1, "Mmm... I'm afraid I won't be able to process negative values such as", value)
      sys.exit(101)

  options.windowingFunction = options.windowingFunction.lower()
  if options.windowingFunction not in ['mean', 'med', 'max']:
    options.windowingFunction = 'mean'


  # downsampling ratios must be in [0, 1]; anything else is silently reset
  if options.downSampleA > 1 or options.downSampleA < 0:
    options.downSampleA = 1.0
  if options.downSampleB > 1 or options.downSampleB < 0:
    options.downSampleB = 1.0

  # accept single-letter shortcuts for the peak feature to model
  options.modelfeat = options.modelfeat.lower()
  if options.modelfeat == 'a':
    options.modelfeat = "area"
  elif options.modelfeat == 'l':
    options.modelfeat = "length"
  elif options.modelfeat == 'h':
    options.modelfeat = "height"

  if options.modelfeat not in ["area", "length", "height"]:
    Debug(1, "You have specified an unknown feature to model, switching to default")
    options.modelfeat = "area"

  #FIR

  options.firName = options.firName.lower()
  if options.firName not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman', 'gauss', 'flattop', 'morlet', 'nuttall', 'wiener']:
    Debug(1, "Wrong FIR name, switching to default")
    options.firName = 'flat'

  # Threshold
  options.threshMode = options.threshMode.lower()

  return options


def debugParameters(options):
  """Dump every parsed option to the debug log, one '##'-prefixed line each."""
  settings = [
    ("DSP-ChIP parameters. Version", options.version),
    ("Analysis name:", options.name),
    ("File for signal A:", options.sigA),
    ("File for signal B:", options.sigB),
    ("Chromosome sizes file:", options.chromsize),
    ("File Format for file A:", options.formatA),
    ("File Format for file B:", options.formatB),
    ("Map quality:", options.mapQuality),
    ("Remove duplicates:", options.nodup),
    ("Downsample ratio for signal A:", options.downSampleA),
    ("Downsample ratio for signal B:", options.downSampleB),
    ("Profile File Format:", options.profileFormat),
    ("Included chromosome:", ','.join(options.ichrom)),
    ("Excluded chromosomes:", ','.join(options.echrom)),
    ("Use all chromosomes:", options.allchr),
    ("Correlation analysis:", options.correlate),
    ("Pipeline in use:", options.pipeline),
    ("Wavelet:", options.wavelet),
    ("FIR Window name:", options.firName),
    ("Baseline function:", options.baseline),
    ("Expected signal size:", options.expWindow),
    ("Windowing step size:", options.wstep),
    ("Thresholding method:", options.threshMode),
    ("Profile step size", options.profStep),
    ("Search for peaks:", options.findPeaks),
    ("Peak Ratio:", options.peakRatio),
    ("Peak Interspersion:", options.peakInterspersion),
    ("Infer peak distribution model:", options.distType),
    ("Data scale:", options.scale),
    ("Feature to model:", options.modelfeat),
    ("Perform signal analysis:", not options.nosig),
    ("Write profile file:", not options.noprof),
    ("Save npy data:", options.npysave),
    ("Save basic statistics:", options.savestats),
  ]
  for label, value in settings:
    Debug(1, "##", label, value)


def getChromosomeSizesFromBigWig(bwname):
  """Parse a bigwig file header and return {chromosome name: size}.

  Reads the bigwig magic to detect endianness, then jumps to the
  chromosome B+ tree and decodes its first node.

  Raises IOError when the magic is wrong or the version is < 3, and
  ValueError when the chromosome tree magic is wrong.
  NOTE(review): only the first tree node is decoded (isLeaf is never
  checked), which assumes all chromosomes fit in one leaf -- confirm
  for assemblies with very many contigs.
  """
  csize = {}
  fh = open(os.path.expanduser(bwname), "rb")
  # BUGFIX: the handle used to leak (it was never closed, not even on error)
  try:
    # read magic number to guess endianness
    magic = fh.read(4)
    if magic == '&\xfc\x8f\x88':
      endianness = '<'
    elif magic == '\x88\x8f\xfc&':
      endianness = '>'
    else:
      raise IOError("The file is not in bigwig format")
    # read the fixed 60-byte header
    (version, zoomLevels, chromosomeTreeOffset,
    fullDataOffset, fullIndexOffset, fieldCount, definedFieldCount,
    autoSqlOffset, totalSummaryOffset, uncompressBufSize, reserved) = struct.unpack(endianness + 'HHQQQHHQQIQ', fh.read(60))
    if version < 3:
      raise IOError("Bigwig files version <3 are not supported")
    # go to the chromosome B+ tree
    fh.seek(chromosomeTreeOffset)
    # the tree section carries its own endianness magic; read it again
    magic = fh.read(4)
    if magic == '\x91\x8c\xcax':
      endianness = '<'
    elif magic == 'x\xca\x8c\x91':
      endianness = '>'
    else:
      raise ValueError("Wrong magic for this bigwig data file")
    (blockSize, keySize, valSize, itemCount, reserved) = struct.unpack(endianness + 'IIIQQ', fh.read(28))
    (isLeaf, reserved, count) = struct.unpack(endianness + 'BBH', fh.read(4))
    for n in range(count):
      (key, chromId, chromSize) = struct.unpack(endianness + str(keySize) + 'sII', fh.read(keySize + 2 * 4))
      # keys are NUL-padded to keySize bytes; strip the padding
      csize[key.replace('\x00', '')] = chromSize
    return csize
  finally:
    fh.close()


def getChromosomeSizes(options):
  """Return a dict mapping chromosome name -> size for the chromosomes to use.

  An explicit size table (options.chromsize) wins; otherwise sizes are
  read from a signal file header, preferring sam over bigwig and signal
  A over signal B.  The ichrom/echrom lists filter the result; unless
  --allchr is set, '_random' and '_hap' chromosomes are dropped.
  """
  chrs = {}
  IncludeThis = False

  if options.chromsize:
   # if this file has been provided, use it
    Debug(1, timestring(), "Importing data")
    options.chromsize = os.path.expanduser(options.chromsize)
    for line in open(options.chromsize):
      (theChrom, size) = line.strip().split()
      IncludeThis = False
      if len(options.ichrom) == 0 and len(options.echrom) == 0:
        IncludeThis = True
      if len(options.ichrom) and theChrom in options.ichrom:
        IncludeThis = True
      if len(options.echrom) and theChrom in options.echrom:
        IncludeThis = False
      if options.allchr:
        # Override all other options if set
        IncludeThis = True
      elif '_random' in theChrom or '_hap' in theChrom:
        IncludeThis = False
      if IncludeThis:
        chrs[theChrom] = int(size)
    return chrs

  # start a priority cascade to choose the file for reading
  # the headers... sam is preferred over bigwig, sigA over sigB
  # (initialize so an unknown format combination can't raise NameError below)
  fileToUse = None
  fileFormat = None
  if options.formatA == 'sam':
    fileToUse = options.sigA
    fileFormat = 'sam'
  elif options.formatB == 'sam':
    fileToUse = options.sigB
    fileFormat = 'sam'
  elif options.formatA == 'bw':
    fileToUse = options.sigA
    fileFormat = 'bw'
  elif options.formatB == 'bw':
    fileToUse = options.sigB  # BUGFIX: this branch wrongly used options.sigA
    fileFormat = 'bw'

  if fileFormat == 'sam':
    Debug(1, timestring(), "Getting chromosome data from SAM file", fileToUse)
    try:
      fh = pysam.Samfile(fileToUse, 'rb')
    except IOError:
      # missing index: build it and retry
      Debug(1, timestring(), "Trying to create the index for file", fileToUse)
      pysam.index(fileToUse)
      fh = pysam.Samfile(fileToUse, 'rb')
    for x in fh.header['SQ']:
      IncludeThis = True
      theChrom = x['SN']
      if len(options.ichrom):
        if theChrom in options.ichrom:
          IncludeThis = True
        else:
          IncludeThis = False
      if len(options.echrom):
        if theChrom in options.echrom:
          IncludeThis = False
        else:
          IncludeThis = True
      if options.allchr:
        # Override all other options if set
        IncludeThis = True
      elif '_random' in theChrom or '_hap' in theChrom:
        IncludeThis = False
      if IncludeThis:
        chrs[theChrom] = int(x['LN'])
    fh.close()
  elif fileFormat == 'bw':
    fname = os.path.expanduser(fileToUse)
    chrs = getChromosomeSizesFromBigWig(fname)
  return chrs


def getChromFromWig(fname, chrom, csize, options, isChip = 1):
  """Read one chromosome's values from a wiggle file into a numpy array.

  A per-chromosome byte offset cache (telldictA/telldictB on options)
  lets successive calls seek straight to their data instead of
  re-scanning the file from the start.  Returns an empty array when the
  requested chromosome has no data.
  """
  hasData = False
  chrstart = {}          # chromosomes seen during THIS call
  b = np.zeros(csize)
  fh = open(fname, 'r', 65536)

  # pick the offset cache of the right signal (A = ChIP, B = control)
  if isChip:
    telldict = options.telldictA.copy()
    lastoffset = options.lastoffsetA
  else:
    telldict = options.telldictB.copy()
    lastoffset = options.lastoffsetB

  try:
    foffset = telldict[chrom]
  except KeyError:
    # chromosome not cached yet: resume from where the last call stopped
    foffset = lastoffset

  fh.seek(foffset)
  for (theChrom, pos, val) in bx.wiggle.Reader(fh):
    # first of all be aware that bx.wiggle.Reader has a buffer of 8192 bytes
    try:
      foo = chrstart[theChrom] # A foo assign to test if we've already seen this chrom in this function call
    except KeyError:
      # this is the first time we see the chrom in this function call
      # the chrom data start here
      chrstart[theChrom] = True
      # if the chrom has already been seen in a previous call we can safely rewrite the offset
      # as it should be the same. Otherwise we write (and export it) for the first time
      telldict[theChrom] = fh.tell() - 8192

    lastoffset = fh.tell()  - 8192

    if chrom == theChrom:
      b[pos] = val
      hasData = True
    else:
      # if we already have collected data for this
      # chromosome, just stop...
      try:
        foo = chrstart[chrom] # another foo assign.
        # if we have seen chrom (the requested one) we can stop here
        # we have the offset for the next function call...
        break
      except KeyError:
        # Ok, theChrom not the requested one, go on...
        pass

  fh.close()

  # export the updated offset cache for the next call
  if isChip:
    options.telldictA = telldict.copy()
    options.lastoffsetA = lastoffset
  else:
    options.telldictB = telldict.copy()
    options.lastoffsetB = lastoffset

  if hasData:
    return b
  else:
    return np.array([])


def getChromFromBed(fname, chrom, csize, options, isChip = 1):
  """Read one chromosome's tag pileup from a BED file into a numpy array.

  Every kept interval on the requested chromosome adds 1 to
  b[start:end].  Supports naive duplicate removal (--nodup, requires a
  sorted file) and random downsampling.  Like getChromFromWig, a
  per-chromosome byte offset cache on the options object lets later
  calls seek directly to their data.  Returns an empty array when no
  interval was kept.
  """
  hasData = False # a flag to check if we have collected some data...
  kept = 0.0
  removed = 0.0
  chrstart = {}   # chromosomes seen during THIS call
  b = np.zeros(csize)
  fh = open(fname, 'r', 65536)

  # pick the cache and downsampling ratio of the right signal
  if isChip:
    telldict = options.telldictA.copy()
    lastoffset = options.lastoffsetA
    downsample = options.downSampleA
  else:
    telldict = options.telldictB.copy()
    lastoffset = options.lastoffsetB
    downsample = options.downSampleB

  try:
    foffset = telldict[chrom]
  except KeyError:
    # chromosome not cached yet: resume from where the last call stopped
    foffset = lastoffset

  l = foffset   # running byte offset of the current line
  fh.seek(foffset)
  prevStart = 0
  prevEnd = 0
  keepThis = True
  for line in fh:
    fields = line.strip().split()
    (theChrom, start, end) = (fields[0], int(fields[1]), int(fields[2]))
    try:
      foo = chrstart[theChrom] # A foo assign to test if we've already seen this chrom in this function call
    except KeyError:
      # this is the first time we see the chrom in this function call
      # the chrom data start here
      chrstart[theChrom] = True
      # if the chrom has already been seen in a previous call we can safely rewrite the offset
      # as it should be the same. Otherwise we write (and export it) for the first time
      telldict[theChrom] = l 

    lastoffset = l 
    #eventually update the offset
    l += len(line)

    if chrom == theChrom:
      # drop exact duplicates of the previous interval (file must be sorted)
      if options.nodup and start == prevStart and end == prevEnd:
        keepThis = False
      if downsample < 1:
        r = np.random.uniform()
        if r > downsample:
          keepThis = False
      if keepThis:
        hasData = True # at least one interval has been considered
        b[start:end] += 1
        kept += 1
        prevStart = start
        prevEnd = end
      else:
        removed += 1
      keepThis = True
    else:
      # if we already have collected data for this
      # chromosome, just stop...
      try:
        foo = chrstart[chrom] # another foo assign.
        # if we have seen chrom (the requested one) we can stop here
        # we have the offset for the next function call...
        break
      except KeyError:
        # Ok, theChrom not the requested one, go on...
        pass

  fh.close()

  # export the updated offset cache for the next call
  if isChip:
    options.telldictA = telldict.copy()
    options.lastoffsetA = lastoffset
  else:
    options.telldictB = telldict.copy()
    options.lastoffsetB = lastoffset

  if options.nodup or downsample < 1:
    Debug(1, timestring(), "Tags removed:", int(removed), "Tags retained:", int(kept), "Total:", int(kept+removed))

  if hasData:
    return b
  return np.array([])
    

def readAhead(fh):
  """Skip the per-sequence metadata section of a bar file (version >= 2).

  Consumes the sequence group name, the group version and the list of
  name/value parameter pairs, discarding all of them, so that the file
  is left positioned at the feature count that follows.
  """
  def skipCounted(handle):
    # each field is a big-endian int32 length followed by that many bytes
    n = struct.unpack('>i', handle.read(4))[0]
    return handle.read(n)

  skipCounted(fh)                 # sequence group name
  skipCounted(fh)                 # sequence group version
  npar = struct.unpack('>i', fh.read(4))[0]
  for _ in range(npar):
    skipCounted(fh)               # parameter name
    skipCounted(fh)               # parameter value

  
def getChromFromBar(fname, chrom, csize, options, isChip = 1):
  """Read one chromosome's signal from a .bar file into a numpy array.

  Walks the bar header, then either seeks straight to the requested
  chromosome (offset cache on options.telldictA/B) or scans forward
  through the per-sequence sections, caching each section's offset on
  the way.  Values are stored as b[position] = value, exponentiated
  when the file declares a 'NaturalLog' scale.  Returns an empty array
  when the chromosome is missing; exits with 101 on a bad magic.
  """
  hasData = False

  # pick the offset cache of the right signal (A = ChIP, B = control)
  if isChip:
    telldict = options.telldictA.copy()
  else:
    telldict = options.telldictB.copy()

  b = np.zeros(csize)
  theChrom = ''
  types = {1:'f', 2:'i'}   # bar column type codes -> struct format chars
  barparameters = {}
  fh = open(fname, 'rb')
  # BUGFIX: the handle used to leak on every exit path; close it in finally
  try:
    buffer = fh.read(8)
    if buffer != 'barr\r\n\x1a\n':
      Debug(1, "Wrong file format")
      sys.exit(101)

    buffer = fh.read(12)
    (version, nseq, ncol) = struct.unpack('>fii', buffer)
    buffer = fh.read(4 * ncol)
    fieldType = struct.unpack('>' + ncol * 'i', buffer)
    packstring = '>'
    for x in fieldType:
      packstring  = packstring + types[x]
    # file-level parameters: length-prefixed name/value pairs
    buffer = fh.read(4)
    npar = struct.unpack('>i', buffer)[0]
    for x in range(npar):
      parlen = struct.unpack('>i', fh.read(4))[0]
      parname = fh.read(parlen)
      parlen = struct.unpack('>i', fh.read(4))[0]
      parvalue = fh.read(parlen)
      barparameters[parname] = parvalue
    try:
      # see if the requested chrom has already been seen in a previous call
      # BUGFIX: this used to look up telldict[theChrom] with theChrom == '',
      # which always raised KeyError and silently defeated the offset cache
      fh.seek(telldict[chrom])
      if version >= 2.0:
        try:
          readAhead(fh)
        except struct.error:
          # the chromosome is missing...
          return np.array([])
      nfeatures = struct.unpack('>i', fh.read(4))[0]
    except KeyError:
      # nope... get the next
      seqlen = struct.unpack('>i', fh.read(4))[0]
      theChrom = fh.read(seqlen)
      # from now we have data for the chromosome
      telldict[theChrom] = fh.tell()
      if version >= 2.0:
        readAhead(fh)
      nfeatures = struct.unpack('>i', fh.read(4))[0]
      while theChrom != chrom:
        # mmm... repeat until we get the chromosome
        # meanwhile store the positions
        fh.seek(fh.tell() + nfeatures * 4 * ncol)
        seqlen = struct.unpack('>i', fh.read(4))[0]
        theChrom = fh.read(seqlen)
        telldict[theChrom] = fh.tell()
        if version >= 2.0:
          try:
            readAhead(fh)
          except struct.error:
            # the chromosome is missing...
            return np.array([])
        nfeatures = struct.unpack('>i', fh.read(4))[0]
    # at this point, after the try/except block, we are ready to read the chrom values...
    for x in range(nfeatures):
      # sloooooow, we should read a buffer and then
      # process it, unfortunately I'm too lazy now to make it work
      feature = struct.unpack(packstring, fh.read(4 * ncol))
      # is the value always on column 1? Boh?
      if barparameters['scale'] == 'NaturalLog':
        b[feature[0]] = np.exp(feature[1])
      else:
        b[feature[0]] = feature[1]
      hasData = True
  finally:
    fh.close()

  # export the updated offset cache for the next call
  if isChip:
    options.telldictA = telldict.copy()
  else:
    options.telldictB = telldict.copy()
  if hasData:
    return b
  else:
    return np.array([])


def inputWrapper(chrom, csize, options, isChip = 1):
  """Dispatch to the reader matching the signal's declared file format.

  isChip selects signal A (ChIP) or signal B (control); the file name is
  tilde-expanded before use.  Returns whatever the format-specific
  reader returns, or None for an unrecognized format.
  """
  if isChip:
    fname = options.sigA
    fformat = options.formatA
  else:
    fname = options.sigB
    fformat = options.formatB

  fname = os.path.expanduser(fname)
  readers = {
    'sam': lambda: getChromFromSam(fname, chrom, options, isChip),
    'bed': lambda: getChromFromBed(fname, chrom, csize, options, isChip),
    'wig': lambda: getChromFromWig(fname, chrom, csize, options, isChip),
    'bar': lambda: getChromFromBar(fname, chrom, csize, options, isChip),
    'bw': lambda: getChromFromBigWig(fname, chrom, csize, options),
  }
  reader = readers.get(fformat)
  if reader is not None:
    return reader()
  # unknown format: fall through with None, as the original if-chain did
  return None


def getChromFromBigWig(fname, chrom, csize, options = None):
  """Read a whole chromosome from a bigwig file into a numpy array.

  Uses bx-python's BigWigFile; NaN (the reader's marker for uncovered
  bases) is replaced with 0 since bigwig offers no default value.
  Returns an all-zero array when the chromosome is absent or empty.
  The options parameter is unused, kept for interface symmetry with the
  other readers.
  """
  fh = open(fname, 'rb')
  try:
    # create an handler with bx python and fetch the whole chromosome
    bwh = bx.bbi.bigwig_file.BigWigFile(fh)
    data = bwh.get_as_array(chrom, 0, csize)
  finally:
    # BUGFIX: the handle used to leak when get_as_array raised
    fh.close()
  # BUGFIX: get_as_array returns None for a chromosome missing from the
  # file; np.isnan(None) would crash, so check before the substitution
  if data is None or len(data) == 0:
    return np.zeros(csize)
  # substitute NaN with 0, as there's no way to default this
  data[np.isnan(data)] = 0
  return data
  
def getChromFromSam(fname, chr, options, isChip = 1):
  """Read one chromosome's tag pileup from an indexed BAM file into a numpy array.

  The chromosome length comes from the @SQ header; reads with mapq below
  options.mapQuality are skipped; duplicate removal and downsampling
  behave as in getChromFromBed.  The BAM index is (re)built on demand.
  Returns an empty array when no read was kept.
  NOTE: the parameter name 'chr' shadows the builtin; kept for
  interface compatibility.
  """
  hasData = False
  kept = 0.0
  removed = 0.0
  if isChip:
    downsample = options.downSampleA
  else:
    downsample = options.downSampleB

  try:
    samfile = pysam.Samfile(fname, 'rb')
  except IOError:
    # missing index: build it, then retry
    Debug(1, timestring(), "Trying to create the index for file", fname)
    pysam.index(fname)
    samfile = pysam.Samfile(fname, 'rb')
  l = 0
  # chromosome length taken from the @SQ header lines
  for d in samfile.header['SQ']:
    if d['SN'] == chr:
      l = d['LN']
  b = np.zeros(l)
# moving to intervals instead of pileup, so we can implement filter on reads
# and be much more flexible, as all the input function may be unified someday
  try:
    samIterator = samfile.fetch(chr)
  except ValueError:
    Debug(1, timestring(), "Trying to create the index for file", fname)
    samfile.close() #we have to close and reopen to load new index
    pysam.index(fname)
    samfile = pysam.Samfile(fname, 'rb')
    samIterator = samfile.fetch(chr)
  prevStart = 0
  prevEnd = 0
  keepThis = True
  for x in samIterator:
    if x.mapq < options.mapQuality:
      continue
    start = x.pos
    end = start + x.rlen

    # drop exact duplicates of the previous kept read (file must be sorted)
    if options.nodup and start == prevStart and end == prevEnd:
      keepThis = False
    if downsample < 1:
      r = np.random.uniform()
      if r > downsample:
        keepThis = False
    if keepThis:
      kept += 1
      prevStart = start
      prevEnd = end
      try:
        b[start:end] += 1
        hasData = True
      except IndexError:
        # read extends past the chromosome length declared in the header
        break
    else:
      removed += 1
    keepThis = True
  samfile.close()
  if options.nodup or downsample < 1:
    Debug(1, timestring(), "Tags removed:", int(removed), "Tags retained:", int(kept), "Total:", int(kept+removed))
  if hasData:
    return b
  else:
    return np.array([])


def openBedGraph(options):
  """Open the gzipped bedgraph profile for writing, emit its track header, return the handle."""
  handle = gzip.open(options.profile, 'wb')
  parts = ["track type=bedGraph visibility=full windowingFunction=maximum name=Profile_", options.name, " description=", options.pipeline, "\n"]
  handle.write(''.join(parts))
  return handle


def wipeDir(options):
  """Remove a stale profile file left by a previous run, ignoring a missing file.

  Leftover bigwig data would otherwise be read back in by this
  execution and likely raise errors.
  """
  try:
    os.unlink(options.profile)
  except OSError:
    # nothing to clean up (file absent or not removable)
    pass


def openPeaks(options):
  """Create the peaks BED file, write its column-header line, return the handle."""
  columns = ["#name", "chromosome", "start", "end", "area", "height", "length", "p-value"]
  handle = open(options.bedfile, 'w')
  handle.write('\t'.join(columns) + '\n')
  return handle


def closeBedGraph(fh):
  """Close the bedgraph profile handle (wrapper kept for symmetry with openBedGraph)."""
  fh.close()


def closePeaks(fh):
  """Close the peaks BED handle (wrapper kept for symmetry with openPeaks)."""
  fh.close()


def bdg2bw(bdgh, chromLength, options):
  """Convert the completed bedgraph profile into a bigwig via wigToBigWig.

  Bigwig files cannot be opened for append (at least not with the Kent
  utilities), so re-opening the output for every chromosome would be
  wasteful; writing one bedgraph and converting it once here scales
  linearly instead.

  bdgh        -- open handle of the finished bedgraph (name ends in ".tmp.gz")
  chromLength -- dict mapping chromosome name -> size, written to a
                 temporary genome table for wigToBigWig
  options     -- unused here; kept for interface symmetry
  """
  bedgraphName = bdgh.name
  bigWigName = bedgraphName[:-7]   # strip the trailing ".tmp.gz"
  converter = "wigToBigWig"        # renamed local: used to shadow this function's name

  # write the chromosome size table wigToBigWig needs
  genomeTable = tempfile.NamedTemporaryFile(mode='w', prefix="genome-", dir='.')
  for chromName in chromLength:
    genomeTable.write('%s\t%s\n' % (chromName, chromLength[chromName]))
  genomeTable.flush()

  # the bedgraph must be complete on disk before conversion
  bdgh.close()
  Debug(1, timestring(), "Converting bedgraph into bigwig")

  proc = subprocess.Popen([converter, "-clip", bedgraphName, genomeTable.name, bigWigName], shell=False, stderr=subprocess.PIPE)
  (output, error) = proc.communicate()

  # closing the NamedTemporaryFile also deletes the genome table
  genomeTable.close()

  if len(error):
    # these should be essentially Warnings
    Debug(1, "There was this error:\n", error)
  else:
    # conversion succeeded: drop the intermediate bedgraph
    os.unlink(bedgraphName)
    Debug(1, timestring(), "Removed bedgraph file and created bigWig")



def writeBigWig(data, options, chromLength, chrom=None):
  """Append one chromosome's windowed profile to the bigwig file.

  Bigwig files cannot be appended to in place, so the existing bigwig is
  dumped back to bedgraph text, the new chromosome's windows are added,
  and the whole thing is re-converted through the Kent utilities
  (bigWigToBedGraph / wigToBigWig).

  data:        per-base profile values for this chromosome
  options:     needs .profile, .profStep and .windowingFunction
               ('mean', 'med' or 'max')
  chromLength: dict chrom name -> length, written to genome.tab
  chrom:       name of the chromosome being written
  """
  tmpFile = options.profile + ".tmp"
  fname = options.profile
  gTabName = "genome.tab"
  bw2bdg = "bigWigToBedGraph"
  bdg2bw = "wigToBigWig"

  # Create genome.tab file (chrom <tab> length), required by wigToBigWig
  fh = open(gTabName, 'w')
  for key in chromLength:
    fh.write(key + '\t' + str(chromLength[key]) + '\n')
  fh.close()

  Debug(2, timestring(), "Writing profile data in bigwig...")

  # Read what has been written so far; on the first chromosome the bigwig
  # does not exist yet, communicate() then yields an empty string.
  cmd1 = subprocess.Popen([bw2bdg, fname, "stdout"], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=4096)
  (previousData, error) = cmd1.communicate()
  # don't care about errors, previousData has been initialized :-)

  # Accumulate the new interval lines in a list and join once at the end:
  # string += in a loop is quadratic in the number of windows.
  newLines = []
  l = float(len(data))
  step = options.profStep
  for p in range(0, int(l), step):
    Debug(2, timestring(), "Writing profile data in bigwig...", int(np.ceil(p / l * 99)), "% completed")
    if options.windowingFunction == 'mean':
      v = np.mean(data[p:p+step])
    elif options.windowingFunction == 'med':
      v = np.median(data[p:p+step])
    elif options.windowingFunction == 'max':
      v = np.max(data[p:p+step])
    else:
      # previously an unknown value fell through to an UnboundLocalError
      raise ValueError("unknown windowing function: %s" % options.windowingFunction)
    # bedgraph may simply omit zero intervals
    if v == 0: continue
    start = str(int(p))
    if p + step > l:
      # clamp the last window to the chromosome end; the coordinate must
      # be an integer (str(l) emitted a float like "1000.0", which is not
      # a valid bedgraph coordinate)
      end = str(int(l))
    else:
      end = str(int(p + step))
    value = "%.3e" % v
    newLines.append('\t'.join([chrom, start, end, value]) + '\n\n')

  previousData = previousData + ''.join(newLines)

  # open the write process
  cmd2 = subprocess.Popen([bdg2bw, "-clip", "stdin", gTabName, tmpFile], shell=False, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=4096)
  Debug(1, timestring(), "Writing profile data in bigwig...", "100 % completed")
  (output, error) = cmd2.communicate(previousData)

  if len(error):
    # these should be essentially Warnings
    Debug(1, "There was this error", error)
    if "There's more than one value" in error:
      Debug(1, "This chrom has been skipped, the last points were", previousData[-128:])
  else:
    # remove tab file
    os.unlink(gTabName)
    # mv the temporary file to previous one
    os.rename(tmpFile, fname)
  


def bedgraphLine(chrom, start, end, value):
  """Format one bedgraph record: chrom, integer start/end and the value
  in 3-digit scientific notation, tab-separated, newline-terminated."""
  return '\t'.join([chrom, "%d" % start, "%d" % end, "%.3e" % value]) + '\n'


def writeBedGraph(fh, data, options, chrom=None):
  """Window `data` and write the non-zero intervals of `chrom` to `fh`
  in bedgraph format.

  data is reshaped into a (windows, profStep) matrix so the windowing
  function ('mean', 'med' or 'max' from options.windowingFunction) can
  be broadcast over it; a trailing partial window is handled separately.
  Intervals whose windowed value is 0 are omitted, as bedgraph allows.
  Lines are written in buffered batches to limit write calls.
  """
  l = len(data)
  if l == 0:
    # nothing to window or write; the original code crashed on empty input
    return
  step = options.profStep
  Debug(1, timestring(), "Writing profile data in bedgraph...")

  if step > l: step = l
  tailLen = l % step
  if tailLen == 0:
    tail = np.array([])
    # integer division (//): l / step is a float under py3 and breaks resize
    tmpdata = np.resize(data, (l // step, step))
  else:
    # there's a tail: treat it separately, resize can't handle a ragged row
    tail = data[-tailLen:]
    tmpdata = np.resize(data[:-tailLen], (l // step, step))

  # now calculate the windowed value per row (plus the tail's own value)
  if options.windowingFunction == 'mean':
    valArray = np.mean(tmpdata, axis=1)
    if len(tail):
      valArray = np.append(valArray, np.mean(tail))
  elif options.windowingFunction == 'med':
    valArray = np.median(tmpdata, axis=1)
    if len(tail):
      valArray = np.append(valArray, np.median(tail))
  elif options.windowingFunction == 'max':   # was a bare `if`; elif for consistency
    valArray = np.maximum.reduce(tmpdata, axis=1)
    if len(tail):
      valArray = np.append(valArray, np.max(tail))

  startArray = np.arange(0, int(l), step)
  endArray = startArray + step
  if endArray[-1] > l:
    endArray[-1] = l

  # bedgraph may skip all intervals with value == 0
  mask = valArray != 0
  startArray = startArray[mask]
  endArray = endArray[mask]
  valArray = valArray[mask]

  bufLines = 10000
  n = len(startArray)
  # BUGFIX: the loop used range(0, n - bufLines, bufLines), which always
  # dropped the final partial buffer of lines and wrote nothing at all
  # whenever n <= bufLines.
  for nl in range(0, n, bufLines):
    hi = min(nl + bufLines, n)
    fileLines = [bedgraphLine(chrom, startArray[x], endArray[x], valArray[x]) for x in range(nl, hi)]
    fh.writelines(fileLines)
    fh.flush()


def writePeaks(fh, data, peaks, options, chrom=None):
  """Write peaks passing the p-value filter to `fh` as one tab-separated
  line each (name, chrom, start, end, area, height, length, p-value),
  flushing every 1000 peaks and once at the end.

  Each peak is a (start, end, p-value, area, height) tuple; peaks with
  p-value >= options.pvaluefilter are skipped. `data` is unused (kept
  for signature compatibility with the other writers).
  """
  for idx, peak in enumerate(peaks):
    if peak[2] >= options.pvaluefilter:
      # not significant enough: skip
      continue
    fields = [
      '_'.join(["peak", chrom, str(idx)]),   # pvalue doubles as the name for display
      chrom,
      str(int(peak[0])),
      str(int(peak[1])),
      "%.3f" % peak[3],
      "%.3f" % peak[4],
      str(int(peak[1] - peak[0])),
      "%.3e" % peak[2],
    ]
    fh.write('\t'.join(fields) + '\n')
    if idx % 1000 == 0:
      fh.flush()
  fh.flush()


def npysave(data, *flags):
  """Persist `data` to '<flag1>_<flag2>_....npy' via np.save, logging
  the destination file name."""
  basename = '_'.join(flags)
  Debug(1, timestring(), "Saving", basename+".npy")
  np.save(basename, data)

  
def saveStats(signal, chrom, options):
  """Write the summary statistics of `signal` (one "Label: value" line
  each) to <options.name>_<chrom>_stats.txt."""
  stats = summaryStatistics(signal)
  # (label, stats key) pairs, in output order
  rows = [
    ("Average", "average"),
    ("Standard Deviation", "stddev"),
    ("Median", "median"),
    ("Variance", "variance"),
    ("Min.", "min"),
    ("Max", "max"),
    ("Kurtosis", "kurtosis"),
    ("Skewness", "skewness"),
  ]
  out = open(options.name + "_" + chrom + "_stats.txt", 'w')
  for label, key in rows:
    out.write(label + ": " + str(stats[key]) + '\n')
  out.close()

  
def saveCorrelation(corr_results, chrom, options):
  """Scatter-plot signal A against signal B with the fitted regression
  line and an annotation box of correlation statistics, then save the
  figure to <options.name>_<chrom>_corr.png.

  corr_results layout (as produced upstream): [0]=x values, [1]=y values,
  [2]=(spearman r, p-value), [3]=(slope, intercept, r, tt, stderr).
  """
  x, y = corr_results[0], corr_results[1]
  (m, b, r, tt, error) = corr_results[3]
  # two points spanning the x range are enough to draw the fit line
  tx = np.array([x.min(), x.max()])
  ty = m * tx + b

  # assemble the annotation text: fit equation plus correlation stats
  lindescr = "y=" + ("%.4f" % m) + "x"
  if b > 0:
    lindescr += "+" + ("%.4f" % b) + "\n"
  elif b < 0:
    lindescr += ("%.4f" % b) + "\n"
  else:
    lindescr += "\n"
  lindescr += "r = " + ("%.5f" % r) + "\n"
  lindescr += "Error = " + ("%.5f" % error) + "\n"
  lindescr += "Spearman r = " + ("%.5f" % corr_results[2][0]) + "\n"
  lindescr += "p-value = " + ("%.3e" % corr_results[2][1])

  fname = options.name + '_' + chrom + "_corr.png"
  fig = plt.figure()

  ax = fig.add_subplot(111)
  ax.set_xlabel(options.sigA)
  ax.set_ylabel(options.sigB)
  ax.set_title('Signal correlation for '+chrom+' (window size: ' + str(options.expWindow) + ' bp)')
  ax.set_xlim([0, x.max()])
  ax.set_ylim([0, y.max()])

  ax.grid()
  ax.plot(x, y, 'b+', tx, ty, 'k--')

  ylim = ax.get_ylim()
  xlim = ax.get_xlim()

  # place the annotation near the top-left corner of the plot area
  yspan = (ylim[1] - ylim[0]) / 5    # five lines of text
  xspan = (xlim[1] - xlim[0]) / 25   # roughly one character width * 25

  annotation = ax.text(xlim[0] + xspan, ylim[1] - yspan, lindescr, family="monospace", size=10)
  annotation.set_bbox(dict(facecolor='blue', alpha=0.5))

  fig.savefig(fname, dpi=96)


def parseWavelet(wavelet):
  """Parse a custom wavelet definition file (ini format).

  The file must contain a [wavelet] section with dec_lo, dec_hi, rec_lo
  and rec_hi options, each a whitespace- and/or comma-separated list of
  float coefficients; an optional `name` option labels the wavelet
  (default "CustomWavelet").

  Returns (name, filter_bank) where filter_bank holds the four
  coefficient lists in the order above.
  """
  wdef = ConfigParser.ConfigParser()
  filter_bank = []
  wdef.read(os.path.expanduser(wavelet))
  try:
    name = wdef.get('wavelet', 'name')
  except ConfigParser.NoOptionError:
    name = "CustomWavelet"
  for x in ['dec_lo', 'dec_hi', 'rec_lo', 'rec_hi']:
    s = wdef.get('wavelet', x)
    # BUGFIX: replace commas with spaces, not '' — "0.5,0.5" previously
    # collapsed into the single token "0.50.5" and float() failed
    filter_bank.append([float(t) for t in s.replace(',', ' ').split()])

  return (name, filter_bank)
    

  
  
  
