#
# Copyright (c) 2010-2011, Davide Cittaro
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#import pdb
import numpy as np
import scipy
import scipy.ndimage
import scipy.signal as SG
import scipy.stats as SS

from Helpers import Debug, timestring
import DSP



def PPLZero(data, options):
  """Clamp every negative sample to zero (in place) and flag the data as zero-based."""
  Debug(1, timestring(), "About to remove all negative values")
  negative = data < 0
  data[negative] = 0
  # record that the signal no longer holds negative values
  options.zerodata = True
  return data


def PPLAutocorrelate(data, options):
  """Autocorrelate the signal, windowed by options.expWindow / options.wstep."""
  Debug(1, timestring(), "About to perform autocorrelation")
  return DSP.autocorrelate(data, options.expWindow, options.wstep)


def PPLXCorr(data1, data2, options):
  """Cross-correlate the two signals, or convolve them when 'V' is in the pipeline."""
  convolve = 'V' in options.pipeline
  if convolve:
    Debug(1, timestring(), "About to perform signal convolution")
  else:
    Debug(1, timestring(), "About to perform signal cross-correlation")
  result = DSP.corrconv(data1, data2, options.expWindow, options.wstep, convolve)
  # two signals collapse into one from here on
  options.onesignal = True
  return result
    
def PPLWavelet(data, options):
  """Denoise the signal with the wavelet selected in options.wavelet."""
  Debug(1, timestring(), "About to perform wavelet denoising")
  return DSP.wltdenoise(data, options.wavelet, options.expWindow)


def PPLGaussSmooth(data, options):
  """Smooth the signal with a Gaussian filter of width options.profStep."""
  Debug(1, timestring(), "Applying Gaussian filter to smooth data")
  return DSP.gsmooth(data, options.profStep)


def PPLScale(data, peakToPeak):
  """Scale the signal so that its maximum equals peakToPeak.

  If the data are empty or their maximum is zero the scaling factor is
  undefined (the old code silently produced an inf-filled array); in that
  case the data are returned untouched.
  """
  if len(data) == 0 or np.max(data) == 0:
    # nothing sensible to scale against: avoid a division by zero
    Debug(1, timestring(), "Maximum is zero, skipping scaling")
    return data
  factor = float(peakToPeak) / np.max(data)
  Debug(1, timestring(), "Scaling data (", factor, "x )")
  return data * factor

def PPLequalize(data, options):
  """Equalize the signal (options is accepted only for pipeline uniformity)."""
  Debug(1, timestring(), "Equalizing...")
  equalized = DSP.equalize(data)
  return equalized
  

def PPLnormalize(data, options):
  """Normalize the signal (options is accepted only for pipeline uniformity)."""
  Debug(1, timestring(), "Normalizing...")
  normalized = DSP.normalize(data)
  return normalized
  

def PPLsubtract(data1, data2, options):
  """Return signal A minus signal B.

  The difference may be negative, so the zero-based flag is cleared and the
  two signals collapse into one.
  """
  Debug(1, timestring(), "Subtracting signal B from signal A")
  options.zerodata = False
  options.onesignal = True
  difference = data1 - data2
  return difference


def PPLRatio(data1, data2, options):
  """Return the relative difference (A - B) / |B|.

  Zero samples in either input are replaced *in place* by a tiny epsilon so
  the division can never blow up; since the ratio is computed over single
  values, this should not influence anything.
  """
  Debug(1, timestring(), "Calculating data ratio")
  epsilon = 1e-30
  for signal in (data1, data2):
    signal[signal == 0] = epsilon
  options.zerodata = False
  options.onesignal = True
  return (data1 - data2) / np.abs(data2)
  
def PPLMult(data1, data2, options):
  """Return the element-wise product of the two signals."""
  # the product may be negative, so the zero-based flag must be cleared
  options.zerodata = False
  options.onesignal = True
  product = data1 * data2
  return product

def PPLL2R(data1, data2, options):
  """Return log2((A + 1) / (B + 1)).

  Implemented for strictly positive signals: the +1 pseudocount keeps both
  the division and the logarithm away from zero. Negative values are NOT
  checked here — there's room for improvement, better ideas are welcome!
  """
  Debug(1, timestring(), "Calculating data log2ratio")
  options.onesignal = True
  ratio = (data1 + 1) / (data2 + 1)
  return np.log2(ratio)
  

def PPLadd(data1, data2, options):
  """Return the element-wise sum of the two signals."""
  Debug(1, timestring(), "Adding signal B to signal A")
  options.onesignal = True
  total = data1 + data2
  return total
  

def PPLThreshold(data, options):
  """Threshold the signal using the method named in options.threshMode."""
  Debug(1, timestring(), "Thresholding data with", options.threshMode, "method")
  thresholded = DSP.thresholding(data, options.threshMode, options.expWindow)
  return thresholded


def PPLCorrelate(theSignalA, theSignalB, options):
  """Correlate the two signals over windows of options.expWindow.

  Two vectors of windowed integrals (non-sliding) are built and their
  correlation coefficient computed; ideally a chart with a correlation line
  would be plotted too :-D. The work is delegated to DSP.correlateSignals.
  """
  Debug(1, timestring(), "Correlating signals")
  correlation = DSP.correlateSignals(theSignalA, theSignalB, options.expWindow)
  return correlation


def PPLSmooth(data, options):
  """Smooth the signal with the FIR window named in options.firName."""
  Debug(1, timestring(), "Applying FIR window", options.firName, "to signal")
  smoothed = DSP.smooth(data, options.expWindow, options.firName, options.peakInterspersion)
  return smoothed


#####
# Here start more specific functions
#####

def growPeak(data, start, end, newStart, newEnd, interMaxDist, options):
  """Decide whether two candidate peaks should be merged into one.

  The first peak spans data[start:end], the second data[newStart:newEnd].
  Returns True when they should be joined (consistently with data shape and
  user options), False when they are separate peaks.
  """

  if newEnd == end and newStart == start:
    # the two candidates are the very same peak
    return False
  if newEnd <= end or newStart <= end:
    # Trivial: the peaks overlap (or touch) so the second is contained in
    # the first. Using <= here (the old code used <) also guarantees that
    # btwnData below is never empty, which would break the percentile call.
    return True

  peak1Data = data[start:end]				# data for the first peak
  peak2Data = data[newStart:newEnd]			# data for the second peak
  btwnData = data[end:newStart]				# data between the peaks (never empty, see above)
  joinData = data[start:newEnd]				# data as if the peaks were one

  jMax = np.max(joinData)					# the max height for the joined peak
  p1Max = np.max(peak1Data)					# the max height for the first peak
  p2Max = np.max(peak2Data)					# the max height for the second peak
  bMin = SS.scoreatpercentile(btwnData, 5)  # don't use the minimum to avoid extreme division,
  											# get the 5th percentile instead

  if bMin == 0:
    # these peaks are obviously separated :-)
    return False

  if bMin <= jMax * options.peakRatio:
    # the valley between the peaks is deep, don't join
    return False

  if newStart - end >= interMaxDist:
    # the peaks are too far apart, don't join
    return False

  if newStart - start >= 2 * interMaxDist:
    # smoothing artifacts can create exceptionally long peaks;
    # this is a way to remove them
    return False

  # ok, join them!
  return True


def findDataPeaks(data, options):
  """Core peak-finding routine for a single-sign signal.

  The caller (findPeaks) splits the data into positive and negative halves
  beforehand, so this routine assumes non-negative values. Returns a list of
  [start, end, p-value, area, height] entries with a dummy p-value (0.5)
  that assignProbabilities() will refine later.
  """

  nW = options.expWindow			# the window size
  k = options.peakInterspersion
  interMaxDist = nW * k 			# the max distance we can expect between two joinable peaks
  epsilon = 1e-30
  FlexAsc = np.array([], dtype = np.int32)		# where peak starts will be stored
  FlexDes = np.array([], dtype = np.int32)		# where peak ends will be stored
  grossPeaks = []								# a peak list to be processed further

  if options.threshMode and 'T' not in options.pipeline:
    Debug(1, timestring(), "Applying", options.threshMode, "thresholding")
    cdata = DSP.thresholding(data.copy(), options.threshMode, options.expWindow, subtract = False)
    Debug(1, timestring(), "Getting peak boundaries")
    # We have thresholded data, so we assume foreground values emerge from a
    # sea of zeros. No need to search for edges in complicated ways: just
    # take the boundaries of the regions above zero.
    FlexAsc = np.where((cdata[1:] > 0) & (cdata[:-1] == 0))[0]
    FlexDes = np.where((cdata[:-1] > 0) & (cdata[1:] == 0))[0]
    # FlexAsc finds the beginnings; it misses the first position when the
    # signal does not start at zero...
    if cdata[0] > 0:
      FlexAsc = np.append(0, FlexAsc)
    # ...and the same goes for FlexDes, which finds the endings
    if cdata[-1] > 0:
      FlexDes = np.append(FlexDes, len(cdata))
    del(cdata)
  else:
    Debug(1, timestring(), "Calculating Sobel filter")
    # The Sobel filter gives the flexus points of the data, which will be
    # used as peak boundaries: it is a convenient way to estimate the
    # derivative of the original signal.
    # NOTE: scipy.ndimage is used directly; the old scipy.ndimage.filters
    # namespace is deprecated and has been removed from recent scipy.
    sobelFilter = scipy.ndimage.sobel(data)
    # Smooth the filter, otherwise we get a ton of boundaries!!!
    sobelFilter = scipy.ndimage.gaussian_filter1d(sobelFilter, int(10 * np.log10(nW)))

    Debug(2, timestring(), "Getting maxima and minima") if False else Debug(1, timestring(), "Getting maxima and minima")
    # Maxima and minima of the original data are where the filter crosses
    # zero; the minima are needed to split peaks later on
    dataMinMax = DSP.fastZero(sobelFilter + epsilon)

    if len(dataMinMax) == 0:
      # Oddly enough, there were no crossings... add two values so that the
      # whole signal is treated as a single huge peak
      dataMinMax = np.append(0, len(data) - 1)

    # ensure the first point is 0 and the last one is the last data point
    if dataMinMax[0] != 0: dataMinMax = np.append(0, dataMinMax)
    if dataMinMax[-1] < len(data) - 1: dataMinMax = np.append(dataMinMax, len(data) - 1)

    Debug(1, timestring(), "Getting peak boundaries")
    # range() instead of the old xrange() so this also runs under Python 3
    for x in range(len(dataMinMax) - 1):
      # Between two consecutive extrema, the max of the Sobel filter is an
      # ascending flexus (peak start) and the min a descending one (peak
      # end); the sign of the window sum tells which one we are looking at
      pos1 = dataMinMax[x]
      pos2 = dataMinMax[x + 1]
      window = sobelFilter[pos1:pos2]
      s = np.sum(window)
      if s >= 0:
        FlexAsc = np.append(FlexAsc, np.argmax(window) + pos1)
      else:
        FlexDes = np.append(FlexDes, np.argmin(window) + pos1)
    # we don't need the Sobel filter anymore: delete it and free some memory
    del(sobelFilter)

  # Now it's time to merge peaks that are likely part of the same enriched region
  nPeaks = float(len(FlexAsc))
  if not nPeaks:
    # no starting boundary at all: don't even try
    return grossPeaks

  Debug(2, timestring(), "Refining and merging", int(nPeaks), 'peaks')

  # 2011-21-11 I've realized I've implemented something similar to the DBSCAN
  # clustering method. Now that I know it, I've decided to implement a true DBSCAN :-)

  # initialize
  x = 0				# counter for the leftmost peak
  y = 1				# counter for the y-th peak at its right
  fxe1 = -1			# end position for the first peak
  fxe2 = -1			# end position for the second peak
  fxs1 = 0			# start position for the first peak
  fxs2 = 0			# start position for the second peak
  tmpfx = 0			# a temporary flexus point...
  while x <= len(FlexAsc):
    # iterate as long as there are peaks
    percComplete = int(np.ceil(x / nPeaks * 100))

    try:
      fxs1 = FlexAsc[x]			# the first starting point
      fxs2 = FlexAsc[x + y]		# the next useful peak
    except IndexError:
      # End of the boundary list: advance x so the while condition can end
      # the loop; when fxs1 == fxs2 we fall through once more to flush the
      # last grown peak.
      x += 1
      if fxs1 != fxs2: continue

    try:
      # the first end point at the right of fxs1 (start of the first peak)
      tmpfx = FlexDes[FlexDes > fxs1][0]
    except IndexError:
      # don't know how to handle this... it means there's a start without an
      # end, so skip to the next start
      Debug(1, timestring(), "Skipping peak start", fxs1)
      x += 1
      continue
    if tmpfx > fxe1:
      # fxe1 must be updated every time the peak grows, otherwise it is set
      # to the first flexus after the first start. We fall in here at least
      # on the first iteration, as fxe1 has been initialized to -1
      fxe1 = tmpfx
    try:
      fxe2 = FlexDes[FlexDes > fxs2][0]
    except IndexError:
      # interesting... the last flexus is an ascending one, so there's no
      # peak end: set it to the end of the chromosome
      fxe2 = len(data)

    # check the slices are not empty, otherwise np.max below raises ValueError
    if len(data[fxs1:fxe1]) == 0 or len(data[fxs2:fxe2]) == 0:
      # right now just jump over...
      x += 1
      continue

    # a candidate peak lies between fxs1 and fxe2
    if growPeak(data, fxs1, fxe1, fxs2, fxe2, interMaxDist, options):
      # these belong to the same peak: absorb the second and check the next
      y += 1
      # update the end of the joined peak
      fxe1 = fxe2
    else:
      # we have two separate peaks: record the first and start over from the second
      pHeight = np.max(data[fxs1:fxe1])						# the actual height
      pArea = np.sum(data[fxs1:fxe1]) 						# the actual area
      														# (this should be an integral ^.^)
      grossPeaks.append([fxs1, fxe1, 0.5, pArea, pHeight])	# append the peak with a dummy p-value
      x = x + y
      y = 1
      Debug(2, timestring(), "Refining and merging", len(FlexAsc), 'peaks', percComplete, "%")
  Debug(1)
  return grossPeaks


def assignProbabilities(data, grossPeaks, options):
  """Assign a p-value to every gross peak.

  grossPeaks is organized as
  [[start, end, p-value, area, height], [start, end, p-value, area, height], ...]
  and a new list with the p-value slot filled in is returned.
  """
  peakBounds = []					# the final peak list
  peakFeatureList = np.array([])	# the feature values used to model the background
  nPeaks = float(len(grossPeaks))	# again, number of peaks...

  if options.onesignal:
    # negative values were removed / signals merged: only enrichment is
    # interesting, so this is a 1-tail p estimation
    oneTail = True
  else:
    oneTail = False

  if nPeaks == 0:
    # why on earth should there be no peaks? Don't know, but return an empty list
    return peakBounds

  # Collect the feature vector over sliding windows (or peak lengths)
  if options.modelfeat == 'area':
    peakFeatureList = np.array([np.sum(data[x:x+options.expWindow]) for x in range(0, len(data), options.wstep)])
  elif options.modelfeat == 'height':
    peakFeatureList = np.array([np.max(data[x:x+options.expWindow]) for x in range(0, len(data), options.wstep)])
  elif options.modelfeat == 'length':
    peakFeatureList = np.array([x[1] - x[0] for x in grossPeaks])

  if options.distType:
    # Look for the best pdf among gamma, lognormal, normal and powerlaw;
    # this can be done only with scipy >= 0.9. We use Q-Q plots and keep the
    # distribution giving the best linear fit.
    # NOTE: compare the version numerically — a plain string comparison
    # would wrongly classify e.g. '0.10.0' as older than '0.9.0'
    try:
      scipyVersion = tuple(int(tok) for tok in scipy.__version__.split('.')[:2])
    except ValueError:
      scipyVersion = (0, 0)
    if scipyVersion < (0, 9):
      Debug(1, timestring(), "Fitting gamma distribution is only available with scipy 0.9.0 or higher")
      options.distType = 'hist'
      return assignProbabilities(data, grossPeaks, options)
    fitRmax = 0.0
    distToUse = 'gamma'
    parToUse = None
    for distName in ['norm', 'lognorm', 'powerlaw', 'gamma']:
      # getattr is safer and clearer than building code strings for eval()
      distPar = getattr(scipy.stats, distName).fit(peakFeatureList)
      # probplot returns ((osm, osr), (slope, intercept, r)); r measures the fit
      qqplot = scipy.stats.probplot(peakFeatureList, distPar, dist=distName)
      if qqplot[1][2] > fitRmax:
        fitRmax = qqplot[1][2]
        distToUse = distName
        parToUse = distPar
    if parToUse is None:
      # no candidate beat r = 0: fall back to the default gamma fit
      parToUse = scipy.stats.gamma.fit(peakFeatureList)
    # report the distribution actually chosen (the old code logged the last
    # one tried in the loop instead)
    Debug(1, timestring(), "Using", distToUse, "distribution. Parameters:", parToUse, "r:", fitRmax)
    dA = getattr(scipy.stats, distToUse)(*parToUse)		# the frozen distribution
    for n, peak in enumerate(grossPeaks):
      percComplete = int(np.ceil(n / nPeaks * 100))
      # NOTE(review): valueToModel is always the area (peak[3]) even when
      # modelfeat is 'height' or 'length' — looks suspicious but preserved
      valueToModel = peak[3]
      pvalue = peak[2]
      cumPvalue = dA.cdf(valueToModel)
      if oneTail or cumPvalue >= 0.5:
        # right tail (or 1-tailed estimation): p is the survival probability
        pvalue = 1 - cumPvalue
      else:
        pvalue = cumPvalue
      peakBounds.append([peak[0], peak[1], pvalue, peak[3], peak[4]])
      Debug(2, timestring(), "Calculating peak probabilities", percComplete, "%")
    Debug(1)
  else:
    # Calculate p-value simply evaluating the data distribution. This is not necessarily the
    # best way to do things. It should be said that I don't know a priori the data distribution
    # model. Also, I don't believe poisson or negative binomial is the proper distribution model;
    # I think some hyperbolic or hyperbolic-like model would fit better...
    # Estimate the number of bins (np.histogram requires an integer count)
    nbins = int(np.ceil(np.sqrt(nPeaks)))
    if nbins < 10:
      # just in case, never use fewer than 10 bins
      nbins = 10

    # When using a histogram, most of the data sit around the expected value, so
    # p-values for extreme values would be very low; a log transform better
    # separates the data. Make sure the minimum value is positive first.
    minCorrect = np.abs(np.min(peakFeatureList)) + 1
    peakFeatureList = np.log(peakFeatureList + minCorrect)

    # get the histogram for the peak features
    # ('density' is the current name of the old 'normed' argument, which
    # modern numpy no longer accepts)
    y, xrC = np.histogram(peakFeatureList, bins=nbins, density=True)
    xrC = xrC[:-1]

    # np.histogram gives the pdf: the integral over the values is 1, not their
    # sum. Dividing by the sum of all values we get the pmf, which we can use
    # to evaluate p at the distribution tails. We are interested in the p of
    # having a peak higher than this. The problem comes with peaks with
    # multiple summits... should I multiply the p of each single peak?
    cy = y.cumsum() / y.sum()
    for n, peak in enumerate(grossPeaks):
      percComplete = int(np.ceil(n / nPeaks * 100))
      # We calculate the p-value as a 2-tailed distribution: in principle we
      # can't assume a positive value to be at the right tail and vice versa,
      # so just check whether the cumulative p is above or below 0.5. The
      # cumulative distribution is scanned for values with an area higher
      # than this one and the first (leftmost) item is the p we look for.
      valueToModel = np.log(peak[3] + minCorrect)
      pvalue = peak[2]
      try:
        cumPvalue = cy[xrC >= valueToModel][0]
      except IndexError:
        # for whatever reason there's no data...
        cumPvalue = 1.0
      if oneTail or cumPvalue >= 0.5:
        pvalue = 1 - cumPvalue
      else:
        pvalue = cumPvalue
      # In the end we have a p-value. Apparently I'm not able to directly set
      # it into grossPeaks... that's why I'm using an additional list...
      peakBounds.append([peak[0], peak[1], pvalue, peak[3], peak[4]])
      Debug(2, timestring(), "Calculating peak probabilities", percComplete, "%")
    Debug(1)
  return peakBounds


def findPeaks(data, options):
  """Find peaks over the whole signal.

  Positive and negative values are processed separately with the very same
  routine (findDataPeaks), so negative regions are peak-searched on their
  flipped values and reported with negative area and height.
  """
  if options.zerodata:
    # negative values were already removed: a single pass is enough
    pbounds = findDataPeaks(data, options)
  else:
    Debug(1, timestring(), "Working on positive values")
    halfSignal = np.zeros(len(data))
    positive = data > 0
    halfSignal[positive] = data[positive]
    pbounds = findDataPeaks(halfSignal, options)
    # Negative values can arise from control processing or from mathematical
    # issues (FFT or wavelets introduce them). In the past they were checked
    # only when a control was given, but dspchip can now be fed processed
    # signals that carry negative values per se, so always inspect them.
    Debug(1, timestring(), "Working on negative values")
    # reset the buffer, then select and flip the negative values
    halfSignal[positive] = 0
    negative = data < 0
    halfSignal[negative] = -data[negative]
    # peaks found on the flipped data get their area and height negated back
    pbounds = pbounds + [[p[0], p[1], p[2], -p[3], -p[4]] for p in findDataPeaks(halfSignal, options)]
    del(halfSignal)
  if not len(pbounds):
    # no peaks here...
    return []
  pbounds.sort()
  pbounds = assignProbabilities(data, pbounds, options)
  return np.asarray(pbounds)


def summaryStatistics(data):
  """Return a dict of basic descriptive statistics for the signal."""
  # scipy's describe() yields (nobs, (min, max), mean, variance, skewness, kurtosis)
  nobs, (lo, hi), mean, variance, skewness, kurtosis = SS.describe(data)
  return {
    "min": lo,
    "max": hi,
    "average": mean,
    "variance": variance,
    "stddev": np.sqrt(variance),
    "skewness": skewness,
    "kurtosis": kurtosis,
    "median": np.median(data),
  }



  
