#
# Copyright (c) 2010-2011, Davide Cittaro
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the <organization> nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

from bx.binned_array import BinnedArray
from Helpers import Debug, timestring
import IO
import pywt
import numpy as np
import scipy.stats 
import scipy.integrate
import scipy.interpolate
import scipy.fftpack 
import scipy.signal 
import scipy.ndimage
import bx.wiggle

def nextpow2(x):
  """Return the exponent of the smallest power of 2 not below |x| (as a float)."""
  magnitude = np.abs(x)
  return np.ceil(np.log2(magnitude))


def zeroPad(data, l_padded, pos = 1):
  """Pad *data* with zeros up to (roughly) l_padded samples.

  Parameters:
    data:     1-D array-like signal.
    l_padded: desired total length of the padded signal.
    pos:      where the data sits in the result:
              0 -> all padding before the data,
              1 -> padding split evenly around the data (default),
              2 -> all padding after the data.

  Returns a new 1-D numpy array. When the two half-pads overshoot
  l_padded by one sample, the trailing sample is dropped.
  Raises ValueError for an unknown *pos* (the original silently hit a
  NameError instead).
  """
  l_data = len(data)
  # BUGFIX: '/' is float division on Python 3 and np.zeros needs an int;
  # clamp at 0 so data already longer than l_padded is returned unpadded
  # instead of crashing on a negative dimension.
  pad = np.zeros(max(0, l_padded // 2 - l_data // 2))  # the zeroed tails
  if pos == 0:
    pad_data = np.concatenate([pad, pad, data])
  elif pos == 1:
    pad_data = np.concatenate([pad, data, pad])
  elif pos == 2:
    pad_data = np.concatenate([data, pad, pad])
  else:
    raise ValueError("pos must be 0, 1 or 2")
  if len(pad_data) > l_padded:
    pad_data = pad_data[:-1]
  return pad_data
  

def fastZero(dog, ext=1):
  """Return the indices where the signal crosses (or touches) zero.

  A sign change between dog[i] and dog[i+ext] makes their product
  non-positive, so those indices mark the zero-crossing points.
  (dog, here, is typically a Difference Of Gaussians, but any 1-D
  signal works.)
  """
  crossings = dog[:-ext] * dog[ext:]
  return np.flatnonzero(crossings <= 0)


def mad(data):
  """Median Absolute Deviation of *data*, ignoring NaN entries.

  NaN values would poison any median calculation, so they are
  dropped before computing the deviation.
  """
  clean = data[~np.isnan(data)]
  center = np.median(clean)
  return np.median(np.abs(clean - center))


def getOtsuThreshold(data):
  """Return an Otsu threshold for *data*.

  Builds a histogram and, for every candidate threshold (bin left edge),
  computes the inter-class variance between the background (below the
  threshold) and foreground (at or above it); the candidate maximising
  that variance is returned. Means are estimated from the histogram
  itself rather than the raw data: with few levels that would be coarse,
  but at 1024 levels the error is small and the loop stays fast.
  """
  nLevels = 1024	# number of levels... 64 is low but at least calculation happens in reasonable times
  (h, b) = np.histogram(data, bins=nLevels)
  ph = h / float(h.sum())       # per-bin probability mass
  b = b[:-1]                    # keep left edges only
  s_b = np.zeros(len(b))        # inter-class variance per candidate
  for i, threshold in enumerate(b):
    binMask = b < threshold
    # BUGFIX: the original used '-binMask' to invert the mask; unary minus
    # on a boolean array is a TypeError on modern numpy — use '~' instead.
    fgMask = ~binMask
    n_b = h[binMask].sum()
    n_f = h[fgMask].sum()
    if n_b == 0 or n_f == 0:
      # one class is empty: no separation possible (the original produced
      # a nan via 0/0 and then mapped it to -inf; do it directly)
      s_b[i] = -np.inf
      continue
    w_b = np.sum(ph[binMask])   # background weight
    w_f = np.sum(ph[fgMask])    # foreground weight
    m_b = np.sum(b[binMask] * h[binMask]) / n_b   # background mean (from histogram)
    m_f = np.sum(b[fgMask] * h[fgMask]) / n_f     # foreground mean (from histogram)
    s_b[i] = w_b * w_f * (m_b - m_f)**2
  return b[np.argmax(s_b)]


def thresholding(data, mode, expWindow, subtract=True):
  """Threshold the signal either globally or in local chunks.

  Parameters:
    data:      1-D numpy array (modified in place when subtract=False).
    mode:      "scope,method" string, e.g. "local,otsu" or "global,min";
               scope defaults to global, method defaults to otsu.
               Methods: "otsu", "min", "mad".
    expWindow: expected signal width; local chunks are 10x this size.
    subtract:  True  -> subtract the (interpolated) threshold and clip at 0;
               False -> zero out everything below the threshold.

  Returns the thresholded data; on an unknown method the data is
  returned untouched.
  """
  l = len(data)
  modes = mode.split(',')
  if len(modes) > 2:
    # BUGFIX: was modes[:1], which dropped the method and made e.g.
    # "local,otsu,extra" crash later with an IndexError; keep scope+method
    modes = modes[:2]
  elif len(modes) == 1 and modes[0] in ['global', 'local']:
    # scope given without a method: default to otsu
    modes.append('otsu')
  if 'local' in modes:
    step = 10 * expWindow
    if step > l:
      step = l
    # the method is whichever element is not the scope keyword
    thrMethod = modes[0] if modes.index('local') else modes[1]
  elif 'global' in modes:
    step = l
    thrMethod = modes[0] if modes.index('global') else modes[1]
  else:
    # default is global, keep it separated because... boh?
    step = l
    thrMethod = modes[0]
  # first of all divide data into chunks (integer division: Py3-safe shapes)
  if l % step == 0:
    tmpData = np.resize(data, (l // step, step))
  else:
    tail = np.zeros(step - l % step)
    tmpData = np.resize(np.append(data, tail), (l // step + 1, step))
  # set a thresholds array to 0, so that if anything goes wrong, it doesn't
  # threshold at all
  thrArray = np.zeros(tmpData.shape[0])
  method = thrMethod.lower()
  for x, chunk in enumerate(tmpData):
    if method == "otsu":
      thrArray[x] = getOtsuThreshold(chunk)
    elif method == "min":
      thrArray[x] = np.min(chunk)
    elif method == "mad":
      # BUGFIX: the original used '==' here, comparing instead of
      # assigning, so the mad threshold was silently never applied
      thrArray[x] = 2 * mad(chunk)
    else:
      Debug(1, timestring(), "WARNING:", thrMethod, "doesn't exist, not thresholding")
      return data
  # now we have the per-chunk thresholds, build an interpolated threshold
  # curve over the whole signal. scipy.interpolate.spline was removed from
  # scipy; make_interp_spline is the supported cubic-spline replacement.
  try:
    spl = scipy.interpolate.make_interp_spline(
        np.linspace(0, l, tmpData.shape[0]), thrArray)
    interpThr = spl(np.arange(l))
  except (ValueError, TypeError):
    # too few chunks for a spline: fall back to a flat threshold
    interpThr = thrArray[0]
  if subtract:
    data = data - interpThr
    data[data < 0] = 0
  else:
    data[data < interpThr] = 0
  return data


def getDoG(data, s1 = 1, s2 = 4):
  """Difference of Gaussians: wide-sigma blur minus narrow-sigma blur.

  Useful for edge detection?
  """
  narrow = scipy.ndimage.gaussian_filter1d(data, s1)
  wide = scipy.ndimage.gaussian_filter1d(data, s2)
  return wide - narrow


def gsmooth(data, expWindow):
  """Gaussian smoothing with a sigma derived from the expected window size."""
  Debug(1, timestring(), "WARNING: Gaussian filter processing may drain your RAM")
  # sigma grows linearly with the expected window
  sigma = int(1 + expWindow / 2)
  return scipy.ndimage.gaussian_filter1d(data, sigma)


def autocorrelate(data, expWindow, wstep):
  """Sliding-window autocorrelation of the signal.

  For each window of expWindow samples (advancing by wstep) the zero-lag
  autocorrelation is computed, then the sparse result is interpolated
  back to the original signal length.

  Returns an array the same length as *data*.
  """
  window = expWindow
  step = wstep
  l = len(data)
  # pad data so windows near the edges are complete
  data = zeroPad(data, window * 2)
  Debug(1, timestring(), "Autocorrelation: window size", expWindow, "step size:", wstep)
  # one zero-lag autocorrelation value per window position
  y = np.array([np.correlate(data[x:x+window], data[x:x+window], mode='valid') for x in  np.arange(0, len(data) - window, step)]).ravel()
  x = np.arange(0, l, step)
  Debug(1,timestring(),  "Getting the linear interpolation")
  s = scipy.interpolate.interp1d(x, y, bounds_error=False, fill_value=0)
  del(x)
  del(y)
  Debug(1,timestring(),  "Rebuilding the signal")

  # better rebuilding it in chunks using python map instead of a single interpolation
  # apparently this saves memory and time...
  intrpData = np.array([s(np.arange(x, x + expWindow)) for x in np.arange(0, l, expWindow)]).ravel()

  # The chunked rebuild always yields ceil(l/expWindow)*expWindow >= l samples.
  # BUGFIX: the original tested 'len(intrpData) > l' and otherwise appended an
  # interpolated tail, so when l was an exact multiple of expWindow the whole
  # signal was appended to itself, doubling its length. Trimming is always
  # the correct action here.
  return intrpData[:l]


def corrconv(data1, data2, expWindow, wstep, convolve = False):
  """Cross-correlate (default) or convolve two equal-length signals window
  by window, producing a new signal of the same length.

  xcorrelation is much like a subtraction, convolution the opposite?

  Parameters:
    data1, data2: 1-D arrays of identical length.
    expWindow:    window size for the per-window operation.
    wstep:        step between consecutive windows.
    convolve:     True -> np.convolve per window, False -> np.correlate.

  Returns the rebuilt signal, or None when the lengths differ.
  """
  window = expWindow
  step = wstep
  l = len(data1)
  if l != len(data2):
    Debug(1, "Both data should have the same length for this cross-correlation")
    return None
  # pad both signals with half a window of zeros on each side
  # (BUGFIX: integer division — np.zeros needs an int length on Python 3)
  halfPad = np.zeros(window // 2)
  data1 = np.concatenate([halfPad, data1, halfPad])
  data2 = np.concatenate([halfPad, data2, halfPad])
  if convolve:
    Debug(1, timestring(), "Convolution: window size", expWindow, "step size:", wstep)
    # I know I can use -data2 to convolve instead of a separate np call... but I trust numpy...
    op = np.convolve
  else:
    Debug(1, timestring(), "Cross-correlation: window size", expWindow, "step size:", wstep)
    op = np.correlate
  # one value per window position
  y = np.array([op(data1[x:x+window], data2[x:x+window], mode='valid') for x in  np.arange(0, len(data1) - window, step)]).ravel()
  x = np.arange(0, l, step)
  Debug(1,timestring(),  "Getting the linear interpolation")
  s = scipy.interpolate.interp1d(x, y, bounds_error=False, fill_value=0)
  del(x)
  del(y)
  if convolve:
    Debug(1,timestring(),  "Rebuilding the convolved signal")
  else:
    Debug(1,timestring(),  "Rebuilding the cross-correlated signal")

  # Interpolation, rebuilt in chunks (saves memory and time)
  intrpData = np.array([s(np.arange(x, x + expWindow)) for x in np.arange(0, l, expWindow)]).ravel()

  # The chunked rebuild always yields ceil(l/expWindow)*expWindow >= l samples.
  # BUGFIX: the original tested '> l' and otherwise appended an interpolated
  # tail, so when l was an exact multiple of expWindow the signal was appended
  # to itself, doubling its length. Trimming is always correct here.
  return intrpData[:l]


def wltdenoise(data, wavelet, expWindow):
  """Denoise *data* by wavelet decomposition.

  Decomposes the signal down to the level whose wavelet support matches
  the expected signal width, zeroes every detail level, and reconstructs
  using only the coarse approximation.
  """
  # Instantiate the wavelet, either by name or from a definition file
  wlt = None
  try:
    wlt = pywt.Wavelet(wavelet)
  except ValueError:
    # the wavelet doesn't exist in pywt, try to read the specified file
    (name, filter_bank) = IO.parseWavelet(wavelet)
    wlt = pywt.Wavelet(name=name, filter_bank=filter_bank)
  if not wlt:
    Debug(1, "there was an error in building the wavelet")
    return None
  Debug(1, timestring(), "Getting wavelet coefficients for wavelet", wavelet)

  # Pick the decomposition level matching the expected signal size
  useLevel, wlen = estimateWaveLevel(wlt, expWindow)

  # Decompose, then zero all detail coefficients before recomposition
  coeffs = pywt.wavedec(data, wlt, level=useLevel, mode='sp1')
  Debug(1, timestring(), "Filtering coefficients. Decomposition level:", useLevel, "Wavelet peak width:", wlen)
  for detail in coeffs[1:]:
    detail *= 0  # in-place: the arrays inside coeffs are zeroed
  Debug(1, timestring(), "Rebuilding signal")

  # Get the signal back!
  return pywt.waverec(coeffs, wlt, mode='sp1')


def estimateWaveLevel(wavelet, expWindow):
  """Pick the wavelet decomposition level whose effective support best
  matches expWindow.

  Returns (level, halfwidth) where halfwidth is half the scaled wavelet
  length at the first level that stopped improving the match.
  """
  # Instead of using the wavelet psi function length
  # we check the "useful" wavelet portion, i.e. the 
  # at which level the size of the wavelet above
  # zero is comparable with the expected signal size...
  f = wavelet.wavefun()[0]
  l = len(f)
  z = fastZero(f)
  # index of the wavelet's global maximum
  m = np.where(f == np.max(f))[0][0]
  # zero crossings bracketing the main lobe: last one at/before the peak,
  # first one at/after it
  p1 = z[np.where(z <= m)[0][-1]]
  p2 = z[np.where(z >= m)[0][0]]
  # ratio of full support to main-lobe width
  # NOTE(review): on Python 2 this was integer division; on Python 3 r is
  # a float, which changes the scaled lengths slightly — confirm intended.
  r = l / (p2 - p1)
  tmpdiff = np.inf   # NOTE(review): assigned but never read afterwards
  olddiff = np.inf
  level = 1
  rLevel = 1
  # increase the level while the scaled wavelet length keeps getting
  # closer to expWindow; stop at the first level that is worse
  while 1:
    # effective (main-lobe-scaled) wavelet length at this level
    l = len(wavelet.wavefun(level = level)[0]) / r
    thisdiff = np.abs(l - expWindow)
    if thisdiff <= olddiff:
      tmpdiff = olddiff
      olddiff = thisdiff
      rLevel = level
    else:
      break
    level += 1
  # l here is the length at the level that broke the loop (rLevel + 1)
  return rLevel, l /2


def equalize(data, minimum = 0, maximum = 1):
  """Histogram-equalize *data*, then rescale back to the original range.

  Each value is snapped to the ideal (flat) cdf level closest to its
  bin's actual cdf value, flattening the distribution, and the result is
  stretched back to [data.min(), data.max()].

  NOTE(review): the minimum/maximum parameters are accepted but unused,
  matching the original interface — confirm whether they were meant to
  override the output range.
  """
  # Sturges-like bin count.
  # BUGFIX: np.ceil returns a float and modern np.histogram requires an
  # integer bin count — cast explicitly.
  nbins = int(np.ceil(np.log2(len(data)) + 1))
  dmin = data.min()
  dmax = data.max()
  hist, bins = np.histogram(data, nbins)
  cdf = hist.cumsum() * 1.0 / hist.sum()
  # ideal (flat) cdf with the same endpoints
  ecdf = np.linspace(cdf.min(), cdf.max(), len(cdf))
  # snap each actual cdf value to the closest ideal cdf level
  lookup = np.array([ecdf[np.abs(c - ecdf).argmin()] for c in cdf])
  # equalize: ascending overwrite — each element ends up with the lookup
  # value of the highest bin whose left edge it reaches
  rdata = data.copy()
  for x, edge in enumerate(bins[:-1]):
    rdata[data >= edge] = lookup[x]
  # rescale the equalized values back to the original data range
  return (rdata - rdata.min()) / (rdata.max() - rdata.min()) * (dmax - dmin) + dmin


def normalize(data):
  """Scale the signal so its total energy (sum of squares) equals 1."""
  energy = np.sqrt((data ** 2).sum())
  return data / energy

  
def correlateSignals(data1, data2, expWindow):
  """Correlate two signals window by window.

  Splits both signals into expWindow-sized chunks, integrates each chunk
  (trapezoid rule), and correlates/regresses the per-chunk integrals.

  Returns (ir1, ir2, (spearman_r, pvalue), (slope, intercept, r, tt, stderr));
  the statistics are all zero when there are fewer than 3 chunks.
  assumes data1 and data2 are equal size :-)
  """
  l = len(data1)
  # length of the tail not covered by whole windows
  # (renamed from 'm', which was later shadowed by the regression slope)
  tailLen = l % expWindow

  # integer division: Py3-safe shapes for np.resize
  dr1 = np.resize(data1, (l // expWindow, expWindow))
  dr2 = np.resize(data2, (l // expWindow, expWindow))

  # scipy.integrate.trapz was removed in scipy >= 1.14; prefer the
  # supported name but fall back for older installations
  try:
    from scipy.integrate import trapezoid
  except ImportError:
    from scipy.integrate import trapz as trapezoid

  # get integrals, one per chunk (row-wise)
  ir1 = trapezoid(dr1)
  ir2 = trapezoid(dr2)

  if tailLen:
    # there may be a tail not included in the resized data...
    ir1 = np.append(ir1, trapezoid(data1[-tailLen:]))
    ir2 = np.append(ir2, trapezoid(data2[-tailLen:]))

  # We use Spearman coefficient because we can't assume data are normally distributed
  # actually they should be poisson distributed
  if len(ir1) > 2:
    (ccoef, cpv) = scipy.stats.spearmanr(ir1, ir2)
    (m, b, r, tt, error) = scipy.stats.linregress(ir1, ir2)
  else:
    (ccoef, cpv, m, b, r, tt, error) = (0, 0, 0, 0, 0, 0, 0)

  return (ir1, ir2, (ccoef, cpv), (m, b, r, tt, error))


def overlapAdd(data, FIR, *n):
  """Convolve the FIR filter with *data* using the overlap-add FFT method.

  Parameters:
    data: 1-D input signal.
    FIR:  1-D filter taps.
    *n:   optional single value suggesting the FFT length; it is rounded
          up to the next power of 2 and raised to at least len(FIR).

  Returns an array of len(data) filtered samples.
  Raises ValueError when the supplied n is not a positive integer.
  """
  # use OLA to convolve a filter to a signal
  l_data = len(data)
  l_FIR = len(FIR)

  if len(n):
    # Use the specified FFT length (rounded up to the nearest
    # power of 2), provided that it is no less than the filter
    # length:
    n = n[0]
    if n != int(n) or n <= 0:
      raise ValueError('n must be a nonnegative integer')
    if n < l_FIR:
      n = l_FIR
    l_fft = 2**nextpow2(n)
  else:
    if l_data > l_FIR:
      # When the filter length is smaller than the signal,
      # choose the FFT length and block size that minimize the
      # FLOPS cost. Since the cost for a length-N FFT is
      # (N/2)*log2(N) and the filtering operation of each block
      # involves 2 FFT operations and N multiplications, the
      # cost of the overlap-add method for 1 length-N block is
      # N*(1+log2(N)). For the sake of efficiency, only FFT
      # lengths that are powers of 2 are considered:
      N = 2**np.arange(np.ceil(np.log2(l_FIR)),np.floor(np.log2(l_data)))
      if not len(N):
        # no candidate power of 2 between the two lengths:
        # fall back to a single-block FFT
        l_fft = 2**nextpow2(l_FIR+l_data-1)
      else:  
        # estimated total cost for each candidate FFT length
        cost = np.ceil(l_data/(N-l_FIR+1))*N*(np.log2(N)+1)
        l_fft = N[np.argmin(cost)]
    else:
      # When the filter length is at least as long as the signal,
      # filter the signal using a single block:
      l_fft = 2**nextpow2(l_FIR+l_data-1)

  # nextpow2 returns a float exponent, so l_fft may be a float here
  l_fft = int(l_fft)
  
  # Compute the block length:
  L = int(l_fft - l_FIR + 1)
  
  Debug(1, timestring(), "fft window:", l_fft, "block length:", L)
  
  # Compute the transform of the filter:
  H = np.fft.rfft(FIR,l_fft)

  # Filter one block of L input samples per iteration, accumulating the
  # overlapping tails into the output:
  y = np.zeros(l_data,float)
  i = 0
  while i <= l_data:
    il = np.min([i+L,l_data])      # end of this input block
    k = np.min([i+l_fft,l_data])   # end of this block's contribution
    yt = np.fft.irfft(np.fft.rfft(data[i:il],l_fft)*H,l_fft) # Overlap..
    y[i:k] = y[i:k] + yt[:k-i]      # and add
    i += L
  return y


def smooth(data, expWindow=10, window='hanning', K=3):
  """Smooth the signal by convolving it with a window via overlap-add.

  Parameters:
    data:      1-D input signal.
    expWindow: window (filter) length in samples.
    window:    window type: 'flat' (moving average), 'gauss', 'wiener'
               (adaptive, returns directly), or any scipy.signal window
               function name.
    K:         gaussian support multiplier (K * expWindow taps).

  Returns the smoothed signal, same length as *data*.
  """
  # window functions live in scipy.signal.windows on modern scipy but
  # directly in scipy.signal on the releases this file was written for
  winmod = getattr(scipy.signal, 'windows', scipy.signal)
  if window == 'flat': #moving average
    FIR = np.ones(expWindow, 'd')
  elif window == 'gauss':
    FIR = winmod.gaussian(K * expWindow, expWindow)
  elif window == 'wiener':
    # adaptive filter: no FIR convolution needed
    return scipy.signal.wiener(data, expWindow)
  else:
    # BUGFIX/idiom: look the window function up by name instead of
    # eval()ing a caller-supplied string
    FIR = getattr(winmod, window)(expWindow)
  # ensure the filter has normalized energy
  FIR = FIR / np.sqrt(np.sum(np.power(FIR, 2)))
  # pad the signal so the filter tails don't shift the output
  y = overlapAdd(zeroPad(data, len(data) + expWindow), FIR)
  return y[expWindow:]


# Here starts the zombie function zone...


#def fillSparse(sparsedata, length, step=10):
#  outdata = np.zeros(length)
#  x = 0
#  for i in range(step):
#    try:
#      outdata[i::step] = sparsedata[:len(sparsedata) - x]
#    except ValueError:
#      x += 1
#      outdata[i::step] = sparsedata[:len(sparsedata) - x]
#  return outdata
#
#def buildWavelet(window_len=16, window="flattop"):
#  # This function will be useful when I will put custom wavelets
#  dec_lo = eval('scipy.signal.'+window+'(window_len)')
#  dec_lo = dec_lo * np.sqrt(2) / sum(dec_lo)
#  dec_lo = np.append(np.zeros(window_len/4), dec_lo)
#  dec_lo = np.append(dec_lo, np.zeros(window_len/4*3))
#  rec_lo = dec_lo[::-1]
##  rec_hi = np.array(dec_lo)
##  for i in range(len(dec_lo)):
##    if i % 2 == 1:
##      rec_hi[i] = -rec_hi[i]
#  dec_hi = getDoG(dec_lo)
#  rec_hi = dec_hi[::-1]
#  w = pywt.Wavelet(name=window, filter_bank = [dec_lo, dec_hi, rec_lo, rec_hi])
##  w.orthogonal = True
#  return w


