#!/usr/bin/env python
"""
Copyright Ian Ross Williams, 2012

    This file is part of ArcSecond.

    ArcSecond is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    ArcSecond is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with ArcSecond.  If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import Image, ImageDraw
import scipy.stats as stats
import scipy.ndimage as ndi
from scipy.ndimage import (gaussian_filter,
                           generate_binary_structure, binary_erosion, label)
import pywt

def normalise(nim):
    """Linearly rescale an array to the range [0, 255].

    nim : array-like (any numeric dtype)

    Returns a float array.  If the input is constant it returns all zeros
    when the value is below 128 and all 255s otherwise.
    """
    # Work in float: with integer input (e.g. a uint8 image) the original
    # (nim-mn)/(mx-mn) floor-divided to 0 or 1, destroying the rescale.
    nim = np.asarray(nim, dtype=np.float64)
    mn, mx = np.min(nim), np.max(nim)
    if mx - mn > 0:
        return (nim - mn)/(mx - mn)*255.
    elif mx < 128:
        return np.zeros(nim.shape)
    else:
        return np.ones(nim.shape)*255.
 
def waveletDenoisePIL(im, noiseSigma):
    """Wavelet-denoise each band of a PIL image independently.

    im         : PIL image
    noiseSigma : noise standard deviation, passed through to waveletDenoise

    Returns a new PIL image: "RGB" for multi-band input, otherwise a
    single-band image converted back to the input's original mode.
    """
    original_mode = im.mode
    bands = im.getbands()
    im.load()  # Call the load routine to work round a bug in PIL 1.17
    denoised = []
    for channel in im.split():
        cleaned = waveletDenoise(np.asarray(channel), noiseSigma)
        denoised.append(Image.fromarray(cleaned).convert("L"))

    if len(bands) > 1:
        return Image.merge("RGB", (denoised[0], denoised[1], denoised[2]))
    return denoised[0].convert(original_mode)

def waveletDenoise(u,noiseSigma):
    """Soft-threshold wavelet denoising of a 2-d array.

    u          : 2-d numpy array (one image channel)
    noiseSigma : estimated noise standard deviation; scales the threshold

    Returns the denoised array, cropped back to u's original shape
    (waverec2 can return an array one pixel larger per axis).
    """
    # Biorthogonal 6.8 wavelet, decomposed down to the deepest level the
    # image height allows.
    wavelet = pywt.Wavelet('bior6.8')
    levels  = int( np.log2(u.shape[0]) )
    waveletCoeffs = pywt.wavedec2( u, wavelet, level=levels)
    # Universal (VisuShrink-style) threshold.  NOTE(review): the textbook
    # formula is sigma*sqrt(2*ln(N)) (natural log); log2 here makes the
    # threshold ~20% larger — confirm whether that is intentional.
    threshold=noiseSigma*np.sqrt(2*np.log2(u.size))
    # pywt.thresholding.soft is the pre-1.0 PyWavelets API (now
    # pywt.threshold); map() returning a list is Python 2 behaviour.
    NWC = map(lambda x: pywt.thresholding.soft(x,threshold), waveletCoeffs)
    u = pywt.waverec2( NWC, wavelet)[:u.shape[0],:u.shape[1]]
    return u
    
#Vector median demosaic
# Bayer-pattern identifiers accepted by demosaic()'s `pattern` argument.
(DEBAYER_NONE, DEBAYER_BGGR, DEBAYER_GRBG) = range(3)
# Interpolation methods for demosaic()'s `method` argument: vector median is
# more robust to noise; vector mean is faster.
(DEBAYER_VECTOR_MEDIAN, DEBAYER_VECTOR_MEAN) = range(2)
def demosaic(im, pattern, method=DEBAYER_VECTOR_MEDIAN):
    """Demosaic a raw Bayer-pattern image into an RGB PIL image.

    im      : PIL image or array-like raw sensor frame (values 0-255)
    pattern : DEBAYER_NONE, DEBAYER_BGGR or DEBAYER_GRBG
    method  : DEBAYER_VECTOR_MEDIAN (default, robust to noise) or
              DEBAYER_VECTOR_MEAN (faster)

    Returns an RGB PIL image.  For DEBAYER_NONE the grey input is passed
    through to all three channels (the original returned an all-black image).
    """
    nim = np.asarray(im)
    if len(nim.shape) == 3:
        nim = np.mean(nim, axis=2)

    # Shifted copies of the image giving each pixel's 8-neighbourhood,
    # with wrap-around at the borders (np.roll).
    nimXP = np.roll(nim, 1, axis=0)
    nimXM = np.roll(nim, -1, axis=0)
    nimYP = np.roll(nim, 1, axis=1)
    nimYM = np.roll(nim, -1, axis=1)
    nimXPYP = np.roll(nimXP, 1, axis=1)
    nimXMYP = np.roll(nimXM, 1, axis=1)
    nimXPYM = np.roll(nimXP, -1, axis=1)
    nimXMYM = np.roll(nimXM, -1, axis=1)
    nx, ny = nim.shape[0], nim.shape[1]
    xx, yy = np.mgrid[0:nx, 0:ny]

    # Default output: grey passthrough (used when no Bayer pattern matches).
    red   = np.array(nim)
    green = np.array(nim)
    blue  = np.array(nim)

    # Boolean masks selecting each Bayer site.
    if pattern == DEBAYER_GRBG:
        G1mask = ((xx % 2) == 0) & ((yy % 2) == 0)
        Rmask  = ((xx % 2) == 1) & ((yy % 2) == 0)
        Bmask  = ((xx % 2) == 0) & ((yy % 2) == 1)
        G2mask = ((xx % 2) == 1) & ((yy % 2) == 1)
    elif pattern == DEBAYER_BGGR:
        G1mask = ((xx % 2) == 0) & ((yy % 2) == 1)
        Rmask  = ((xx % 2) == 1) & ((yy % 2) == 1)
        Bmask  = ((xx % 2) == 0) & ((yy % 2) == 0)
        G2mask = ((xx % 2) == 1) & ((yy % 2) == 0)

    if pattern == DEBAYER_GRBG or pattern == DEBAYER_BGGR:
        # Virtual [R, G, B] pixels assembled from neighbouring Bayer sites,
        # one candidate triple per plausible neighbour combination.
        G1 = [[nimXM, nim    , nimYP],
              [nimXP, nim    , nimYP],
              [nimXM, nim    , nimYM],
              [nimXP, nim    , nimYM]]

        G2 = [[nimYP, nim    , nimXM],
              [nimYP, nim    , nimXP],
              [nimYM, nim    , nimXM],
              [nimYM, nim    , nimXP]]

        R = [[nim, nimXM, nimXMYM],
             [nim, nimYM, nimXMYM],
             [nim, nimXP, nimXPYM],
             [nim, nimYM, nimXPYM],
             [nim, nimXM, nimXMYP],
             [nim, nimYP, nimXMYP],
             [nim, nimXP, nimXPYP],
             [nim, nimYP, nimXPYP]]

        B = [[nimXMYM, nimXM, nim],
             [nimXMYM, nimYM, nim],
             [nimXPYM, nimXP, nim],
             [nimXPYM, nimYM, nim],
             [nimXMYP, nimXM, nim],
             [nimXMYP, nimYP, nim],
             [nimXPYP, nimXP, nim],
             [nimXPYP, nimYP, nim]]

        # Real pixel values from the median of the virtual pixels
        if method == DEBAYER_VECTOR_MEDIAN:
            G1 = np.median(G1, axis=0)
            G2 = np.median(G2, axis=0)
            R  = np.median(R, axis=0)
            B  = np.median(B, axis=0)
        # ...or from the mean (faster but more susceptible to noise)
        elif method == DEBAYER_VECTOR_MEAN:
            G1 = np.mean(G1, axis=0)
            G2 = np.mean(G2, axis=0)
            R  = np.mean(R, axis=0)
            B  = np.mean(B, axis=0)

        # Scatter the interpolated values back onto the full channels.
        red  [G1mask] = G1[0][G1mask]
        red  [Rmask]  = R [0][Rmask ]
        red  [Bmask]  = B [0][Bmask ]
        red  [G2mask] = G2[0][G2mask]
        green[G1mask] = G1[1][G1mask]
        green[Rmask]  = R [1][Rmask ]
        green[Bmask]  = B [1][Bmask ]
        green[G2mask] = G2[1][G2mask]
        blue [G1mask] = G1[2][G1mask]
        blue [Rmask]  = R [2][Rmask ]
        blue [Bmask]  = B [2][Bmask ]
        blue [G2mask] = G2[2][G2mask]

    # PIL "L" bands are 8-bit *unsigned*; the original cast to int8, which
    # wrapped values above 127 to negative numbers.
    imr = Image.fromarray(red.astype(np.uint8), "L")
    img = Image.fromarray(green.astype(np.uint8), "L")
    imb = Image.fromarray(blue.astype(np.uint8), "L")
    return Image.merge("RGB", (imr, img, imb))
    
def meanFFTtile(nim, size):
    """Mean log-power spectrum over non-overlapping size x size tiles.

    nim  : 2-d array
    size : tile edge length in pixels (partial tiles at the edges are
           discarded)

    Returns the element-wise mean of log(1e-8 + |rfft2(tile)|**2) over all
    complete tiles; shape is (size, size//2 + 1).
    """
    rows = int(nim.shape[0]/size)
    cols = int(nim.shape[1]/size)

    # Collect per-tile spectra in a list and stack once at the end; the
    # original grew the stack with np.append, copying the whole stack on
    # every tile (accidentally quadratic).
    spectra = []
    for row in range(rows):
        for col in range(cols):
            r0, c0 = size*row, size*col
            tile = nim[r0:r0 + size, c0:c0 + size]
            spectra.append(np.log(1e-8 + np.abs(np.fft.rfft2(tile))**2))

    return np.mean(np.array(spectra), axis=0)

def medianFFTtile(nim, size):
    """Median log-power spectrum over non-overlapping size x size tiles.

    nim  : 2-d array
    size : tile edge length in pixels (partial tiles at the edges are
           discarded)

    Returns the element-wise median of log(1e-8 + |rfft2(tile)|**2) over
    all complete tiles; shape is (size, size//2 + 1).
    """
    rows = int(nim.shape[0]/size)
    cols = int(nim.shape[1]/size)

    # Collect per-tile spectra and stack once.  The original grew the stack
    # with np.append (quadratic copying), cropped each tile to the stack
    # shape (a no-op, since every tile is size x size), and left debug
    # prints behind.
    spectra = []
    for row in range(rows):
        for col in range(cols):
            r0, c0 = size*row, size*col
            tile = nim[r0:r0 + size, c0:c0 + size]
            spectra.append(np.log(1e-8 + np.abs(np.fft.rfft2(tile))**2))

    return np.median(np.array(spectra), axis=0)

#This filter first splits an image into square tiles of the specified size (e.g. 64 pixels).
#Then an FFT is applied to each tile and the log power spectrum, i.e. log(abs(FFT)^2) is
#calculated for each square.  The average of these power spectra will have peaks for
#spatial frequencies that are present in every tile of the image on top of a relatively
#uniform background distribution from the rest of the image.  The peaks are isolated from the mean power spectra
#by application of a high-pass filter and then identification of local-maxima.  
#The resultant spectra can then be used to filter the full FFT of the original image, 
#being careful to preserve low frequencies and also the original phase information.     
def destripe(im, size, percentile):
    """Remove periodic (stripe) noise from a PIL image by FFT notch filtering.

    im         : PIL image
    size       : tile edge length for the per-tile power spectra
    percentile : peak-detection threshold percentile (higher = fewer notches)

    The image is split into size x size tiles and the median log power
    spectrum over the tiles (medianFFTtile) is computed; spatial frequencies
    present in every tile - i.e. periodic stripes - stand out as peaks over
    a smooth background.  Peaks are isolated with a high-pass filter plus
    local-maxima detection, the resulting notch mask is resized to the full
    image FFT, and those frequencies are suppressed while preserving the
    original phase.
    """
    mode = im.mode
    bands = im.getbands()
    im.load()  # Call the load routine to work round a bug in PIL 1.17
    channels = im.split()
    out = [None]*len(bands)
    for i, channel in enumerate(channels):
        nim = np.asarray(channel)
        fft = np.fft.rfft2(nim)
        fftStack = normalise(np.abs(medianFFTtile(normalise(nim), size)))

        # Try to identify peaks while avoiding broad image structure
        lowpass = ndi.gaussian_filter(fftStack, 3)
        highpass = normalise(fftStack - lowpass)
        localmaxima = ndi.maximum_filter(highpass, 4, mode='constant', cval=0)
        threshold = stats.scoreatpercentile(highpass.flatten(), percentile)
        # notch mask (renamed from 'filter', which shadowed the builtin)
        notch = np.zeros(fftStack.shape)
        notch[highpass >= 0.99*localmaxima] = 1.
        notch[highpass < threshold] = 0.
        # Ignore frequencies lower than can be seen by the tile size
        lfx = int(0.5*size**2/(int(fft.shape[1]/size)*size))
        lfy = int(0.5*size**2/(int(fft.shape[0]/size)*size))
        notch[0:lfy, 0:lfx] = 0.
        notch[notch.shape[0]-lfy:notch.shape[0], 0:lfx] = 0.
        # Resize the notch mask to the same size as the full fft
        imFilter = Image.fromarray(notch)
        imFilter = imFilter.resize((fft.shape[1], fft.shape[0]), Image.BILINEAR)
        notch = np.asarray(imFilter)

        # Suppress the flagged frequencies in log-power space, keeping the
        # original phase information.
        power = np.log(1.e-8 + np.abs(fft)**2)
        power = power - 5.*np.max(power)*notch
        mag = np.exp(0.5*power)
        phase = np.angle(fft)
        nim = np.fft.irfft2(mag*np.exp(1j*phase))

        out[i] = Image.fromarray(255.*normalise(nim)).convert("L")

    if len(bands) > 1:
        return Image.merge("RGB", (out[0], out[1], out[2]))
    else:
        return out[0].convert(mode)

def rgb_to_hsv(im):
    """Convert a PIL image to (H, S, V) integer arrays, each in 0-255.

    im : PIL image (anything `convert('RGB')` accepts)

    Returns H, S, V as int arrays.  Note the arrays are transposed relative
    to the image (they come from ``a.T``).  Pixels with zero chroma (greys)
    get H = S = 0; hue is scaled from 0-360 degrees down to 0-255.
    """
    i = im.convert('RGB')
    a = np.asarray(i, int)

    R, G, B = a.T

    m = np.min(a, 2).T
    M = np.max(a, 2).T

    C = M - m  # chroma
    Cmsk = C != 0

    # Hue: sector formula chosen by whichever channel is maximal.
    H = np.zeros(R.shape, int)
    mask = (M == R) & Cmsk
    H[mask] = np.mod(60*(G - B)/C, 360)[mask]
    mask = (M == G) & Cmsk
    H[mask] = (60*(B - R)/C + 120)[mask]
    mask = (M == B) & Cmsk
    H[mask] = (60*(R - G)/C + 240)[mask]
    H *= 255
    # Floor-divide: in-place true division (`H /= 360`) on an int array
    # raises TypeError under Python 3; // matches the Python 2 behaviour.
    H //= 360

    # Value
    V = M

    # Saturation (left 0 where chroma is 0)
    S = np.zeros(R.shape, int)
    S[Cmsk] = ((255*C)/V)[Cmsk]
    return H, S, V

    # H, S, and V are now defined as integers 0-255

def rgb_to_hsl(im):
    """Convert a PIL image to (H, S, L) arrays.

    im : PIL image (anything `convert('RGB')` accepts)

    Returns H and S as int arrays in 0-255 and L as a float array
    (mean of R, G, B).  Unlike rgb_to_hsv, the channel arrays are
    transposed back, so indexing matches the image layout.  Greys
    (zero chroma) get H = S = 0.
    """
    i = im.convert('RGB')
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    a = np.asarray(i, int)

    R, G, B = a.T
    R, G, B = R.T, G.T, B.T
    m = np.min(a, 2)
    M = np.max(a, 2)

    C = M - m  # chroma
    Cmsk = (C != 0)

    # Hue: sector formula chosen by whichever channel is maximal.
    H = np.zeros(R.shape, int)
    mask = (M == R) & Cmsk
    H[mask] = np.mod(60*(G - B)/C, 360)[mask]
    mask = (M == G) & Cmsk
    H[mask] = (60*(B - R)/C + 120)[mask]
    mask = (M == B) & Cmsk
    H[mask] = (60*(R - G)/C + 240)[mask]
    H *= 255
    # Floor-divide: in-place true division (`H /= 360`) on an int array
    # raises TypeError under Python 3; // matches the Python 2 behaviour.
    H //= 360

    # Lightness (simple average of the channels)
    L = (R + G + B)/3

    # Saturation (left 0 where chroma is 0)
    S = np.zeros(R.shape, int)
    S[Cmsk] = ((255*C)/M)[Cmsk]
    return H, S, L
    # H and S are integers 0-255

def hsl_to_rgb(H, S, L):
    """Convert HSL arrays (each scaled 0-255) to RGB float arrays (0-255).

    H, S, L : numpy arrays of equal shape; H is hue with 0-360 degrees
              mapped onto 0-255.

    Returns (R, G, B) float arrays in 0-255.
    """
    R = np.zeros(H.shape, np.float32)
    G = np.zeros(H.shape, np.float32)
    B = np.zeros(H.shape, np.float32)

    # Chroma; zero-chroma (grey) pixels are handled purely by the
    # lightness offset m below.
    C = (1. - np.abs(2.*L/255. - 1.))*S/255.
    Cmsk = C != 0

    # Hue sector in [0, 6).  The mod wraps the Hd == 6 endpoint (H == 255,
    # i.e. 360 degrees) back to sector 0: the original let it fall through
    # every mask and returned grey instead of red.
    Hd = np.mod((360.*H/255.)/60.0, 6.)
    X = C*(1. - np.abs(np.mod(Hd, 2.) - 1.))

    mask = (Hd >= 0) & (Hd < 1) & Cmsk
    R[mask], G[mask], B[mask] = (C[mask], X[mask], 0)
    mask = (Hd >= 1) & (Hd < 2) & Cmsk
    R[mask], G[mask], B[mask] = (X[mask], C[mask], 0)
    mask = (Hd >= 2) & (Hd < 3) & Cmsk
    R[mask], G[mask], B[mask] = (0      , C[mask], X[mask])
    mask = (Hd >= 3) & (Hd < 4) & Cmsk
    R[mask], G[mask], B[mask] = (0      , X[mask], C[mask])
    mask = (Hd >= 4) & (Hd < 5) & Cmsk
    R[mask], G[mask], B[mask] = (X[mask], 0      , C[mask])
    mask = (Hd >= 5) & (Hd < 6) & Cmsk
    R[mask], G[mask], B[mask] = (C[mask], 0      , X[mask])

    # Add the lightness offset and rescale to 0-255.
    m = L/255. - C/2.
    R, G, B = (R + m, G + m, B + m)
    R *= 255
    G *= 255
    B *= 255
    return R, G, B

#Find the geometric median of an image
def geometric_median(nim, threshold=None, tolerance=0.1, iterations=10):
    """Geometric median (Weiszfeld iteration) of the bright pixels of an image.

    nim        : 2-d array
    threshold  : pixels with value > threshold participate; defaults to the
                 image minimum
    tolerance  : stop once the estimate moves less than this between steps
    iterations : maximum number of Weiszfeld iterations

    Returns (gmx, gmy) - the x (column) and y (row) coordinates of the
    geometric median, starting from the centroid.
    """
    if threshold is None:
        threshold = np.min(nim)
    msk = nim > threshold
    # np.arange replaces the Python-2-only xrange
    xx, yy = np.meshgrid(np.arange(nim.shape[1]), np.arange(nim.shape[0]))
    xs = xx[msk].astype(np.float64)
    ys = yy[msk].astype(np.float64)

    gmx, gmy = np.mean(xs), np.mean(ys)
    for _ in range(iterations):
        # Weiszfeld update: inverse-distance-weighted mean of the points.
        # (Hoisted: the original recomputed this distance array four times.)
        dist = np.sqrt((xs - gmx)**2 + (ys - gmy)**2)
        inv_sum = np.sum(1./dist)
        gmxNew = np.sum(xs/dist)/inv_sum
        gmyNew = np.sum(ys/dist)/inv_sum
        resid = np.sqrt((gmxNew - gmx)**2 + (gmyNew - gmy)**2)
        gmx, gmy = gmxNew, gmyNew
        if resid <= tolerance:
            break

    return gmx, gmy

#Perform the circular Hough transform using circle diameters.
def houghtf(img):
    """Randomised circular Hough transform via circumcentres of pixel triples.

    img : 2-d array; non-zero pixels are treated as edge points.

    Samples 3000 random triples of bright pixels and votes for each triple's
    circumcentre.  Returns:

    centres : uint8 array of vote counts scaled to 0-255
    r2      : float array of the mean squared circumradius per cell
              (0 where a cell received no votes)
    """
    if img.ndim != 2:
        raise ValueError("Input must be a two-dimensional array")

    img = img.astype(bool)
    w, h = img.shape
    centres = np.zeros((w, h), dtype=np.float64)
    r2 = np.zeros((w, h), dtype=np.float64)

    # select the x and y coordinates of bright pixels only
    x, y = np.mgrid[:w, :h]
    xselect = x[img]
    yselect = y[img]
    npix = len(xselect)
    # A circle needs three distinct points; the original crashed (or spun
    # forever) on images with fewer than three bright pixels.
    if npix < 3:
        return centres.astype(np.uint8), r2

    for _ in range(3000):
        # np.random.randint's upper bound is exclusive; the original passed
        # npix-1 and so could never sample the last bright pixel.
        i = np.random.randint(0, npix)
        j = np.random.randint(0, npix)
        k = np.random.randint(0, npix)
        while i == j:
            j = np.random.randint(0, npix)
        while i == k or j == k:
            k = np.random.randint(0, npix)

        xi, yi = xselect[i], yselect[i]
        xj, yj = xselect[j], yselect[j]
        xk, yk = xselect[k], yselect[k]

        # Circumcenter of the three points; delta == 0 means collinear.
        delta = (xk - xj)*(yj - yi) - (xj - xi)*(yk - yj)
        if abs(delta) > 0:
            alpha = (xk - xi)*(xk - xj) + (yk - yi)*(yk - yj)
            x0 = int((xi + xj)/2. + (alpha*(yj - yi))/(2.*delta))
            y0 = int((yi + yj)/2. - (alpha*(xj - xi))/(2.*delta))
            if 0 <= x0 < w and 0 <= y0 < h:
                centres[x0, y0] += 1.
                r2[x0, y0] += (x0 - xi)**2 + (y0 - yi)**2

    # Mean squared radius per cell, without dividing by zero where a cell
    # received no votes (the original emitted inf/nan there).
    voted = centres > 0
    r2 = np.where(voted, r2/np.where(voted, centres, 1.), 0.)
    peak = np.max(centres)
    if peak > 0:
        centres = (centres/peak)*255.
    centres = centres.astype(np.uint8)
    return centres, r2

def mirrorCrop(im, scale=2.):
    """Find the dominant circular feature in an image and crop to it.

    im    : PIL image
    scale : downsampling factor used to speed up circle detection
            (the original silently overwrote this parameter with 2.)

    Returns a PIL image cropped to the detected circle's bounding box, with
    everything outside the circle masked to black.
    """
    im = im.convert("L")
    (width, height) = im.size

    # Resize the image to speed up processing
    if scale < 1:
        scale = 1
    widthSmall = int(width/scale)
    heightSmall = int(height/scale)
    imSmall = im.resize((widthSmall, heightSmall), Image.BILINEAR)

    # Convert the image to greyscale
    imSmall = imSmall.convert("L")

    # Convert the filtered image to a numpy array for further analysis
    nim = np.asarray(imSmall)

    # Contrast stretch
    mx = np.max(nim)
    mn = np.min(nim)
    nim = 255.*(1.0*nim - mn)/(mx - mn)

    # Apply an edge filter (Sobel gradient magnitude) and keep the top 5%
    fim = nim.astype(np.float32)
    grad_x = ndi.sobel(fim, 0)
    grad_y = ndi.sobel(fim, 1)
    grad_mag = np.sqrt(grad_x**2 + grad_y**2)
    cutoff = stats.scoreatpercentile(grad_mag.flat, 95)
    # Boolean edge map.  The original cast the gradient to int8, which could
    # wrap strong edges to 0 before houghtf re-cast to bool anyway.
    edge = grad_mag > cutoff

    # Identify circular features: apply the circumcenter Hough transform to
    # the edge-filtered image.
    hough, r2 = houghtf(edge)

    # Brightest cell of the vote space is the circle centre.  (The original
    # called `ndimage.measurements.extrema`, a NameError - the module is
    # imported as `ndi`.)
    (dimmest, brightest, (tmp1, tmp2), (x0, y0)) = ndi.extrema(hough)
    r0 = np.sqrt(r2[x0, y0])

    # Transform the circle centre and radius back onto the original scale,
    # padding the radius by 5%.
    x0 = int(x0*scale)
    y0 = int(y0*scale)
    r0 = int(r0*scale*1.05)

    # Bounding box of the circle in the image
    bbox = (y0 - r0, x0 - r0, y0 + r0, x0 + r0)

    # Black canvas with the detected disc pasted in from the original
    out = im.copy()
    draw = ImageDraw.Draw(out)
    draw.rectangle((0, 0, width, height), fill=0)

    mask = im.copy()
    draw = ImageDraw.Draw(mask)
    draw.rectangle((0, 0, width, height), fill=0)
    draw.ellipse(bbox, fill=255)
    out.paste(im, (0, 0, width, height), mask)

    # Crop to the circle's bounding box.
    return out.crop(bbox)
    

'''canny.py - Canny Edge detector
Copyright (C) 2011, the scikits-image team
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.
 3. Neither the name of skimage nor the names of its contributors may be
    used to endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986
'''
def smooth_with_function_and_mask(image, function, mask):
    """Apply a linear smoothing *function* to *image*, ignoring masked pixels.

    Parameters
    ----------
    image : array
      The image to smooth

    function : callable
      A function that takes an image and returns a smoothed image

    mask : array
      Mask with 1's for significant pixels, 0 for masked pixels

    Notes
    ------
    Smoothing the mask itself gives, at each pixel, the fraction of the
    smoothing kernel's weight that came from significant pixels (the
    "bleed-over" fraction).  Smoothing the masked image and dividing by
    that fraction renormalises the result as if only the significant
    pixels had contributed.
    """
    bleed_fraction = function(mask.astype(float))
    significant_only = np.zeros_like(image)
    significant_only[mask] = image[mask]
    smoothed = function(significant_only)
    # eps keeps the division finite in fully-masked regions
    return smoothed / (bleed_fraction + np.finfo(float).eps)


def canny(image, sigma=1., low_threshold=.1, high_threshold=.2, mask=None):
    '''Edge filter an image using the Canny algorithm.

    Parameters
    -----------
    image : array_like, dtype=float
      The greyscale input image to detect edges on; should be normalized to 
      0.0 to 1.0.

    sigma : float
      The standard deviation of the Gaussian filter

    low_threshold : float
      The lower bound for hysterisis thresholding (linking edges)

    high_threshold : float
      The upper bound for hysterisis thresholding (linking edges)

    mask : array, dtype=bool, optional
      An optional mask to limit the application of Canny to a certain area.

    Returns
    -------
    output : array (image)
      The binary edge map.

    See also
    --------
    skimage.sobel

    Notes
    -----
    The steps of the algorithm are as follows:

    * Smooth the image using a Gaussian with ``sigma`` width.
    
    * Apply the horizontal and vertical Sobel operators to get the gradients
      within the image. The edge strength is the norm of the gradient.
    
    * Thin potential edges to 1-pixel wide curves. First, find the normal 
      to the edge at each point. This is done by looking at the 
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse 
      directions to see if the values in either of those directions are 
      greater than the point in question. Use interpolation to get a mix of 
      points instead of picking the one that's the closest to the normal. 
    
    * Perform a hysteresis thresholding: first label all points above the 
      high threshold as edges. Then recursively label any point above the 
      low threshold that is 8-connected to a labeled point as an edge.

    References
    -----------
    Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986

    William Green' Canny tutorial
    http://dasl.mem.drexel.edu/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html

    Examples
    --------
    >>> from skimage import filter
    >>> # Generate noisy image of a square
    >>> im = np.zeros((256, 256))
    >>> im[64:-64, 64:-64] = 1
    >>> im += 0.2*np.random.random(im.shape)
    >>> # First trial with the Canny filter, with the default smoothing
    >>> edges1 = filter.canny(im)
    >>> # Increase the smoothing for better results
    >>> edges2 = filter.canny(im, sigma=3)    
    '''

    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #

    if image.ndim != 2:
        raise TypeError("The input 'image' must be a two dimensional array.")

    if mask is None:
        mask = np.ones(image.shape, dtype=bool)
    fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
    smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    magnitude = np.hypot(isobel, jsobel)

    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    s = generate_binary_structure(2, 2)
    eroded_mask = binary_erosion(mask, s, border_value=0)
    eroded_mask = eroded_mask & (magnitude > 0)
    #
    #--------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(image.shape, bool)
    #----- 0 to 45 degrees ------
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2a * w + c1a * (1.0 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    #
    #---- Create two masks at the two thresholds.
    #
    high_mask = local_maxima & (magnitude >= high_threshold)
    low_mask = local_maxima & (magnitude >= low_threshold)
    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them
    #
    # 8-connectivity structure.  The original passed np.ndarray((3, 3), bool),
    # which allocates an *uninitialized* buffer - the connectivity then
    # depended on whatever garbage happened to be in memory.
    labels, count = label(low_mask, np.ones((3, 3), bool))
    if count == 0:
        return low_mask

    sums = (np.array(ndi.sum(high_mask, labels,
                             np.arange(count, dtype=np.int32) + 1),
                     copy=False, ndmin=1))
    good_label = np.zeros((count + 1,), bool)
    good_label[1:] = sums > 0
    output_mask = good_label[labels]
    return output_mask

def convolve(img, kernel, crop=True, fftshift=True, fft_pad=True, psf_pad=False):
    """
    Convolve an image with a kernel via FFT.  Returns something the size of
    an image.  Assumes image & kernel are centred.
    *NOTE* Order matters; the kernel should be second.

    Options:
    fft_pad - Default on.   Zero-pad image to the nearest 2^n
    psf_pad - Default off.  Zero-pad image to be at least the sum of the image
        sizes (in order to avoid edge-wrapping when smoothing)
    crop - Default on.  Return an image of the size of the largest input image.
        If the images are asymmetric in opposite directions, will return the
        largest image in both directions.
    fftshift - currently unused; the inverse transform is always fftshifted.
    """
    imgshape = img.shape
    kernshape = kernel.shape
    # find ideal size (power of 2) for fft.  Can add shapes because they are tuples
    if fft_pad:
        if psf_pad:
            # add the X dimensions and Y dimensions and then take the max (bigger)
            fsize = 2**int(np.ceil(np.log2(np.max(np.array(imgshape) + np.array(kernshape)))))
        else:
            # max over the concatenated shape tuples (smaller)
            fsize = 2**int(np.ceil(np.log2(np.max(imgshape + kernshape))))
        newshape = np.array([fsize, fsize])
    else:
        if psf_pad:
            newshape = np.array(imgshape) + np.array(kernshape)  # just add the biggest dimensions
        else:
            newshape = np.array([max(imgshape[0], kernshape[0]),
                                 max(imgshape[1], kernshape[1])])
    # Slice bounds must be integers: the original used float indices
    # (newshape/2. etc.), which modern NumPy/Python reject.  int() matches
    # the old floor behaviour since all offsets are non-negative.
    centerx, centery = newshape/2.
    imgq1x = int(centerx - imgshape[0]/2.)
    imgq1y = int(centery - imgshape[1]/2.)
    imgq3x, imgq3y = imgq1x + imgshape[0], imgq1y + imgshape[1]
    kernq1x = int(centerx - kernshape[0]/2.)
    kernq1y = int(centery - kernshape[1]/2.)
    kernq3x, kernq3y = kernq1x + kernshape[0], kernq1y + kernshape[1]

    padshape = (int(newshape[0]), int(newshape[1]))
    bigimg = np.zeros(padshape, dtype=np.float32)
    bigkernel = np.zeros(padshape, dtype=np.float32)
    bigimg[imgq1x:imgq3x, imgq1y:imgq3y] = img
    bigkernel[kernq1x:kernq3x, kernq1y:kernq3y] = kernel
    imgfft = np.fft.rfft2(bigimg)
    kernfft = np.fft.rfft2(bigkernel)
    fftmult = imgfft*kernfft
    rifft = np.fft.fftshift(np.fft.irfft2(fftmult))
    if crop:
        return rifft[imgq1x:imgq3x, imgq1y:imgq3y]
    else:
        return rifft

def correlate(img, kernel, crop=True, fftshift=True, fft_pad=True, psf_pad=False):
    """
    Correlate an image with a kernel via FFT (conjugated kernel spectrum).
    Returns something the size of an image.  Assumes image & kernel are
    centred.
    *NOTE* Order matters; the kernel should be second.

    Options:
    fft_pad - Default on.   Zero-pad image to the nearest 2^n
    psf_pad - Default off.  Zero-pad image to be at least the sum of the image
        sizes (in order to avoid edge-wrapping when smoothing)
    crop - Default on.  Return an image of the size of the largest input image.
        If the images are asymmetric in opposite directions, will return the
        largest image in both directions.
    fftshift - currently unused; the inverse transform is always fftshifted.
    """
    imgshape = img.shape
    kernshape = kernel.shape
    # find ideal size (power of 2) for fft.  Can add shapes because they are tuples
    if fft_pad:
        if psf_pad:
            # add the X dimensions and Y dimensions and then take the max (bigger)
            fsize = 2**int(np.ceil(np.log2(np.max(np.array(imgshape) + np.array(kernshape)))))
        else:
            # max over the concatenated shape tuples (smaller)
            fsize = 2**int(np.ceil(np.log2(np.max(imgshape + kernshape))))
        newshape = np.array([fsize, fsize])
    else:
        if psf_pad:
            newshape = np.array(imgshape) + np.array(kernshape)  # just add the biggest dimensions
        else:
            newshape = np.array([max(imgshape[0], kernshape[0]),
                                 max(imgshape[1], kernshape[1])])
    # Slice bounds must be integers: the original used float indices
    # (newshape/2. etc.), which modern NumPy/Python reject.  int() matches
    # the old floor behaviour since all offsets are non-negative.
    centerx, centery = newshape/2.
    imgq1x = int(centerx - imgshape[0]/2.)
    imgq1y = int(centery - imgshape[1]/2.)
    imgq3x, imgq3y = imgq1x + imgshape[0], imgq1y + imgshape[1]
    kernq1x = int(centerx - kernshape[0]/2.)
    kernq1y = int(centery - kernshape[1]/2.)
    kernq3x, kernq3y = kernq1x + kernshape[0], kernq1y + kernshape[1]

    padshape = (int(newshape[0]), int(newshape[1]))
    bigimg = np.zeros(padshape, dtype=np.float32)
    bigkernel = np.zeros(padshape, dtype=np.float32)
    bigimg[imgq1x:imgq3x, imgq1y:imgq3y] = img
    bigkernel[kernq1x:kernq3x, kernq1y:kernq3y] = kernel
    imgfft = np.fft.rfft2(bigimg)
    kernfft = np.fft.rfft2(bigkernel)
    # Correlation: multiply by the conjugate of the kernel spectrum.
    fftmult = imgfft*kernfft.conj()
    rifft = np.fft.fftshift(np.fft.irfft2(fftmult))
    if crop:
        return rifft[imgq1x:imgq3x, imgq1y:imgq3y]
    else:
        return rifft

