'''
This is a collection of useful functions for spike_sorting.
'''
import numpy
import scipy
import random
from scipy.cluster.vq import kmeans, vq
from genutils.smoothing import smooth
from genutils.signal_derivative import signal_derivative_1D
from genutils import special_functions, handy_functions
import time
import pylab
import cPickle
import pywt

def determine_spike_threshold( dss, debug=-1 ):
    '''
    Determines the threshold in the first derivative of the smoothed signal.
    '''
    # find the difference from gaussianity of the first derivative.
    sorted_dss = numpy.sort(dss)
    # we want the greatest deviation from normality in the energy.
    ncdf = 0.5*( 1.0 + scipy.special.erf(  ( sorted_dss - numpy.average(dss) )/\
                                           ( numpy.std(dss)*numpy.sqrt(2) )  ) )
    pfn = ncdf-numpy.linspace(0,1,len(dss))
    
    # It works like this, we step away from the middle index by 5% and we find then, the
    #    most negative difference below 45% and the most positive difference above 55%.
    dss_len = len(dss)
    fourty_five_pct = int(0.45*dss_len)
    fifty_five_pct  = int(0.55*dss_len)
    # find the minimum difference (most negative) from 0 up to fourty_five_percent
    low_max_index = numpy.argsort(pfn[:fourty_five_pct])[0]
    # find the maximum difference (most positive) from fifty_five_pct up to dss_len
    high_max_index = numpy.argsort(pfn[fifty_five_pct:])[-1]

    low_indx  = low_max_index
    high_indx = fifty_five_pct + high_max_index
    # and finally, the thresholds themselves
    low_threshold  = sorted_dss[low_indx]
    high_threshold = sorted_dss[high_indx]

    # -------------------------------------\/--[ DEBUG ]--\/----------------------------
    # debugging codes below
    if debug > 0: 
        lowlim = debug - 100
        uplim  = debug + 100
        print 'The thresholds in the first derivative are: ' + str(low_threshold) + \
              ' and ' + str(high_threshold)
        # debugging plots:
        #       Plotting cumulative distribution functions of gaussian and first derivative.
        pylab.figure()
        pylab.subplot(311)
        pylab.hist(dss,200, normed=True)
        x_range = numpy.linspace(min(dss), max(dss), 400)
        normal_fn = [special_functions.normal(numpy.average(dss),numpy.std(dss), x) \
                         for x in x_range]
        pylab.plot(x_range, normal_fn, linewidth=3) 
        pylab.axvline(sorted_dss[fourty_five_pct], color='black')
        pylab.axvline(sorted_dss[fifty_five_pct], color='black')
        pylab.axvline(low_threshold, color='green')
        pylab.axvline(high_threshold, color='red')
        pylab.subplot(312)
        pylab.plot(sorted_dss, numpy.linspace(0,1,len(dss)),linewidth=2)
        pylab.plot(sorted_dss, ncdf, linewidth=2)
        pylab.axvline(sorted_dss[fourty_five_pct], color='black')
        pylab.axvline(sorted_dss[fifty_five_pct], color='black')
        pylab.axvline(low_threshold, color='green')
        pylab.axvline(high_threshold, color='red')
        pylab.ylabel('CDFs')
        pylab.legend(['Data','Gaussian'])
        pylab.subplot(313)
        pylab.plot(sorted_dss,pfn, linewidth=2)
        pylab.axvline(sorted_dss[fourty_five_pct], color='black')
        pylab.axvline(sorted_dss[fifty_five_pct], color='black')
        pylab.axvline(low_threshold, color='green')
        pylab.axvline(high_threshold, color='red')
        pylab.ylabel('Difference of CDFs')
        pylab.xlabel('First Derivative Value')
    # -------------------------------------/\--[ DEBUG ]--/\----------------------------
    # debugging codes above
    return low_threshold, high_threshold
    


def find_spikes(signal, ss, dss, dt, low_threshold, high_threshold, peak_search_time, debug=-1):
    '''
    Determines the indecies corresponding to the peak value of each spike in a signal.
    Inputs:
        signal              : a time series recording of extracellular voltages
        ss                  : a smoothed version of the signal
        dss                 : first derivative of the smoothed signal
        dt                  : time between samples (in ms)
        low_threshold       : lower threshold in the dss
        high_threshold      : upper threshold in the dss
        peak_search_time    : time (in ms) to search around a crossing for a peak
        debug               : if > 0, draw diagnostic plots around that index
    Returns:
        peaks               : a sorted list of spike peak index values.
    '''
    # ----------------------------------------
    # -- Find crossings of thresholds
    # ----------------------------------------
    # positive-going crossings of the high threshold
    high_crossings = []
    for i in range(len(dss) - 1):
        if dss[i + 1] > high_threshold and dss[i] <= high_threshold:
            high_crossings.append(i)
    # negative-going crossings of the low threshold
    low_crossings = []
    for i in range(len(dss) - 1):
        if dss[i + 1] < low_threshold and dss[i] >= low_threshold:
            low_crossings.append(i)

    # we need to find the peaks in the smoothed signal that
    #   directly follow the crossings.
    pst_ms = int(peak_search_time / dt)  # number of samples to search for a peak
    len_dss = len(dss)
    ss_average = numpy.average(ss)

    # -----------------------------------------------------------------------------------
    # ----------------- Look both ways from all crossings for peaks ---------------------
    # -----------------------------------------------------------------------------------
    crossings = sorted(high_crossings + low_crossings)
    crossings_len = len(crossings)

    # ignore crossings in the first pst_ms samples of the recording.
    # Defaults give an empty valid range; the original raised NameError when
    # no crossing qualified.
    first_valid_crossing = crossings_len
    for i in range(crossings_len):
        if crossings[i] > pst_ms:
            first_valid_crossing = i
            break
    # ignore crossings with less than pst_ms samples left in the recording.
    last_valid_crossing = -1
    for i in range(crossings_len):
        if crossings[crossings_len - i - 1] + pst_ms < len_dss - 1:
            last_valid_crossing = crossings_len - i - 1
            break

    peaks = set()
    # BUG FIX: the slice previously stopped at last_valid_crossing and so
    # silently dropped the last valid crossing; +1 makes the range inclusive.
    for crossing in crossings[first_valid_crossing:last_valid_crossing + 1]:
        # --------------------------------------
        # begin looking both ways for a peak.
        # --------------------------------------
        # BUG FIX: maybe_peaks was (re)initialized INSIDE the right-search
        # loop, so a zero-iteration search left it undefined.
        maybe_peaks = []
        # look right for the first local extremum of the smoothed signal
        for i in range(crossing, crossing + pst_ms):
            # local maximum (a PEAK)
            if ss[i + 2] < ss[i + 1] and ss[i + 1] > ss[i]:
                maybe_peaks.append(i + 1)
                break
            # local minimum (a TROPH)
            if ss[i + 2] > ss[i + 1] and ss[i + 1] < ss[i]:
                maybe_peaks.append(i + 1)
                break
        # look left
        for i in range(0, pst_ms):
            j = crossing - i
            # local maximum (a PEAK)
            if ss[j + 2] < ss[j + 1] and ss[j + 1] > ss[j]:
                maybe_peaks.append(j + 1)
                break
            # local minimum (a TROPH)
            if ss[j + 2] > ss[j + 1] and ss[j + 1] < ss[j]:
                maybe_peaks.append(j + 1)
                break
        if len(maybe_peaks) == 2:
            # keep the stronger extremum (larger excursion from the mean)
            str0 = abs(ss_average - ss[maybe_peaks[0]])
            str1 = abs(ss_average - ss[maybe_peaks[1]])
            if str0 >= str1:
                peaks.add(maybe_peaks[0])
            else:
                peaks.add(maybe_peaks[1])
        elif len(maybe_peaks) == 1:
            peaks.add(maybe_peaks[0])
        else:
            print('peaks found: %d not equal to 1 or 2.' % (len(maybe_peaks)))

    # make the peaks a sorted list instead of a set
    peaks = sorted(peaks)

    print('high_crossings: %d    low_crossings: %d' % (len(high_crossings), len(low_crossings)))
    print('peaks: %d' % (len(peaks)))

    # if two peaks are close together without any extremum of ss in between, weed one out.
    to_be_weeded = []
    for i in range(len(peaks) - 1):
        # only consider weeding peaks closer together than pst_ms
        if peaks[i + 1] - peaks[i] < pst_ms:
            first_peak = peaks[i]
            second_peak = peaks[i + 1]
            # intermediate extremum found between the two peaks?
            ipf = False
            for k in range(first_peak, second_peak - 2):
                if ss[k + 2] > ss[k + 1] and ss[k + 1] < ss[k]:
                    ipf = True
                    break
                if ss[k + 2] < ss[k + 1] and ss[k + 1] > ss[k]:
                    ipf = True
                    break
            if not ipf:
                # add the weaker of the two peaks to the to_be_weeded list.
                # BUG FIX: the original compared the peak INDICES to the
                # signal average; the signal VALUES at the peaks are what
                # measure peak strength (cf. the str0/str1 logic above).
                first_diff  = abs(ss[first_peak]  - ss_average)
                second_diff = abs(ss[second_peak] - ss_average)
                if first_diff >= second_diff:
                    to_be_weeded.append(second_peak)
                else:
                    to_be_weeded.append(first_peak)

    # the real peaks are those that survive weeding.
    weeded = set(to_be_weeded)
    peaks = [peak for peak in peaks if peak not in weeded]

    print('after weeding...')
    print('peaks: %d' % (len(peaks)))

    # -------------------------------------\/--[ DEBUG ]--\/----------------------------
    if debug > 0:
        lim = debug
        lowlim = lim - 100
        uplim  = lim + 100
        pylab.figure()
        # plot the derivative of the smoothed signal
        pylab.subplot(211)
        # plot high and low crossings as vertical lines (and peaks too)
        for high_crossing in high_crossings:
            if high_crossing > lowlim and high_crossing <= uplim:
                pylab.axvline(high_crossing - lowlim, linewidth=3, color='green')
        for low_crossing in low_crossings:
            if low_crossing > lowlim and low_crossing <= uplim:
                pylab.axvline(low_crossing - lowlim, linewidth=3, color='red')
        for peak in peaks:
            if peak > lowlim and peak <= uplim:
                pylab.axvline(peak - lowlim, linewidth=1, color='green')
        pylab.plot(dss[lowlim:uplim], linewidth=3)
        pylab.ylim(min(dss[lowlim:uplim]), max(dss[lowlim:uplim]))
        pylab.axhline(high_threshold, color='green')
        pylab.axhline(low_threshold, color='red')
        pylab.ylabel('First Derivative')
        # plot the signal and the smoothed signal
        pylab.subplot(212)
        pylab.plot(signal[lowlim:uplim])
        pylab.plot(ss[lowlim:uplim], linewidth=3)
        # plot the peak lines too
        for peak in peaks:
            if peak > lowlim and peak <= uplim:
                pylab.axvline(peak - lowlim, linewidth=1, color='green')
        pylab.ylim(min(signal[lowlim:uplim]), max(signal[lowlim:uplim]))
        pylab.ylabel('Signal')
        pylab.xlabel('Index above ' + str(lowlim))

        pylab.show()
    # -------------------------------------/\--[ DEBUG ]--/\----------------------------
    return peaks


def automated_find_spikes(signal, dt, filter_width=0.8, peak_search_time=4.0, debug=-1):
    '''
    Determines the indecies corresponding to the peak value of each spike in a signal.
    Inputs:
        signal              : a time series recording of extracellular voltages
        dt                  : the time between samples in the signal (in ms)
        filter_width        : width of smoothing filter, full width half max (in ms)
        peak_search_time    : time after crossing in ms to look for a related peak.
        debug               : what index to show in the debugging plot

    Returns:
        spikes              : a list of spike peak index values.
    '''
    # -------------------------------------------------------------------------------
    # -- smooth the signal to wash out high frequency noise
    # -------------------------------------------------------------------------------
    smoothing_type = 'hanning'               # the class of function to convolve the signal with
    # (the original had a no-op `filter_width = filter_width` here; removed)
    window_len = int(filter_width / dt) * 2  # length in time steps of the smoothing window
                                             #    window_len/2 is roughly the fwhm
    ss = smooth(signal, window_len, window=smoothing_type)

    # -------------------------------------------------------------------------------
    # -- determine upper and lower threshold from the first derivative
    # -------------------------------------------------------------------------------
    dss = signal_derivative_1D(ss, dt)
    low_threshold, high_threshold = determine_spike_threshold(dss, debug=debug)

    # --------------------------------------------------------------------------------
    # -- find all the spikes given the thresholds
    # --------------------------------------------------------------------------------
    peaks = find_spikes(signal, ss, dss, dt, low_threshold, high_threshold,
                        peak_search_time, debug=debug)

    return peaks
    

        
def window_spikes(signal, spike_index_list, window_size=40, pre_padding=0.50, exclude_overlapers=True):
    '''
    Cut a fixed-size, zero-meaned snapshot of the signal around each spike index.

    Each window is window_size samples long with the spike index located at
    pre_padding * 100 percent of the window.  Spikes whose windows would run
    off either end of the signal, or overlap a neighboring spike's window,
    are excluded and their indexes reported.
    Inputs:
        signal              : a one dimensional numpy array of values
        spike_index_list    : a one dimensional list or array of integers
        window_size         : the total size of the spike snapshot
        pre_padding         : the normalized relative position of the peak
        exclude_overlapers  : if False, the excluded spikes are windowed anyway
    Returns:
        spike_windows       : a (n_good x window_size) numpy array, each row zero meaned
        excluded_index_list : spike indexes that were excluded, so that
                                len(spike_windows) = len(spike_index_list) -
                                                     len(excluded_index_list)
                                (when exclude_overlapers is True)
    '''
    pre_i = int(window_size * pre_padding)
    post_i = window_size - pre_i

    # ------------------------------------------------------------------------
    # -- split spikes into usable ones and excluded ones
    # ------------------------------------------------------------------------
    n_spikes = len(spike_index_list)
    good_spike_index_list = []
    excluded_index_list = []
    for i, si in enumerate(spike_index_list):
        begin = si - pre_i   # proposed window start
        end = si + post_i    # proposed window end
        if begin <= 0 or end >= len(signal) - 1:
            # window would run past an edge of the signal
            bad = True
        elif i > 0 and begin < spike_index_list[i - 1] + post_i:
            # overlaps the previous spike's window
            bad = True
        elif i < n_spikes - 1 and end > spike_index_list[i + 1] - pre_i:
            # overlaps the next spike's window
            bad = True
        else:
            bad = False
        (excluded_index_list if bad else good_spike_index_list).append(si)

    if not exclude_overlapers:
        good_spike_index_list.extend(excluded_index_list)

    # ------------------------------------------------------------------------
    # -- cut and zero-mean a window for every usable spike
    # ------------------------------------------------------------------------
    spike_windows = numpy.empty((len(good_spike_index_list), window_size), numpy.float64)
    for row, gsi in enumerate(good_spike_index_list):
        window = numpy.asarray(signal[gsi - pre_i: gsi + post_i], dtype=numpy.float64)
        spike_windows[row] = window - numpy.mean(window)

    return spike_windows, excluded_index_list


def decompose_spikes(spike_windows, wavelet_type='db2'):
    '''
    Decompose each spike window into wavelet amplitudes with pywt.dwt.

    The approximation and detail coefficients of every spike are concatenated
    into one feature row per spike.
    Inputs:
        spike_windows       : a (n_spikes x window_size) numpy array (zero meaned rows)
        wavelet_type        : a valid pywt wavelet name (see the pywt module)
    Returns:
        cT                  : a (n_spikes x n_amplitudes) numpy array, where
                                n_amplitudes > window_size.
                              If you need cA and cD for idwt or something:
                                    cA[i] = cT[i,  :len(cT[i])/2]
                                    cD[i] = cT[i, len(cT[i])/2: ]
    '''
    approx = []  # the approximation amplitudes, one row per spike
    detail = []  # the detail amplitudes, one row per spike
    for window in spike_windows:
        coeff_a, coeff_d = pywt.dwt(window, wavelet_type)
        approx.append(coeff_a)
        detail.append(coeff_d)
    # rows: spikes; columns: all amplitudes describing that spike
    return numpy.hstack((numpy.array(approx), numpy.array(detail)))

def mkmeans(data, k, iter=20):
    '''
    A kmeans wrapper: the first ~75% of the iteration budget goes to a plain
        kmeans run; for each remaining iteration, kmeans is re-run seeded with
        a random member of each cluster found by the first run.  The
        lowest-distortion result over all runs is returned.
    Inputs:
        data    : a (n x m) numpy array of observations
        k       : the number of clusters
        iter    : the total iteration budget
    Returns:
        (codebook, distortion) of the best run found.
    '''
    # run the initial kmeans (guard against a zero iteration count)
    initial_iter = max(1, int(iter * 0.75))
    cb, d = kmeans(data, k, iter=initial_iter)

    remaining_iter = iter - int(iter * 0.75)
    # cluster membership of every observation under the initial codebook;
    # the codebook does not change below, so compute this once.
    membership, _ = vq(data, cb)

    cbs = [cb]
    ds  = [d]
    for _ in range(remaining_iter):
        seed = []
        for i in range(k):
            # BUG FIX: the original drew a random membership LABEL, rerolled
            # until the label equaled i, then indexed data[label] -- so every
            # seed was data[i], never a random member of cluster i (and it
            # spun forever when cluster i had no members).
            members = [j for j in range(len(membership)) if membership[j] == i]
            if members:
                seed.append(data[random.choice(members)])
            else:
                # empty cluster: fall back to a random observation
                seed.append(data[random.randrange(len(data))])
        cb_i, d_i = kmeans(data, numpy.array(seed), iter=1)
        cbs.append(cb_i)
        ds.append(d_i)

    best_index = int(numpy.argmin(ds))
    return cbs[best_index], ds[best_index]
    
def run_kmeans(kmax, data, threshold = 1.000000000001e-8, repeat = 1):
    '''
    This will run the kmeans clustering algorithm to determine possible cluster centers in 
        the data.  Initial cluster centers are chosen randomly from the data and the kmeans
        algorithm is run until a threshold is crossed.
    Inputs:
        kmax        : the maximum number of clusters to try
        data        : a (n x m) numpy array of floats, where
                        n is the number of observations, or repetitions, or data points
                        m is the dimensionality of the observations
        threshold   : the algorithm will stop when the distortion change between iterations
                        is less than this value.
        repeat      : the number of times that kmeans is restarted with random initial clusters.
                        the returned codebooks and distortions are those with the lowest
                        distortion over the restarts.
                      NOTE: this is deprecated, the kmeans implementation already has this...
    Returns:
        codebook_list     : A list of codebooks.  len(codebook_list) = kmax - 1
        distortion_list   : Distortions corresponding to the codebooks.
    '''
    # whiten the data (unit variance per dimension); kmeans expects this
    stds = numpy.std(data, axis=0)
    wdata = data / stds

    codebook_list   = []
    distortion_list = []
    print('===== Running kmeans algorithm ======')
    # try k values from 2 up to kmax
    for k in range(2, kmax + 1):
        t1 = time.time()
        cbs = []
        ds  = []
        for _ in range(repeat):
            codebook, distortion = kmeans(wdata, k, iter=30, thresh=threshold)
            cbs.append(codebook)
            ds.append(distortion)
        best_codebook_index = int(numpy.argmin(ds))

        t2 = time.time()
        # BUG FIX: the original printed the LAST repeat's distortion; report
        # the best one, which is what gets returned.
        print('k = ' + str(k) + ' ran in ' + str(t2 - t1) +
              ' seconds with distortion = ' + str(ds[best_codebook_index]))
        codebook_list.append(cbs[best_codebook_index] * stds)  # unwhiten the codebooks
        distortion_list.append(ds[best_codebook_index])
    return codebook_list, distortion_list

def cluster_data(codebook, data):
    '''
    Sorts data into clusters according to the codebook.
    Inputs:
        codebook        : a (k x m) numpy array of floats, where
                            k is the number of clusters
                            m is the dimensionality of the data
        data            : a (n x m) numpy array of floats, where
                            n is the number of data points
                            m is the dimensionality of the data
    Returns:
        data_clusters   : a list of length k numpy arrays.  Data in the kth element
                            corresponds to the data closest to the kth cluster center.
                            The list is sorted so that the number of data points in
                            each cluster decreases as you go down the list.
        index_clusters  : numpy arrays of the original row indexes, ordered to
                            match data_clusters.
    '''
    # determine what cluster each data point belongs in
    membership, distortion = vq(data, codebook)

    # one empty bucket per codebook entry
    unsorted_data_clusters = [[] for _ in range(len(codebook))]
    unsorted_data_index    = [[] for _ in range(len(codebook))]

    # fill the buckets with the data (and original indexes) of each cluster
    for i in range(len(membership)):
        unsorted_data_clusters[membership[i]].append(data[i])
        unsorted_data_index[membership[i]].append(i)

    # order the clusters by decreasing membership size
    lengths = numpy.array([len(c) for c in unsorted_data_clusters])
    order = numpy.argsort(lengths)[::-1]

    # BUG FIX: the original "cast each cluster to a numpy array" loops only
    # rebound the loop variable (`cluster = numpy.array(cluster)`) and never
    # converted anything; build the arrays directly instead.
    data_clusters  = [numpy.array(unsorted_data_clusters[j]) for j in order]
    index_clusters = [numpy.array(unsorted_data_index[j])    for j in order]

    return data_clusters, index_clusters


def plot_clusters(data_clusters, title = 'Cluster Data', data=True, mean_and_std=False):
    '''
    plot data_clusters
    Inputs:
        data_clusters       : a list of k data clusters.  Each cluster is a
                              (n_k x  m) numpy array of floats, where
                                n_k is the number of data points in the kth cluster
                                m   is the dimensionality of the data
        title               : the title the plot will be given
        data                : boolean value, if True, the actual data will be plotted.
        mean_and_std        : boolean value, if True, the mean value of the data will be plotted.
                                Lines indicating the standard deviation will also be plotted.
    Returns:
        None
    '''
    # common y-range across all subplots so clusters are visually comparable
    ymins = [numpy.min(cluster) for cluster in data_clusters]
    ymaxs = [numpy.max(cluster) for cluster in data_clusters]
    ymin = min(ymins)
    ymax = max(ymaxs)

    pylab.figure()
    subplots = len(data_clusters)

    if mean_and_std:
        data_means = [numpy.average(cluster, axis=0) for cluster in data_clusters]
        data_stds  = [numpy.std(cluster, axis=0)     for cluster in data_clusters]

    for i in range(1, subplots + 1):
        # BUG FIX: floor division keeps the subplot row count an integer
        # (plain `/` would pass a float under Python 3's true division).
        ax = pylab.subplot(((subplots - 1) // 2) + 1, 2, i)
        pylab.ylim(ymin, ymax)
        if i == 1:
            pylab.title(title)
        if data:
            pylab.plot(numpy.array(data_clusters[i - 1]).T)
        if mean_and_std:
            # use heavier lines when they must stand out over the raw data
            if data:
                bigl = 6
                sml  = 4
            else:
                bigl = 3
                sml  = 2
            pylab.plot(data_means[i - 1],                    'k', linewidth=bigl)
            pylab.plot(data_means[i - 1] + data_stds[i - 1], 'r', linewidth=sml)
            pylab.plot(data_means[i - 1] - data_stds[i - 1], 'r', linewidth=sml)
        infostring = 'Type %d    %d spikes' % ( i-1, len(data_clusters[i-1]) )
        pylab.text(0.5, 0.1, infostring,
                   horizontalalignment='center', transform=ax.transAxes)
        pylab.ylim(ymin, ymax)
        # only the first subplot keeps tick labels
        if i != 1:
            pylab.yticks([])
            pylab.xticks([])


def mahalanobis(x, y, vi=None, data=None, method='qr'):
    '''
    Calculate the squared Mahalanobis distance between two n dimensional vectors x and y.  The 
        inverse of the covariance matrix for the data can be passed as vi, or if vi=None, then 
        data is used to determine the inverse covariance matrix.
    CAUTION:
        The Mahalanobis distance calculation relies on matrix inversion and therefor suffers from
            numerical precision problems if the covariance matrix is barely invertable or singular.
    Inputs:
        x           : an n dimensional numpy array of doubles
        y           : an n dimensional numpy array of doubles
        vi          : an (n x n) numpy array of doubles representing the inverse of the covariance
                        matrix of the distribution from which x and y are drawn.
                      Note: if method = 'qr' then vi should be (r' . r)^(-1) if it is not None.
        data        : an (m x n) numpy array of doubles containing samples of
                        the distribution from which x and y are drawn.
        method      : 'qr' or 'vi' for using the qr decomp or the inverse of the cov respectively.
    Returns:
        dist        : The squared Mahalanobis distance between the vectors x and y.
    '''
    # FIXME add some checks on the input and maybe throw exceptions or something
    # BUG FIX: the original tested `vi == None` / `not data == None`, which
    # performs an elementwise comparison once the argument is an ndarray and
    # raises "truth value is ambiguous"; identity tests are required here.
    if method == 'vi':
        if vi is None and data is not None:
            # pseudo-inverse guards against a barely-invertible covariance
            vi = numpy.linalg.pinv(numpy.cov(data, rowvar=0))
    elif method == 'qr':
        if vi is None and data is not None:
            # qr of the zero-meaned, 1/sqrt(m-1)-scaled data gives r with
            # r'r equal to the sample covariance of data
            zmdata = (data - numpy.average(data, axis=0)) / numpy.sqrt(data.shape[0] - 1)
            r = numpy.linalg.qr(zmdata, mode='r')
            vi = numpy.linalg.inv(numpy.dot(r.T, r))

    # calculate the mahalanobis distance (squared); the original computed
    # this twice on the 'vi' path -- once is enough.
    diff = x - y
    fdot = numpy.dot(diff, vi)
    dist = numpy.dot(fdot, diff.T)

    return dist


def get_cluster_funct(std, mean, n):
    '''
    Build a soft-membership function for one dimension of a cluster.

    The returned callable maps a value x to 1 - erf(excess / spread), where
    excess is how far x falls outside the band [mean - se, mean + se]
    (zero inside the band), se = std / sqrt(n), and
    spread = (std + se) * sqrt(2).
    '''
    se = std / numpy.sqrt(n)
    upper = mean + se
    lower = mean - se
    spread = (std + se) * numpy.sqrt(2)

    def membership(x):
        excess = max((x - upper, lower - x, 0.0))
        return 1.0 - scipy.special.erf(excess / spread)

    return membership

def cluster_similarity(data_clusters):
    '''
    Calculate the similarity between data clusters.  The similarity is positive definate and less
        than or equal to 1.  A similarity of 1 means that two clusters are indistinguishable.  
        Small values indicate that mis-classification errors are unlikely to occur due to noise.
    Inputs:
        data_clusters       : a list of length k (k = number of clusters)
                                each element is a (m_i x n) numpy array of data vectors
                                    m_i is the number of data vectors
                                    n   is the dimensionality of the vectors
    Returns:
        max_similarity      : the maximum similarity between any two clusters
    '''
    # dimensionality of the data vectors
    n = len(data_clusters[0][0])

    # calculate the means and variances of each data cluster
    means    = [numpy.average(cluster,axis=0) for cluster in data_clusters]
    dmeans   = [signal_derivative_1D(mean, 1.0) for mean in means]        # first derivative
    ddmeans  = [signal_derivative_1D(dmean, 1.0)**2 for dmean in dmeans]  # second derivative sqrd
    # NOTE(review): "normalized" here divides each squared-curvature vector by
    #   its own sum of squares (dot(ddmean, ddmean)), weighting dimensions by
    #   relative curvature -- confirm this is the intended normalization.
    nddmeans = [ddmean / numpy.dot(ddmean,ddmean) for ddmean in ddmeans]  # normalized
    stds     = [numpy.std(cluster,axis=0) for cluster in data_clusters]

    # account for the sampling error by increasing the std
    #for i in xrange(len(stds)):
    #    stds[i] = stds[i] + stds[i]/(numpy.sqrt(len(data_clusters[i])))

    # set up the cluster distribution functions: one per dimension per cluster.
    # NOTE(review): n passed here is the data DIMENSIONALITY, but
    #   get_cluster_funct uses its third argument as a sample count when
    #   forming the standard error (std/sqrt(n)) -- verify this is intended
    #   rather than the cluster size m_i.
    cfuncts = []
    for i in xrange(len(data_clusters)):
        cfuncts.append( [get_cluster_funct(std, mean, n) \
                    for std,mean in zip(stds[i],means[i])] )
    
    # evaluate, for each pair of clusters, the similarity between them.
    #   Each dimension contributes the product of the two membership values
    #   (each cluster's function evaluated at the other cluster's mean),
    #   weighted by both curvature terms; dimensions are then averaged.
    similarities = []
    for i in xrange(len(means)-1):
        for j in xrange(i+1,len(means)):
            sims = [cfuncts[i][k](means[j][k])*cfuncts[j][k](means[i][k])* \
                    nddmeans[i][k] * nddmeans[j][k]
                        for k in xrange(n)]
            similarities.append( numpy.sum(sims)/n)
            
    return max(similarities)

def membership_likelyhood(data_cluster, other_cluster=None):
    '''
    Calculate the likelyhood that a point in a cluster belongs to that cluster.

    Inputs:
        data_cluster        : a (m x n) numpy array of data vectors
                                    m is the number of data vectors
                                    n is the dimensionality of the vectors
        other_cluster       : a (m2 x n) numpy array of data vectors
                                    m2 being the number of data vectors in other_cluster
    Returns:
        likelyhoods         : a numpy array of length m representing the likelyhood each
                                vector belongs to the data cluster. 
                              Note: if other_cluster is not None, then only the likelyhoods of
                                    the other cluster's vectors being part of data_cluster 
                                    is calculated and returned, so it will be a numpy array
                                    of length m2.
    '''
    n = len(data_cluster[0])

    # per-dimension mean and standard deviation of the reference cluster
    means = numpy.average(data_cluster, axis=0)
    stds  = numpy.std(    data_cluster, axis=0)

    # account for the sampling error by increasing the std
    #stds = stds + stds/(numpy.sqrt(len(data_cluster)))

    # one distribution function per dimension of the cluster
    cfuncts = [get_cluster_funct(std, mean, n) \
                  for std,mean in zip(stds,means)]

    # BUGFIX: use 'is None' rather than '== None' -- when other_cluster is a
    #   numpy array, '== None' is evaluated elementwise and is ambiguous in a
    #   boolean context.
    if other_cluster is None:
        source = data_cluster
    else:
        source = other_cluster

    # likelyhood of each vector = average over dimensions of the per-dimension
    #   distribution function evaluated at that vector's component.
    likelyhoods = []
    for data_pt in source:
        likes = [ cfuncts[j](data_pt[j]) for j in xrange(n) ]
        likelyhoods.append( numpy.sum(likes)/n )

    return numpy.array(likelyhoods)
    
def ambiguity(data_clusters, unclustered_data=None):
    '''
    Calculate the likelyhood of every point to belong in each cluster.  The ambiguity is the
        difference between the highest likelyhood and the next highest.
    Inputs:
        data_clusters       : a list of length k (k = number of clusters)
                                each element is a (m_i x n) numpy array of data vectors
                                    m_i is the number of data vectors
                                    n   is the dimensionality of the vectors
        unclustered_data    : if not None, then this is the unclustered data vectors,
                                a numpy array of size (m x n) where
                                    m is the number of data vectors
                                    n is the dimensionality of the vectors
    Returns:
        ambiguities         : one minus (the highest_likelyhood - next_highest_likelyhood)
                                this is a list of length k where each element is
                                a numpy array of length m_i.
                              Note: if unclustered_data is not None then this will be
                                a vector of length m.
    '''
    n = len(data_clusters[0][0])

    # calculate the means and variances of each data cluster
    means = [numpy.average(cluster,axis=0) for cluster in data_clusters]
    stds = [numpy.std(cluster,axis=0) for cluster in data_clusters]

    # account for the sampling error by increasing the std
    #for i in xrange(len(stds)):
    #    stds[i] = stds[i] + stds[i]/(numpy.sqrt(len(data_clusters[i])))

    # set up the cluster distribution functions (one function per dimension,
    #   per cluster).
    cfuncts = []
    for i in xrange(len(data_clusters)):
        cfuncts.append( [get_cluster_funct(std, mean, n) \
                    for std,mean in zip(stds[i],means[i])] )

    def _ambiguity_of(data_vector):
        # likelyhood of membership in each cluster = average over dimensions
        likelyhoods = [numpy.average([cfunct[i](data_vector[i]) \
                            for i in xrange(len(cfunct)) ]) \
                            for cfunct in cfuncts ]
        sorted_indexes = numpy.argsort(likelyhoods)
        # ambiguity is near 1 when the best and second-best likelyhoods are close
        return 1- (likelyhoods[sorted_indexes[-1]] - \
                   likelyhoods[sorted_indexes[-2]])

    # BUGFIX: use 'is None' rather than '== None' -- when unclustered_data is a
    #   numpy array, '== None' is evaluated elementwise and is ambiguous in a
    #   boolean context.
    if unclustered_data is None:
        # one array of ambiguities per cluster
        ambiguities = [numpy.array([_ambiguity_of(v) for v in cluster]) \
                           for cluster in data_clusters]
    else:
        # a single array of ambiguities for the unclustered vectors
        ambiguities = numpy.array([_ambiguity_of(v) for v in unclustered_data])

    return ambiguities

def plot_spike_train(spike_times_list, colors = ['red', 'blue', 'black'], linewidth=2, interactive=False, astl=None, ambt=None, range=None):
    '''
    Creates a vertical bar for each spike.
    Inputs:
    spike_times_list        : An n element list, where n is the number of neurons/trials.  Each
                                element of the list is a m_n element numpy array specifying the
                                times of each spike.
    colors                  : An n element list of color names, the spikes will be drawn in these
                                colors.
    linewidth               : the width of the spike lines
    interactive:            : Will plot two sets of spike_trains, the top version you can use the
                                mouse on to zoom with the span selector.
    astl                    : alternative spike time list, for comparisons
    ambt                    : another set of times, will plot as green (reference times perhaps)
    range                   : (xmin, xmax) plot limits; NOTE this parameter shadows the
                                builtin range() -- kept for backwards compatibility.
    Returns:
    None
    '''
    n = len(spike_times_list)
    # BUGFIX: copy before extending -- extending the shared default list in place
    #   made it grow across calls (mutable default argument).
    colors = list(colors)
    # make sure the colors list is long enough, if not, repeat it.
    while n > len(colors):
        colors.extend(colors)

    if not interactive:
        #pylab.interactive(False)
        # each neuron/trial gets its own horizontal band of height 1/n
        for i in xrange(n):
            for j in xrange(len(spike_times_list[i])):
                pylab.axvline(x=spike_times_list[i][j],\
                              ymin=(n-i-1.0)/n, ymax=(n-i)/(n*1.0), 
                              color=colors[i], linewidth=linewidth)
        #pylab.interactive(True)
        pylab.ylim(0,n)
        pylab.yticks('')
        # BUGFIX: 'is None' instead of '== None' (range may be a numpy array,
        #   where '== None' is evaluated elementwise).
        if range is None:
            xmin = numpy.min(numpy.hstack(spike_times_list))
            xmax = numpy.max(numpy.hstack(spike_times_list))
        else:
            xmin = range[0]
            xmax = range[1]
        ax = pylab.gca()
        ax.set_xlim(xmin,xmax)
    else:
        # BUGFIX: guard against astl/ambt being omitted -- previously this crashed
        #   with len(None) in interactive mode.
        m = 0 if astl is None else len(astl)
        from matplotlib.widgets import SpanSelector
        #pylab.interactive(False)
        fig = pylab.figure()
        ax  = fig.add_subplot(211, axisbg='#FFFFFF')
        ax2 = fig.add_subplot(212, axisbg='#FFFFFF')
        if ambt is not None:
            # reference times drawn full-height in green on both plots
            for j in xrange(len(ambt)):
                ax.axvline(x=ambt[j], color='green', linewidth=max(linewidth-1,1))
                ax2.axvline(x=ambt[j], color='green', linewidth=max(linewidth-1,1))
        for i in xrange(n):
            for j in xrange(len(spike_times_list[i])):
                ax.axvline(x=spike_times_list[i][j],\
                              ymin=(n-i-1.0)/n, ymax=(n-i)/(n*1.0), 
                              color=colors[i], linewidth=linewidth)
                ax2.axvline(x=spike_times_list[i][j],\
                              ymin=(n-i-1.0)/n, ymax=(n-i)/(n*1.0), 
                              color=colors[i], linewidth=linewidth)
        # the alternative spike time list, offset one color for contrast
        for i in xrange(m):
            for j in xrange(len(astl[i])):
                ax.axvline(x=astl[i][j],\
                              ymin=(n-i-1.0)/n, ymax=(n-i)/(n*1.0), 
                              color=colors[(i+1)%len(colors)], linewidth=linewidth)
                ax2.axvline(x=astl[i][j],\
                              ymin=(n-i-1.0)/n, ymax=(n-i)/(n*1.0), 
                              color=colors[(i+1)%len(colors)], linewidth=linewidth)
        ax.set_title('Press left mouse button and drag')
        #pylab.interactive(True)

        def onselect(xmin, xmax):
            # zoom the bottom plot to the selected span
            ax2.set_xlim(xmin, xmax)
            fig.canvas.draw()

        # set useblit True on gtkagg for enhanced performance
        span = SpanSelector(ax, onselect, 'horizontal', useblit=True,
                            rectprops=dict(alpha=0.5, facecolor='red') )
        pylab.show()

    
def plot_similarity( clustered_data_set ):
    '''
    Plot the maximum similarity between data clusters for a set of
        data clusters.
    Inputs:
        clustered_data_set      : A list of clustered data windows, where each element in the
                                    list corresponds to one k value, starting with k=2.
    Returns:
        None (opens a matplotlib figure).
    '''
    # iterate the clusterings directly instead of indexing by position
    sims = [cluster_similarity(clusters) for clusters in clustered_data_set]
    # element 0 corresponds to k=2, element 1 to k=3, etc.
    k_values = range(2,len(clustered_data_set)+2)
    pylab.figure()
    pylab.plot(k_values,sims, linewidth=2)
    pylab.title('Maximum Similarity Between Clusters')
    pylab.ylabel('Similarity')
    pylab.xlabel('Number of Clusters')
    pylab.show()
    
def detection_inspector( signal, spike_index_list, window_size, prepadding=0.3, asil=None):
    '''
    Open a plot window that shows the signal and identified spike times in the top plot.  The
        user can span-select to see zoomed in versions with spike window overlays on the bottom
        plot.
    Inputs:
        signal              : the raw voltage trace
        spike_index_list    : the index list full of spike peaks
        window_size         : the size of the spike window in time steps
        prepadding=0.3      : how to place the spike window relative to the peak
        asil=None           : Actual spike index list (if not none, will show in bottom plot)
    Returns:
        None (opens an interactive matplotlib figure).
    '''   
    from matplotlib.widgets import SpanSelector
    signal_max    = max(signal)
    signal_min    = min(signal)
    plot_max      = signal_max * 1.05 # make it 5% bigger than it needs to be
    # markers for detected spikes, drawn halfway between signal_max and plot_max
    indicator_y   = numpy.ones(len(spike_index_list), dtype=numpy.float64) * \
                        numpy.average([signal_max, plot_max])
    indicator_x   = spike_index_list
    plot_asil = False
    # BUGFIX: 'is not None' instead of '!= None' -- asil may be a numpy array,
    #   where '!= None' is evaluated elementwise and is ambiguous in a boolean
    #   context.
    if asil is not None:
        plot_asil = True
        asil_x    = asil
        asil_y    = numpy.ones(len(asil), dtype=numpy.float64) * \
                               numpy.average([signal_max, plot_max])

    fig = pylab.figure()
    ax  = pylab.subplot(211)
    # -- Plot the top window, with spike indexes
    ax.plot( signal )
    ax.plot( indicator_x, indicator_y, '.' )
    ax.set_ylim(signal_min, plot_max)
    ax.set_ylabel('Signal (mV)')

    # -- Plot the bottom window (initially with default xlim)
    ax2 = pylab.subplot(212)
    ax2.plot( signal )
    ax2.plot( indicator_x, indicator_y, '.' )
    if plot_asil:
        ax2.plot( asil_x, asil_y, 'v')
    ax2.set_ylim(signal_min, plot_max)
    ax2.set_xlabel('Time Step (0.1 ms)')
    
    # keep track of all spans drawn, so each spike window is only drawn once
    sil_spans = set()
    if plot_asil:
        asil_spans = set()

    # this is the vspan for the top plot
    poly = ax.axvspan(-1, 0, facecolor='r', alpha=0.5)

    def onselect(xmin, xmax):
        # change the span on the top plot by moving the polygon's vertices
        # (vertices 0,1,4 are the left edge; 2,3 the right edge)
        poly.xy[0][0] = xmin
        poly.xy[1][0] = xmin
        poly.xy[2][0] = xmax
        poly.xy[3][0] = xmax
        poly.xy[4][0] = xmin

        ax2.set_xlim(xmin, xmax)
        # also draw in all the spike windows
        for i in xrange(len(spike_index_list)):
            tsi = spike_index_list[i] # this spike index
            if tsi not in sil_spans:
                if xmin - window_size < tsi and tsi < xmax + window_size:
                    # window straddles the peak according to prepadding
                    ax2.axvspan( tsi-window_size*prepadding, tsi+window_size*(1.0-prepadding),
                                 facecolor = 'b', alpha=0.35 )
                    sil_spans.add(tsi)
        if plot_asil: 
            for i in xrange(len(asil)):
                tsi = asil[i] # this spike index
                if tsi not in asil_spans:
                    # NOTE(review): upper bound is 'xmax' here but 'xmax + window_size'
                    #   above -- looks inconsistent, confirm whether intentional.
                    if xmin - window_size < tsi and tsi < xmax:
                        ax2.axvspan( tsi, tsi+window_size,
                                     facecolor = 'y', alpha=0.6 )
                        asil_spans.add(tsi)
        fig.canvas.draw()

    # -- add the span selector to the top plot.
    span  = SpanSelector( ax, onselect, 'horizontal', useblit=True, 
                         rectprops=dict(alpha=0.5, facecolor='red') )

    # -- add the span selector to the bottom plot.
    span2 = SpanSelector( ax2, onselect, 'horizontal', useblit=True, 
                         rectprops=dict(alpha=0.5, facecolor='red') )

    pylab.show()
     
     

            
    
