import sorting_functions
import numpy

class Data_trace():
    '''
    This class takes care of the book keeping associated with spike sorting.
    Methods:
        __init__( data, dt )        : takes raw voltage trace data and the sampling
                                          timestep size
        detect_spikes( filter_width=0.8, peak_search_time=4.0, window_size=40,
                       pre_padding=0.50, exclude_overlapers=False, debug=-1 )
                                    : takes a filtering function width (in ms), the time to
                                          search for a peak after a threshold crossing, and
                                          the window_size and pre_padding used to create
                                          windows around spikes.
        cluster_spikes( kmax=15, threshold=1.0e-8 )
                                    : takes the maximum apriori number of clusters to try, and the
                                          threshold improvement in mean square error for the kmeans
                                          algorithm.
        spike_index( spike_window_list )
                                    : takes a spike window list and returns the index of each spike in
                                           the original data (self.data)
        spike_time( spike_window_list )
                                    : takes a spike window list and returns the time of each spike in
                                           the original data (self.data)
    '''
    def __init__(self, data, dt):
        '''
        Initialization method just takes data and dt and saves them.
        '''
        self.data = data
        self.dt   = dt

    @staticmethod
    def _window_key(window):
        '''
        Fingerprint used to identify a spike window in self.index_dict.

        NOTE(review): this is a float-valued key (mean of the window plus its
        first sample).  It assumes windows are not modified after detection and
        that no two windows share the same fingerprint -- a collision would
        silently drop a spike from the lookup table.
        '''
        return numpy.average(window) + window[0]

    def detect_spikes(self, filter_width=0.8, peak_search_time=4.0,
                      window_size=40, pre_padding=0.50, exclude_overlapers=False,
                      debug=-1):
        '''
        This method detects spikes in the raw voltage data trace.  It first smoothes the data
            with a filter of width filter_width (in ms) and then finds the times when the
            first derivative crosses a threshold in the positive (magnitude) direction.
            The next peak in the raw data after the threshold crossing is a spike peak.
            For each spike peak, a small window of size window_size is cut out of the raw
            voltage data and the peak is centered at pre_padding*100 percent in the window.
        Calls:
            sorting_functions.automated_find_spikes
            sorting_functions.window_spikes
        Alters:
            spike_windows
            index_dict
        '''

        # keep track of passed variables.
        self.detect_filter_width        = filter_width
        self.detect_peak_search_time    = peak_search_time
        self.detect_window_size         = window_size
        self.detect_pre_padding         = pre_padding
        self.detect_exclude_overlapers  = exclude_overlapers
        self.detect_debug               = debug

        # determine the index of every spike in the signal
        peaks = sorting_functions.automated_find_spikes(
                                      self.data,
                                      self.dt,
                                      filter_width=filter_width,
                                      peak_search_time=peak_search_time,
                                      debug=debug)
        spike_index_list = numpy.array(peaks)  # call it the spike_index list

        # make windows of the spikes
        self.spike_windows, excluded_index_list = sorting_functions.window_spikes(
                                                      self.data,
                                                      spike_index_list,
                                                      window_size=window_size,
                                                      pre_padding=pre_padding,
                                                      exclude_overlapers=exclude_overlapers)

        # keep track of the index related to each spike window, so that
        #   self.spike_windows[i] corresponds to the spike at
        #   self.data[ spike_window_index_list[i] ]
        if exclude_overlapers:
            # drop the spikes whose windows were excluded as overlappers
            spike_window_index_list = [i for i in spike_index_list
                                       if i not in excluded_index_list]
        else:
            spike_window_index_list = list(spike_index_list)

        # create self.index_dict so we can map a spike window back to its index
        # in self.data (see _window_key for the caveats of this lookup scheme).
        self.index_dict = {}
        for window, data_index in zip(self.spike_windows, spike_window_index_list):
            self.index_dict[self._window_key(window)] = data_index

    def spike_index(self, spike_window_list):
        '''
        This method returns the spike index associated with each spike window passed.
        '''
        # use the self.index_dict to look up the index of each spike window
        return [self.index_dict[self._window_key(sw)] for sw in spike_window_list]

    def spike_time(self, spike_window_list):
        '''
        This method returns the spike time associated with each spike window passed.
        '''
        # use the self.index_dict to look up the index of each spike window
        index_list = self.spike_index(spike_window_list)
        # return the spike times (index * sampling timestep)
        return [i * self.dt for i in index_list]

    def cluster_spikes(self, kmax=15, threshold=1.0e-8):
        '''
        This method uses the spike_windows created by the method self.detect_spikes to find
            clusters of spikes.  The algorithm used (kmeans) needs to know how many clusters
            are in the data, and so this code will try values from 2 up to kmax.  The kmeans
            algorithm stops when the improvement in the mean squared error drops below the
            given threshold.
        Calls:
            sorting_functions.run_kmeans
            sorting_functions.cluster_data
        Alters:
            clustered_spike_windows
        Raises:
            RuntimeError : if detect_spikes has not been run yet.
        '''

        # make sure self.spike_windows exists first.
        if not hasattr(self, 'spike_windows'):
            raise RuntimeError(
             'You must have run the "detect_spikes" method before running the "cluster_spikes" method.')

        # keep track of passed variables.
        self.spike_cluster_kmax      = kmax
        self.spike_cluster_threshold = threshold

        # run the kmeans clustering algorithm on the data for k=2-kmax
        codebook_list, distortion_list = sorting_functions.run_kmeans(kmax,
                                                                      self.spike_windows,
                                                                      threshold=threshold)
        self.codebook_list = codebook_list

        # using the cluster centers just found, sort the data into clusters.
        self.clustered_spike_windows = []
        for codebook in codebook_list:
            # cluster_data also returns the per-cluster indices; only the
            # windows themselves are kept here.
            clustered_windows, _ = sorting_functions.cluster_data(codebook,
                                                                  self.spike_windows)
            self.clustered_spike_windows.append(clustered_windows)

