import numpy
import copy
import bisect
import itertools

import matplotlib

def format_frames(frames, greyscale=True):
    """
    Convert a sequence of PIL image objects into a 3D (or 4D) numpy array
        of uint8 pixel values (0-255).
    Inputs:
        frames          : a series of PIL image objects (anything
                          numpy.array can convert to an HxWx3 array works)
        --kwargs--
        greyscale=True  : collapse the R, G, B channels to greyscale using
                          the common integer luma approximation
                              grey = (11*R + 16*G + 5*B) // 32
    Returns:
        a (len(frames), H, W) uint8 array when greyscale is True,
        otherwise a (len(frames), H, W, 3) uint8 array.
    """
    first = numpy.array(frames[0])
    height = first.shape[0]
    width = first.shape[1]

    if greyscale:
        out = numpy.empty((len(frames), height, width), dtype=numpy.uint8)
    else:
        out = numpy.empty((len(frames), height, width, 3), dtype=numpy.uint8)

    for i, frame in enumerate(frames):
        # uint32 gives the weighted sum headroom (max 255*32) before division
        pixels = numpy.array(frame, dtype=numpy.uint32)
        if greyscale:
            r = pixels[:, :, 0]
            g = pixels[:, :, 1]
            b = pixels[:, :, 2]
            # // makes the integer division explicit (same result under
            # Python 2's int `/` and after Python 3's float `/` + uint8 cast)
            out[i] = (r*11 + g*16 + b*5) // 32
        else:
            out[i] = pixels

    return out

def add_scalar_alpha(image, alpha):
    """Append a constant alpha channel to an NxMx3 color image.

    Returns an NxMx4 float array whose alpha entries all equal alpha.
    """
    transposed = image.T
    alpha_plane = numpy.full(transposed[0].shape, alpha, dtype=numpy.float64)
    return numpy.array([transposed[0], transposed[1],
                        transposed[2], alpha_plane]).T

def colorize_image(image, color, alpha=None):
    '''
    Tint a greyscale image with a color.
    Inputs:
        image       : an NxM numpy array of floats
        color       : a color (anything that matplotlib supports)
        alpha       : an NxM numpy array of floats used as the alpha channel
    Returns:
        an NxMx3 numpy array of floats (NxMx4 when alpha is given).
    Raises:
        RuntimeError when alpha does not match the shape of image.
    '''
    r, g, b = matplotlib.colors.colorConverter.to_rgb(color)
    planes = [(image*r).T, (image*g).T, (image*b).T]
    if alpha is None:
        return numpy.array(planes).T
    if alpha.shape != image.shape:
        raise RuntimeError('alpha must be %dx%d and is instead %dx%d.' %
                           (image.shape[0], image.shape[1],
                            alpha.shape[0], alpha.shape[1]))
    planes.append(alpha.T)
    return numpy.array(planes).T

def invert_array(x, max_value=None):
    '''
    Return a flipped copy of x, i.e. max_value - x, so that
        max(x) maps to 0.0 and
        min(x) maps to original_max(x)
    Inputs:
        x           : a numpy array
        --kwargs--
        max_value   : the upper limit of the range of x; defaults to
                      numpy.max(x) when None.
    '''
    ceiling = numpy.max(x) if max_value is None else max_value
    return ceiling - x

def color_compose(image_list, color_list, axes, bg=None, 
                                                invert_image=True, 
                                                invert_alpha=False, 
                                                **kwargs):
    '''
    Compose a new image by layering the greyscale images in image_list,
        each tinted with the corresponding color in color_list.
    Inputs:
        image_list              : a list of NxM numpy arrays (greyscale images)
        color_list              : a list of matplotlib supported colors.
        axes                    : a matplotlib Axes object
        --kwargs--
        bg=None                 : one of 'black', 'white' or a NxMx3 numpy 
                                   array (color image) to be the background for 
                                   the composition
        invert_image=True       : invert the images in image_list before 
                                   creating the composition
        invert_alpha=False      : normally the (greyscale) image is used also
                                   as the alpha channel for layering itself;
                                   if this is set to True the inverse of the
                                   (greyscale) image is used instead.
        **kwargs                : keyword arguments passed on to invert_array
                                   whenever (or if ever) it is called.
    Returns:
        None                    : the composition is drawn to axes
    Raises:
        RuntimeError when bg is an array whose shape does not match the
        images in image_list.
    '''
    # setup the background image
    image_shape = image_list[0].shape
    if bg is None or bg == 'white':
        gs = numpy.ones(image_shape, dtype=numpy.float64)
        background = colorize_image(gs, 'white')
    elif bg == 'black':
        # a zero image tinted with any color stays zero, i.e. black
        gs = numpy.zeros(image_shape, dtype=numpy.float64)
        background = colorize_image(gs, 'white')
    elif bg.shape[:2] == image_shape[:2]:
        background = bg
    else:
        raise RuntimeError('bg must be %dx%d and is instead %dx%d.' %
                           (image_shape[0], image_shape[1],
                            bg.shape[0], bg.shape[1]))
    if len(background.shape) == 3:
        axes.imshow(background)
    else:
        # a greyscale background still needs RGB channels for imshow
        axes.imshow(colorize_image(background, 'white'))

    # layer all the images on top of the background
    # (zip instead of the Python-2-only itertools.izip; identical behavior)
    for image, color in zip(image_list, color_list):
        # potentially invert the image
        if invert_image:
            i = invert_array(image, **kwargs)
        else:
            i = image
        # potentially invert the alpha
        if invert_alpha:
            i_alpha = invert_array(image, **kwargs)
        else:
            i_alpha = image
        # layer image onto the composition
        axes.imshow(colorize_image(i, color, alpha=i_alpha))

def value_bounds(x, fraction, exclude_min_max=True):
    '''
    Calculates the values which bound the domain where the specified
        fraction of data points in x belong.
    Inputs:
        x                    : a list of numpy arrays
        fraction             : the fraction of all data in x (0.0-1.0)
        --kwargs--
        exclude_min_max=True : exclude the minimum and maximum values from
                                x before determining the bounding values.
    Returns:
        minimum_value, maximum_value
    '''
    pooled = []
    for tx in x:
        if exclude_min_max:
            # drop every occurrence of the maximum, then of the minimum
            no_max = tx[tx < numpy.max(tx)]
            pooled.extend(no_max[no_max > numpy.min(no_max)])
        else:
            pooled.extend(tx)
    flat = numpy.array(pooled).flatten()
    flat.sort()
    # number of points allowed to saturate, split between the two tails
    saturating = int(len(flat)*(1.0 - fraction))
    # // keeps the indices integers under Python 3 (plain / would yield a
    # float and raise on indexing); identical to Py2 integer division
    new_min = flat[max(0, saturating//2 - 1)]
    new_max = flat[-max(1, saturating//2 + 1)]
    return new_min, new_max

def saturating_linear_norm(x, bounds, exclude_min_max=True, 
                           saturation_values=[1.0,0.0]):
    '''
    Map a copy of x linearly onto [0.0, 1.0], then replace values landing
        above 1.0 or below 0.0 with the given saturation values.
    Inputs:
        x                       : a numpy array
        bounds                  : either a (min_value, max_value) pair used
                                   directly for the normalization, or a single
                                   fraction (0.0-1.0) of points that should
                                   remain unsaturated (the pair is then
                                   derived via value_bounds).
        --kwargs--
        exclude_min_max=True    : drop the min/max of x before computing
                                   fraction-based bounds.
        saturation_values=[1,0] : replacements for normalized values above
                                   1.0 and below 0.0, respectively.
    Returns:
        normalized_x            : the normalized copy of x.
    Raises:
        RuntimeError when an iterable bounds is not of length 2.
    '''
    scaled = x*1.0  # float copy so the division below never truncates
    if hasattr(bounds, '__iter__'):
        if len(bounds) != 2:
            raise RuntimeError('bounds should be of length 2.  It is of length %d' % len(bounds))
        lo, hi = bounds
    else:
        # bounds is a fraction
        lo, hi = value_bounds([x], bounds, exclude_min_max=exclude_min_max)
    # normalize, then saturate
    result = (scaled - lo)/(hi - lo)
    result[result > 1.0] = saturation_values[0]
    result[result < 0.0] = saturation_values[1]
    return result

def get_n_median_values(image_list, expand_from_min=True):
    '''
    Average, across image_list, of each image's minimum and of each image's
        non-minimum median after shifting that minimum to zero.  When
        expand_from_min is False each image is negated before the statistics
        are taken.
    Returns:
        (average of minima, average of shifted medians)
    '''
    minima = []
    medians = []
    for img in image_list:
        oriented = img if expand_from_min else -img
        low = numpy.min(oriented)
        minima.append(low)
        # shift so the minimum sits at zero, then take the non-min median
        medians.append(non_min_median(oriented - low))
    return numpy.average(minima), numpy.average(medians)

def non_min_median(x):
    '''
    Median of the entries of x strictly greater than the smallest entry,
        no matter how many times that minimum value repeats in x.
    '''
    floor = numpy.min(x)
    return numpy.median(x[x > floor])
        
def n_median_normalize(image, n=2, expand_from_min=True,
                       min_val=None, median_val=None):
    '''
    Normalize image so that min(image) maps to 0 and n medians above the
        minimum maps to 1, clipping everything outside [0, 1].
    Inputs:
        image           : an NxM numpy array of floats
        n               : how many medians above the minimum becomes the
                          new maximum
        expand_from_min : when False the image is negated first and the
                          result flipped back (1 - normalized) at the end
        min_val         : precomputed minimum; derived from image when None
        median_val      : precomputed non-minimum median; derived when None
    '''
    working = image if expand_from_min else -image

    if min_val is None:
        min_val = numpy.min(working)
    shifted = working - min_val

    if median_val is None:
        median_val = non_min_median(shifted)
    scaled = shifted/(n*median_val)

    # clip to the unit interval
    scaled[scaled > 1.0] = 1.0
    scaled[scaled < 0.0] = 0.0

    if expand_from_min:
        return scaled
    return 1.0 - scaled

def get_prev_stim(sample_frame, shade):
    '''
    Return a 2D numpy array with the same shape and dtype as sample_frame,
        uniformly filled with a 0-255 value derived from shade.
    Inputs:
        sample_frame : a numpy array used only for its shape and dtype
        shade        : a value between -1.0 (maps to 0) and 1.0 (maps to 255)
    '''
    frame_val = int(255 * (shade+1.0)/2.0)
    # full_like replaces the original deepcopy + chained-view assignment
    # (return_frame[:][:] = ...) with a single explicit fill
    return numpy.full_like(sample_frame, frame_val)

def get_stim(time, frames, frame_times, prev_frame, subsequent_frame):
    '''
    Figures out what frame was being shown at a particular time.

    Times before frame_times[0] yield prev_frame; times at least one
    average frame duration past the last presentation yield
    subsequent_frame; otherwise the frame whose presentation interval
    contains time is returned.
    '''
    # frame_times[0] is not a presentation time; drop it
    presented = numpy.array(frame_times[1:])
    mean_duration = numpy.average(presented[1:] - presented[:-1])

    if time >= frame_times[-1] + mean_duration:
        return subsequent_frame
    if time < frame_times[0]:
        return prev_frame

    return frames[bisect.bisect_left(presented, time) - 1]

def stim_chunk_from_times_list(times_list, frames, frame_times, shade=1.0):
    """
    Figure out what was displayed on the screen at each time in times_list.
    Inputs:
        times_list      : The times where we care what was presented on the 
                           screen
        frames          : The frames representing the stimulus on the screen. (unformatted)
        frame_times     : the times when the frames were presented.
        shade=1.0       : The window background shade prior to stimulation.
    Returns:
        a 3 dimensional numpy array whose:
            shape[0] : is of length len(times_list)
            shape[1] : win_size[0]
            shape[2] : win_size[1] where win_size is the size, in pixels,
                       of the window used to generate the frames.
    """
    frames = format_frames(frames)
    # before (and after) the presented frames the screen held a uniform shade
    prev_frame = get_prev_stim(frames[0], shade)
    subsequent_frame = prev_frame

    # (dead locals from the original -- begin_time/begin_i search, ri,
    # current_time, x, y -- were never used and have been removed)
    return numpy.array([get_stim(t, frames, frame_times,
                                 prev_frame, subsequent_frame)
                        for t in times_list])

def stim_chunk(end_time, tau, num_samples, frames, frame_times, shade=1.0):
    """
    This function figures out what was displayed on the screen at a given,
        time and returns a 3 dimensional numpy array whose:
            shape[0] : is of length num_samples
            shape[1] : win_size[0]
            shape[2] : win_size[1] where win_size is the size, in pixels
                                   window used to generate the frames.
    Inputs:
        end_time        : The time of the 'event' or spike.
        tau             : The length of time before the 'event' or spike that
                          we're calculating the spike triggered average for.
        num_samples     : len(return_value), number of time samples in chunk.
        frames          : The frames representing the stimulus on the screen.
        frame_times     : the times when the frames were presented.
        shade=1.0       : The window background shade prior to stimulation.
    Returns:
        a (num_samples, x, y) uint32 numpy array of stimulus frames, or
        None when end_time falls outside the presented range (or -- see the
        NOTE at the bottom -- when the sample times run past the last frame).
    """
    # uniform frame shown before stimulation began
    prev_frame = get_prev_stim(frames[0], shade)
    # ignore the first value of frame_times, since it is always 0.0
    tframe_times = frame_times[1:]
    if end_time < 0.0:
        return None
    if end_time > tframe_times[-1]:
        return None

    # num_samples evenly spaced sample times covering [end_time-tau, end_time]
    begin_time = end_time - tau
    times = numpy.linspace(begin_time, end_time, num_samples)

    for i, ftime in enumerate(tframe_times):
        # find the first time greater than begin_time.
        if ftime > begin_time:
            begin_i = i
            break
    # NOTE(review): if begin_time >= every entry of tframe_times this loop
    # never breaks and begin_i is unbound, raising NameError below --
    # presumably callers guarantee begin_time < tframe_times[-1]; confirm.

    # ri walks through `times`; current_time is the sample being filled
    ri = 0
    current_time = times[ri]
    x = frames.shape[1]
    y = frames.shape[2]
    return_value = numpy.empty((num_samples,x,y), dtype=numpy.uint32)
    # start one frame before begin_i so sample times preceding the first
    # presented frame fall into the i < 0 branch and get prev_frame
    for i in xrange(begin_i-1,len(tframe_times)-1):
        # if i < 0 add in prev_frame instead of frames[i]
        if i < 0:
            while current_time < 0.0:
                # keep putting in the prev_frame
                return_value[ri] = prev_frame
                ri += 1
                if ri == len(times):
                    # every sample filled
                    return return_value
                current_time = times[ri]
        else:
            while current_time < tframe_times[i+1]:
                # keep putting in the current frame
                return_value[ri] = frames[i]
                ri += 1
                if ri == len(times):
                    # every sample filled
                    return return_value
                current_time = times[ri]
    # NOTE(review): falling off this loop returns None implicitly (samples
    # extend past the last frame interval) -- confirm this is intended.
