import numpy
from numpy.ma import multiply as numpy_ma_multiply
from numpy.ma import resize   as numpy_ma_resize
from numpy.ma import empty    as numpy_ma_empty
from .ancillaryvariables import AncillaryVariables
from .cellmethods        import CellMethods
from .coordinate         import Coordinate
from .coordinatebounds   import CoordinateBounds
from .data               import Data
from .fieldlist          import FieldList
from .variable           import Variable

def collapse(fields, method, axes=None, weights='default', unbiased=False,
             ignore_size1=True, squeeze=False):
    '''

Collapse fields' dimensions.

:Parameters:

    fields :  (sequence of) Field or FieldList
        The field or fields to be collapsed.

    method : str or CellMethods
        There are two ways of setting the collapse method:

            * A string containing a single method (such as 'min').

                All of the dimensions specified with the `axes`
                parameter are collapsed simultaneously with this
                method.
                  
            * A CF cell methods string (such as 'time: max') or a
              CellMethods object equivalent to such a string.

                This method specifies both collapse methods and the
                axes to apply them to. Each name in the string is
                interpreted as for the `axes` parameter. Multiple cell
                methods may be specified (such as 'time: max height:
                mean'), in which case each collapse is carried out
                independently and in the order that they are given.

    axes : scalar or sequence, optional
        The dimensions to collapse. May not be set if `method` is a
        cell methods string or CellMethods object, since those
        already name the axes. By default all dimensions are
        collapsed.

    weights : str or None or dict, optional
        'default' (the default) derives weights from the fields' cell
        measures and dimension coordinates. None requests equal
        weights. A dict maps dimension names (or tuples of names) to
        weights; it is never modified by this function.

    unbiased : bool, optional
        If True then calculate unbiased statistics where
        applicable. By default biased statistics are calculated.

    ignore_size1 : bool, optional
        If False then raise a ValueError if a dimension to be
        collapsed has size 1, regardless of whether or not it spans
        the data array. By default size 1 collapse dimensions are
        ignored, regardless of whether or not they span the data
        array.

    squeeze : bool, optional
        If True then remove collapsed dimensions from the data array.
        By default collapsed dimensions are retained with size 1 if
        they were spanned by the original data array.

:Returns:

   out : Field or FieldList

**Examples**

'''
    # ---------------------------------------------------------------
    # Flatten an input list or tuple of fields and field lists into a
    # single flat list of fields
    # ---------------------------------------------------------------
    if isinstance(fields, (list, tuple)):
        fields = [f for element in fields for f in element]
    #--- End: if

    # If method is a cell methods string (such as 'area: mean time:
    # max') then convert it to a CellMethods object
    if ':' in method:
        method = CellMethods(method)

    # ================================================================
    # If method is (now) a CellMethods object then process its
    # methods one by one with recursive calls and then return.
    # ================================================================
    if isinstance(method, CellMethods):
        if axes is not None:
            raise ValueError(
                "Can't collapse: Can't set both cell methods and axes")

        m = [x['method'] for x in method]
        a = [x['name']   for x in method]
        for name in a:
            if None in name:
                raise ValueError(
                    "Can't collapse: Invalid cell method name '%s'" % None)
        #--- End: if

        # Collapse according to each cell method in turn with
        # recursive calls. A plain Python 2 list (produced by the
        # flattening above) has no copy method, so slice-copy it.
        if isinstance(fields, list):
            new = fields[:]
        else:
            new = fields.copy()
        while m:
            new = collapse(new, m.pop(0), axes=a.pop(0), weights=weights, 
                           unbiased=unbiased, ignore_size1=ignore_size1,
                           squeeze=squeeze)
        #--- End: while

        return new
    #--- End: if

    # ================================================================
    # Still here? Then method is a string describing a single method
    # (such as 'mean', for example).
    # ================================================================
    out = FieldList()

    for f in fields:
        new = f.copy()

        #-------------------------------------------------------------
        # Parse the collapse axes. Per-field names (f_axes, f_weights)
        # are used so that one field's parsed axes/derived weights do
        # not leak into the next field's iteration.
        #-------------------------------------------------------------
        f_axes, dims = f._parse_axes(axes, 'collapse', dims=True,
                                     ignore=ignore_size1,
                                     default=range(f.ndim))
            
        if ignore_size1:
            # Ignore size 1 dimensions
            dims = [dim for dim in dims if f.space.dimension_sizes[dim] > 1]
            f_axes = f._parse_axes(dims, 'collapse')
        else:
            # Fail if any size 1 dimensions are specified
            for dim in dims:
                if f.space.dimension_sizes[dim] == 1:
                    raise ValueError(
                        "Can't collapse: Size 1 dimension has been specified")
        #--- End: if

        # ------------------------------------------------------------
        # Calculate the default weights, if required
        # ------------------------------------------------------------
        if weights == 'default':
            wdims = dims[:]
            f_weights = {}
            
            # Get weights from cell measures, if possible.
            for key, cm in f.space.cell_measures().iteritems():
                if cm.size == 1:
                    wdims = [dim for dim in wdims 
                             if dim not in f.space.dimensions[key]]
                    continue

                cm = cm.copy()

                # Squeeze out of the cell measure any dimensions not
                # spanned by the field's data array
                cm_dims = f.space.dimensions[key][:]
                for dim in cm_dims[:]:
                    if dim not in f.space.dimensions['data']:
                        cm.squeeze(dim)
                        cm_dims.remove(dim)
                #--- End: for

                f_weights[tuple(cm_dims)] = cm.Data

                wdims = [dim for dim in wdims if dim not in cm_dims]
            #--- End: for

            # Get weights for any remaining dimensions from dimension
            # coordinates, if possible.
            if wdims:
                s = f.space.analyse()
                for dim in wdims: 
                    if dim in s['dim_to_coord']:
                        coord = s['dim_to_coord'][dim]
                        if coord.size > 1:
                            f_weights[dim] = coord
            #--- End: if

        elif weights is None:
            f_weights = {}
        elif isinstance(weights, dict):
            # Work on a copy so that the caller's dictionary is not
            # modified (previously the copy's result was discarded
            # and the original dict was mutated).
            f_weights = weights.copy()
            # Delete weights for size 1 dimensions
            for dim, size in f.space.dimension_sizes.iteritems():
                if size == 1 and dim in f_weights:
                    del f_weights[dim]
            #--- End: for
        else:
            raise TypeError("Can't collapse: Invalid weights: %s" %
                            repr(weights))

        # ------------------------------------------------------------
        # Parse the weights which are Variables into Data objects (or
        # None). If a weight is a coordinate then it will be converted
        # first.
        # ------------------------------------------------------------
        for key, value in f_weights.iteritems():
            if isinstance(value, Coordinate) and value.ndim <= 1:
                # Convert a coordinate into a weights Data object
                f_weights[key] = calc_weights(value, infer_bounds=True)
            elif isinstance(value, Variable):
                if value.dtype.char != 'S':
                    # Replace a non-string variable with its Data
                    # object
                    f_weights[key] = value.Data
                else:
                    # String-valued weights are equal weights
                    f_weights[key] = None
        #--- End: for

        # ------------------------------------------------------------
        # Collapse the data array
        # ------------------------------------------------------------
        new.Data = collapse_data(f.Data, method, axes=f_axes,
                                 weights=f_weights, unbiased=unbiased)

        # ------------------------------------------------------------
        # Update the cell methods
        # ------------------------------------------------------------
        update_cell_methods(new, method, dims)

        # ------------------------------------------------------------
        # Update ancillary variables
        # ------------------------------------------------------------
        update_ancillary_variables(new, dims)

        #-------------------------------------------------------------
        # Collapse the space
        #-------------------------------------------------------------
        collapse_space(new.space, dims)

        # ------------------------------------------------------------
        # Squeeze collapsed dimensions
        # ------------------------------------------------------------
        if squeeze:
            new.squeeze(dims)

        # We need to finalize the new field because some of the
        # coordinates will have been redefined
        new.finalize()

        out.append(new)
    #--- End: for

    if isinstance(fields, out[0].__class__):
        # Return a Field
        return out[0]
    else:
        # Return a FieldList
        return out
#--- End: def

def collapse_data(data, method, axes=None, weights=None, unbiased=False):
    '''

Return a data array collapsed over one or more dimensions.

:Parameters:

    data : Data
        The data to collapse.

    method : str
        The collapse method (such as 'mean'). A function called
        _<method> must exist at this module's top level.

    axes : optional
        The axes to collapse. By default all axes are collapsed.

    weights : dict, optional
        Weights keyed by dimension name or tuple of dimension names.
        By default (or if empty) equal weights are used. Note: the
        default is None rather than a shared mutable {}.

    unbiased : bool, optional
        If True then calculate unbiased statistics where applicable.

:Returns:

    out : Data
        The collapsed data, with the collapsed dimensions retained
        with size 1.

**Examples**

'''
    #-----------------------------------------------------------------
    # Parse the collapse axes
    #-----------------------------------------------------------------
    axes, dims = data._parse_axes(axes, 'collapse', dims=True,
                                  default=range(data.ndim))
    axes, dims = zip(*sorted(zip(axes, dims)))
    axes = list(axes)

    #-----------------------------------------------------------------
    # Initialize the output data array
    #-----------------------------------------------------------------
    data.to_memory()

    # Select a representative element along each collapse axis, so
    # that the result has the collapsed shape
    indices = [slice(None)] * data.ndim
    for i in axes:
        indices[i] = 0

    new = data[tuple(indices)]
    
    # Squeeze the collapse axes out of the output data. This is
    # important. But note that we'll put them back at the end.
    new.squeeze(axes)

    dimensions = data.dimensions
    dims = [dimensions[i] for i in axes]

    direction = data.direction
    directions = [direction[dim] for dim in dims]

    #-----------------------------------------------------------------
    # Transpose the input data so that the collapse dimensions are all
    # in the fastest varying (outer) positions.
    # ----------------------------------------------------------------
    non_collapse_axes = [i for i in xrange(data.ndim) if i not in axes]
    data = data.copy()
    data.transpose(non_collapse_axes + axes)

    #-----------------------------------------------------------------
    # Parse the weights.
    #
    # * Change the keys from dimension names to the integer positions
    #   of the dimensions.
    #
    # * Make sure all non-null weights are Data objects.
    # -----------------------------------------------------------------
    if weights:
        weights = weights.copy()

        dimensions = data.dimensions

        for key in weights.keys():
            weight = weights.pop(key)

            # Ignore undefined weights
            if weight is None:
                continue
            
            # Make sure the weight is a Data object
            if not isinstance(weight, data.__class__):
                weight = type(data)(weight)
                
            # Ignore size 1 weights
            if weight.size == 1:
                continue

            if key in dimensions:
                weights[(dimensions.index(key),)] = weight
            elif isinstance(key, tuple):
                # Transpose the weight so that its dimensions are in
                # the same relative order as the (transposed) data
                a = [key.index(dim) for dim in dims if dim in key]
                if a not in ([], range(len(key))):
                    nc = [i for i in xrange(len(key)) if i not in a]
                    weight = weight.copy()
                    weight.transpose(nc + a)
                    
                    key = [key[i] for i in nc+a]
                #--- End: if

                if not set(key).issubset(dimensions):
                    raise ValueError(
                        "Can't collapse: Incorrect weights dimensions: %s" %
                        repr(key))

                weights[tuple([dimensions.index(k) for k in key])] = weight
        #--- End: for

        # Check the shape of the weights match the sizes of their
        # corresponding data array dimensions.
        data_shape = data.shape
        for key, weight in weights.iteritems():
            weight_shape = weight.shape
            for i, j in enumerate(key):
                if weight_shape[i] != data_shape[j]:
                    raise ValueError(
                        "Can't collapse: Incorrect weights shape: %s" %
                        repr(weight.shape))
    #--- End: if

    if not weights:
        weights = None

    #-------------------------------------------------------------
    # Set the output data-type
    # ------------------------------------------------------------
    datatype = data.dtype
    # Note: compare the kind character with ==, not 'is' (identity
    # only worked by accident of CPython string interning)
    if datatype.kind == 'i' and method not in ['max', 'min', 'mode']:
        # The output data-type is float if data is of integer type
        datatype = numpy.dtype(float)

    # ----------------------------------------------------------------
    # Define the collapse function
    # ----------------------------------------------------------------
    function = globals()['_%s' % method]

    save = new.save_to_disk()
    for partition in new.partitions.flat():

        partition.part = []

        # We need to set partition.dimensions because Data.squeeze() does not
        # change it.
        partition.dimensions = new.dimensions[:]

        # Initialize an empty array for this partition of the output
        # collapsed data array
        array = numpy_ma_empty(partition.shape, dtype=datatype)

        # Set each element of the output partition's array
        for a_indices, m_indices in zip(partition.iterarray_indices(),
                                        partition.itermaster_indices()):
            array[a_indices] = function(data, m_indices, weights=weights,
                                        unbiased=unbiased)
        #--- End: for

        partition.data = array
        partition.close(save)
    #--- End: for

    # Put the collapsed dimensions back in with size 1
    for axis, dim, direction in zip(axes, dims, directions):
        new.expand_dims(axis, dim, direction)

    # ----------------------------------------------------------------
    # Return the collapsed data
    # ----------------------------------------------------------------
    return new
#--- End: def

def collapse_space(space, dims):
    '''

Collapse the given dimensions of a space, in place, to size 1.

For each collapse dimension which is larger than size 1:

* If the space has a dimension coordinate for it then that coordinate
  is replaced by a single cell spanning the original outer bounds
  (with the coordinate value at the mid-point of those bounds), any
  formula_terms transforms it references are removed from both the
  coordinate and the space, and the dimension's recorded size is set
  to 1.

* All auxiliary coordinates and cell measures which span the
  dimension are removed from the space.

:Parameters:

    space : Space
        The space to modify in place.

    dims : sequence of strs
        The names of the collapsed dimensions.

:Returns:

    None

'''
    for dim in dims:
        # Ignore dimensions which are already size 1
        if space.dimension_sizes[dim] == 1:
            continue
        
        if dim in space:
            # Derive a new size 1 dimension coordinate for this
            # dimension
            coord = space[dim]
        
            # Use the outermost bounds if present, otherwise fall
            # back to the first and last coordinate values
            if hasattr(coord, 'bounds'):
                bounds = [coord.bounds.first_datum, coord.bounds.last_datum]
            else:
                bounds = [coord.first_datum, coord.last_datum]
        
            # The single new coordinate value is the mid-point of the
            # collapsed extent
            array = 0.5 * (bounds[0] + bounds[1])
        
            coord.Data = Data([array], coord.Units, coord._FillValue)
        
            coord.bounds = CoordinateBounds()
            coord.bounds.Data = Data([bounds], coord.Units, coord._FillValue)

            space.dimension_sizes[dim] = 1

            # Remove formula_terms transforms referenced by this
            # coordinate
            if hasattr(coord, 'transforms'):
                # Iterate over a copy so removal is safe
                for t in coord.transforms[:]:              
                    if t not in space.transforms:
                        coord.transforms.remove(t)
                    elif space.transforms[t].isformula_terms:
                        coord.transforms.remove(t)
                        space.transforms.pop(t)                    
                #--- End: for
                if not coord.transforms:
                    del coord.transforms
        #--- End: if
        
        # Remove all auxiliary coordinates which span this dimension.
        # NOTE(review): space.aux_coords(dim) is iterated while items
        # are popped from the space -- assumed to return an
        # independent collection; confirm against Space.aux_coords.
        for aux in space.aux_coords(dim):
            space.pop(aux)
        
        # Remove all cell measures which span this dimension
        for cm in space.cell_measures(dim):
            space.pop(cm)
    #--- End: for
#--- End: def

def update_cell_methods(f, method, dims):
    '''

Append a cell method describing a collapse to a field's cell methods.

The new cell method names each collapsed dimension by its
coordinate's identity (falling back to the raw dimension name) and
records the collapse method.

:Parameters:

    f : Field
        The field to update in place.

    method : str
        The collapse method (such as 'mean').

    dims : list of strs
        The names of the collapsed dimensions.

:Returns:

    None

**Examples**

'''
    if not hasattr(f, 'cell_methods'):
        f.cell_methods = CellMethods()

    space = f.space

    # Describe each collapsed dimension by its coordinate identity,
    # falling back to the dimension name itself
    names = [space[dim].identity(default=dim) if dim in space else dim
             for dim in dims]

    cell_method = CellMethods('%s: %s' % (': '.join(names), method))
    cell_method[0]['dim'] = dims[:]

    f.cell_methods += cell_method
#--- End: def

def update_ancillary_variables(f, collapse_dims):
    '''

Discard ancillary variables invalidated by a collapse of a field.

An ancillary variable is kept only if every dimension of its data
array with size greater than 1 maps onto a dimension of the field
which was not collapsed. If no ancillary variables survive then the
attribute is removed from the field.

:Parameters:

    f : Field
        The field to update in place.

    collapse_dims : sequence of strs
        The names of the collapsed dimensions of the field.

:Returns:

    None

**Examples**

'''
    if not hasattr(f, 'ancillary_variables'):
        return

    new_av = AncillaryVariables()

    for av in f.ancillary_variables:

        dim_map = av.space.map_dims(f.space)
        sizes = av.space.dimension_sizes

        # Discard the ancillary variable if its data array has an
        # undefined dimension with size > 1
        unmapped = set(av.space.dimensions['data']).difference(dim_map)
        if any(sizes[dim1] > 1 for dim1 in unmapped):
            continue

        # Discard the ancillary variable if its data array has a
        # defined dimension with size > 1 which is a collapse
        # dimension
        if any(dim0 in collapse_dims and sizes[dim1] > 1
               for dim1, dim0 in dim_map.iteritems()):
            continue

        new_av.append(av)
    #--- End: for

    if new_av:
        f.ancillary_variables = new_av
    else:
        del f.ancillary_variables 
#--- End: def

def _max(data, indices, *args, **kwargs):
    '''
        
Return the maximum value of the entire data array.

If the entire array is masked then the returned value will be
`cf.masked`.

:Parameters:

    data : Data

    indices : tuple

    args, kwargs :
        Ignored

:Returns:

    out : scalar
        The maximum value, or `cf.masked` if the entire array is
        masked.

**Examples**

>>> print d
[0 1 2 --]
>>> _max(d)
2
>>> d[...] = cf.masked
>>> print d
[-- -- -- --]
>>> _max(d)
masked

'''
    # Reduce each partition to its maximum, then take the maximum of
    # the per-partition maxima
    partition_maxima = _weighted_map(data, numpy.ma.max, indices, 1,
                                     data.dtype)
    return numpy.ma.max(partition_maxima)
#--- End: def

def _mean(data, indices, weights=None, **kwargs):
    '''
        
Return the weighted mean of the entire data array.

If no weights are specified then equal weights are assumed.

The return type is numpy.float64 if the data array is of integer type,
otherwise it is of the same type as the data array.

:Parameters:

    data : Data

    indices: tuple

    weights : dict, optional        

:Returns:

    out : scalar
        The weighted mean.

'''
    datatype = data.dtype
    # Compare the kind character with ==, not 'is' (identity only
    # worked by accident of CPython string interning)
    if datatype.kind == 'i':
        # The output data-type is float if data is of integer type
        datatype = numpy.dtype(float)

    # For each partition get [mean, sum-of-weights] via
    # numpy.ma.average(..., returned=True)
    out = _weighted_map(data, numpy.ma.average, indices, 2, datatype,
                        weights=weights, returned=True)

    if data.psize == 1:
        mean = out[0, 0]
    else:
        # Combine the per-partition means, weighting each by its
        # partition's sum of weights
        mean = numpy.ma.average(out[:, 0], weights=out[:, 1])

    return mean
#--- End: def

def _mid_range(data, indices, *args, **kwargs):
    '''
 
Return the average of the minimum and maximum values of the entire
data array.

If the entire array is masked then the returned value will be
`cf.masked`.

:Parameters:

    data : Data

    indices : tuple

    args, kwargs :
        Ignored

:Returns:

    out : scalar
        The mid range value, or `cf.masked` if the entire array is
        masked.

**Examples**

>>> print d
[-- 1 2 4]
>>> _mid_range(d)
2.5
>>> d[...] = cf.masked
>>> print d
[-- -- -- --]
>>> _mid_range(d)
masked

'''
    # Fixes: the function previously took no parameters yet used
    # 'data', and called _min/_max without their required 'indices'
    # argument; a dead 'pass' after the return has been removed.
    return 0.5*(_min(data, indices) + _max(data, indices))
#--- End: def

def _min(data, indices, *args, **kwargs):
    '''
        
Return the minimum value of the entire data array.

If the entire array is masked then the returned value will be
`cf.masked`.

:Parameters:

    data : Data

    indices : tuple

    args, kwargs :
        Ignored

:Returns:

    out : scalar
        The minimum value, or `cf.masked` if the entire array is
        masked.

**Examples**

>>> print d
[-- 1 2 3]
>>> _min(d)
1
>>> d[...] = cf.masked
>>> print d
[-- -- -- --]
>>> _min(d)
masked

'''  
    # Reduce each partition to its minimum, then take the minimum of
    # the per-partition minima
    partition_minima = _weighted_map(data, numpy.ma.min, indices, 1,
                                     data.dtype)
    return numpy.ma.min(partition_minima)
#--- End: def

def _mode():
    '''

Return the mode of the entire data array.
    
If the entire array is masked then the returned value will be
`cf.masked`.

'''
    raise NotImplementedError("Haven't coded mode, yet")
#--- End: def

def _standard_deviation(data, indices, weights=None, unbiased=False):
    '''

Return the standard deviation of the entire data array.
    
If the entire array is masked then the returned value will be
`cf.masked`.

The return type is numpy.float64 if the data array is of integer type,
otherwise it is of the same type as the data array.

:Parameters:

    data : Data

    indices: tuple

    unbiased : bool, optional
        If True then the unbiased sample standard deviation will be
        returned. By default the biased population standard deviation
        will be returned.

    weights : dict, optional        

:Returns:

    out : scalar
        The standard deviation, or `cf.masked` if the entire array is
        masked.

**Examples**

>>> print d
[-- 1 2 3]
>>> _standard_deviation(d)

>>> d[...] = cf.masked
>>> print d
[-- -- -- --]
>>> _standard_deviation(d)
masked

'''
    # The standard deviation is simply the square root of the
    # (optionally unbiased) variance
    return _variance(data, indices, weights=weights,
                     unbiased=unbiased) ** 0.5
#--- End: def

def _sum(data, indices, *args, **kwargs):
    '''
        
Return the sum of the entire data array.

If the entire array is masked then the returned value will be
`cf.masked`.

The return type is numpy.float64 if the data array is of integer type,
otherwise it is of the same type as the data array.

:Parameters:

    data : Data

    indices : tuple

    args, kwargs :
        Ignored

:Returns:

    out : scalar
        The sum value, or `cf.masked` if the entire array is masked.

**Examples**

>>> print d
[-- 1 2 3]
>>> _sum(d)
6
>>> d[...] = cf.masked
>>> print d
[-- -- -- --]
>>> _sum(d)
masked

'''
    datatype = data.dtype
    # Compare the kind character with ==, not 'is' (identity only
    # worked by accident of CPython string interning)
    if datatype.kind == 'i':
        # The output data-type is float if data is of integer type
        datatype = numpy.dtype(float)

    # Sum each partition, then sum the per-partition sums
    out = _weighted_map(data, numpy.ma.sum, indices, 1, datatype)
    return numpy.ma.sum(out)
#--- End: def

def _variance(data, indices, weights=None, unbiased=False):
    '''

Return the variance of the entire data array.

The biased population variance or the unbiased sample variance may be
calculated.
    
If the entire array is masked then the returned value will be
`cf.masked`.

The return type is numpy.float64 if the data array is of integer type,
otherwise it is of the same type as the data array.

:Parameters:

    data : Data

    indices: tuple

    unbiased : bool, optional
        If True then the unbiased sample variance will be returned. By
        default the biased population variance will be returned.

    weights : dict, optional        

:Returns:

    out : scalar
        The variance, or `cf.masked` if the entire array is masked.

**Examples**

>>> print d
[-- 1 2 3]
>>> _variance(d)

>>> d[...] = cf.masked
>>> print d
[-- -- -- --]
>>> _variance(d)
masked

'''
    # Methodology:
    # http://en.wikipedia.org/wiki/Standard_deviation#Population-based_statistics
    # http://en.wikipedia.org/wiki/Weighted_mean#Weighted_sample_variance

    def _components(array, weights=None):
        '''

Return selected statistics derived from an array:
* mean of the array
* sum of weights for the array
* sum of squares of weights for the array
* biased population variance of the array

:Parameters:

    array : array-like
        The array from which the statistics are calculated.

    weights : array-like, optional
        The importance that each element has in the computations. If
        set then the weights array must have the same shape as
        `array`. By default all data in `array` are assumed to have a
        weight equal to one.

:Returns:

    out : numpy.ma.array
        The output array is structured as follows:
        * out[0] -> mean of the array
        * out[1] -> sum of weights for the array
        * out[2] -> sum of squares of weights for the array
        * out[3] -> biased population variance of the array

'''
        n = numpy.ma.count(array)

        if not n:
            # Every element of this array is masked
            out = numpy_ma_empty(4)
            out[...] = numpy.ma.masked

        else:
            # Some elements of this array are unmasked
            if weights is None:
                # Unweighted: the sum of weights (and of their
                # squares) is just the unmasked element count
                mean   = numpy.ma.average(array)
                sw     = n
                sw2    = n
                sigma2 = numpy.ma.sum((array - mean)**2)/sw
            else:
                # Weighted
                mean, sw = numpy.ma.average(array, weights=weights,
                                            returned=True)
                sw2 = numpy.ma.sum(weights**2)
                sigma2 = numpy.ma.sum(weights*(array - mean)**2)/sw
            #--- End: if

            out = numpy.ma.array([mean, sw, sw2, sigma2])
        #--- End: if

        return out
    #--- End: def

    datatype = data.dtype
    # Compare the kind character with ==, not 'is' (identity only
    # worked by accident of CPython string interning)
    if datatype.kind == 'i':
        # The output data-type is float if data is of integer type
        datatype = numpy.dtype(float)

    out = _weighted_map(data, _components, indices, 4, datatype,
                        weights=weights)

    # ----------------------------------------------------------------
    # Calculate:
    # * The global mean (if required)
    # * The global sum of weights
    # * The global sum of squares of weights 
    # * The global variance
    # ----------------------------------------------------------------
    if data.psize == 1:
        gvariance = out[0, 3]
        gsum_of_weights  = out[0, 1]
        gsum_of_weights2 = out[0, 2]

    else:
        gmean = numpy.ma.average(out[:, 0], weights=out[:, 1])

        # Combine the per-partition variances using the identity
        # var = sum_p(sw_p*(var_p + mean_p**2))/sum(sw) - gmean**2
        gvariance        = 0.0
        gsum_of_weights  = 0.0
        gsum_of_weights2 = 0.0
        for mean, sw, sw2, variance in out:
            gvariance += sw*(variance + mean**2)
            gsum_of_weights  += sw
            gsum_of_weights2 += sw2
        #--- End: for
            
        gvariance /= gsum_of_weights
        gvariance -= gmean ** 2
    #--- End: if

    if unbiased:
        # Convert the population variance to the unbiased sample
        # variance: multiply by sw**2/(sw**2 - sw2) (the reliability
        # weights correction; reduces to n/(n-1) for equal weights)
        factor = gsum_of_weights/(gsum_of_weights**2 - gsum_of_weights2) 
        gvariance *= gsum_of_weights * factor
    #--- End: if

    return gvariance
#--- End: def

def _weighted_map(data, function, indices, dimensions, datatype, **kwargs):
    '''

Return an array of the results of applying the function to each
partition of the argument Data object's subspace.

All keyword arguments are passed into the function call. If there is a
`weights` keyword argument then this should either evaluate to False
or be a dictionary of weights for at least one of the data dimensions;
weights are sliced and broadcast to each partition's shape before the
call.

:Parameters:

    data : Data
        The data whose subspace (selected by `indices`) is processed
        partition by partition.

    function : function
        Applied to each partition's array in turn.

    indices: tuple or None
        The subspace of `data` to process.

    dimensions : int
        The number of values the function returns per partition: 1
        gives a 1-d output of length data.psize, otherwise the output
        has shape (data.psize, dimensions).

    datatype : numpy.dtype
        The data-type of the output array.

    kwargs :
        Additional keyword arguments passed through to `function`.

:Returns:

    out : numpy.ma.array
        One element (or row) per partition of the subspaced data.

**Examples**

'''

    def _create_weights(partition, master_indices, master_weights):
        '''

Build this partition's weights array by slicing each master weight to
the partition's extent, multiplying the slices together, and
broadcasting the product to the partition's full shape.

:Parameters:

    partition : Partition

    master_indices : tuple
        The indices of the subspace in the master data array.

    master_weights : dict
        Weights keyed by tuples of dimension positions.

:Returns:

    out : numpy.ma.array
        Weights with the same shape as the partition.

**Examples**

'''
        partition_indices = partition.indices
        partition_shape   = partition.shape
        base_shape = [1] * len(partition_shape)

        # Extend the subspace indices with the partition's own
        # indices for any trailing dimensions not already covered
        master_indices += partition_indices[len(master_indices):]

        weights = []

        for key, weight in master_weights.iteritems():
            # Slice this weight to the partition's extent; shape is 1
            # on every dimension the weight does not span
            shape = base_shape[:]
            indices = []
            for i in key:
                shape[i] = partition_shape[i]
                indices.append(master_indices[i])
            #--- End: for

            weight = weight[tuple(indices)].array
            weights.append(weight.reshape(shape))
        #--- End: for

        # Combine all the weights into one array by elementwise
        # multiplication (shapes of 1 broadcast)
        weights = reduce(numpy_ma_multiply, weights)

        # Broadcast the weights across all dimensions of the
        # partition, and return them
        return numpy_ma_resize(weights, partition_shape)
    #--- End: def

    # Subspace the data to the requested indices
    data = data[indices]

    # One output element per partition, with an extra trailing axis
    # when the function returns more than one statistic
    if dimensions == 1:
        size = data.psize
    else:
        size = (data.psize, dimensions)

    out = numpy_ma_empty(size, dtype=datatype)

    if 'weights' in kwargs:
        weights = kwargs['weights']
        # Copy so that the per-partition weights assigned below do
        # not leak into the caller's dictionary
        kwargs = kwargs.copy()
    else:
        weights = None

    conform_args = data.conform_args(revert_to_file=True)

    # Apply the function to each partition's conformed array
    for i, partition in enumerate(data.partitions.flat()):
        array = partition.conform(**conform_args)
        if weights is not None:
            kwargs['weights'] = _create_weights(partition, indices, weights)

        out[i] = function(array, **kwargs)
        partition.close()
    #--- End: for

    return out
#--- End: def

def calc_weights(coord, infer_bounds=False):
    '''

Calculate collapse weights from a coordinate.

Size 1 and string-valued coordinates give equal weights (None);
latitude coordinates give area weights proportional to the difference
of the sines of their cell bounds; all other coordinates give linear
weights proportional to their cell sizes. See def collapse for help.

:Parameters:

    coord : Coordinate

    infer_bounds : bool, optional
        If True then guess bounds for a coordinate which has none.

:Returns:

    out : Data or None
        The weights, or None for equal weights.

'''
    def _equal():
        '''

Calculate equal weights

'''
        # None signals equal weighting to the caller
        return None
    #--- End: def

    def _linear(coord, infer_bounds):
        '''

Calculate linear weights based on absolute cell bounds.
'''
        bounds = _find_bounds(coord, infer=infer_bounds)

        # Each cell's weight is its (absolute) width
        return abs(bounds[..., 1] - bounds[..., 0])
    #--- End: def

    def _latitude_area(coord, infer_bounds):
        '''

Calculate latitude-area weights based on cell bounds

'''
        bounds = _find_bounds(coord, infer=infer_bounds)
        # Area on the sphere between two latitudes is proportional to
        # the difference of the sines of the bounding latitudes
        bounds.sin()

        return abs(bounds[..., 1] - bounds[..., 0])
    #--- End: def

    def _find_bounds(coord, infer=False):
        '''

Find a coordinate's bounds

Either return its existing bounds or guess the bounds as the midpoints
of the coordinate values with extrapolation at the end points.

:Parameters:

    coord : Coordinate

    infer : bool
        If True then guess bounds when the coordinate has none;
        otherwise raise a ValueError in that case.

:Returns:

    out : CoordinateBounds or None

**Examples**

'''

        if hasattr(coord, 'bounds'):
            bounds = coord.bounds.Data.copy()

        elif infer:
            c = coord.array

            bounds = numpy_ma_empty((coord.size, 2), dtype=c.dtype)

            # Midpoints between adjacent coordinates, with linear
            # extrapolation at the two end cells
            bounds[0, 0]  = c[0]*1.5 - c[1]*0.5
            bounds[1:, 0] = (c[0:-1] + c[1:])*0.5

            bounds[0:-1, 1] = bounds[1:, 0]
            bounds[-1, 1]   = c[-1]*1.5 - c[-2]*0.5

            bounds = Data(bounds, coord.Units, coord._FillValue)
            
        else:
            raise ValueError("sdf6671nsdff0283  &&&&&")
        #--- End: if
                    
        # Clip out-of-bounds latitude values (inferred bounds can
        # extrapolate past the poles). The clip limits are given in
        # radians and presumably converted to the bounds' units.
        if coord.standard_name in ('latitude', 'grid_latitude'):           
            pi_by_2 = numpy.pi/2.0
            bounds.clip(-pi_by_2, pi_by_2, 'radians')
        #--- End: if

        return bounds            
    #--- End: def

    # Dispatch on the kind of coordinate
    if coord.size == 1 or coord.dtype.kind == 'S':
        return _equal()

    elif coord.standard_name in ('latitude', 'grid_latitude'):
        return _latitude_area(coord, infer_bounds=infer_bounds)
    
    else:    
        return _linear(coord, infer_bounds=infer_bounds)
#--- End: def
