import numpy
from numpy import dtype       as numpy_dtype
from numpy import result_type as numpy_result_type
from numpy.ma  import is_masked    as numpy_ma_is_masked
from numpy.ma  import getmaskarray as numpy_ma_getmaskarray
from numpy.ma  import getmask      as numpy_ma_getmask
from numpy.ma  import where        as numpy_ma_where
from numpy.ma  import nomask       as numpy_ma_nomask
from numpy.ma  import masked       as numpy_ma_masked
from copy      import copy
from operator  import mul
from hashlib   import sha1  as hashlib_sha1
from marshal   import dumps as marshal_dumps
from json      import loads as json_loads
from ast       import literal_eval as ast_literal_eval
from .units           import Units
from .functions       import (CHUNKSIZE, FM_THRESHOLD,  RTOL, ATOL, 
                              iterindices, parse_indices, _allclose)
from .partition       import Partition
from .partitionarray  import PartitionArray, empty
from .netcdf.filearray import NetCDFFileArray
from .pp.filearray     import PPFileArray

# The /proc/meminfo field labels whose values are summed by free_memory()
_meminfo_fields = set(('SReclaimable:', 'Cached:', 'Buffers:', 'MemFree:'))
# Line-buffered handle on /proc/meminfo, held open for the lifetime of the
# process and rewound to the start of the file after each read by
# free_memory(). Linux-only (see the free_memory docstring).
_meminfo_file   = open('/proc/meminfo', 'r', 1)

def free_memory():
    '''

The amount of available physical memory in kibibytes.

This amount includes any memory which is still allocated but is no
longer required.

:Returns:

    out : float
        The amount of available physical memory in kibibytes.

.. warning::
   
   This function has been written for linux platforms. Other cases
   will be required for Unix, Mac, Windows, etc.

**Examples**

>>> free_memory()
96496240.0

'''
    # ----------------------------------------------------------------
    #
    # LINUX: The available physical memory is the sum of the values of
    #        the 'SReclaimable', 'Cached', 'Buffers' and 'MemFree'
    #        entries in the /proc/meminfo file.
    #
    # ----------------------------------------------------------------
    free_kB = 0.0
    n = 0

    # The module-level file object is shared between calls, so it must
    # always be rewound to the start of the file -- even if parsing
    # raises -- otherwise every subsequent call would resume from the
    # wrong position and return a too-small total.
    try:
        for line in _meminfo_file:
            field_size = line.split()
            if field_size[0] in _meminfo_fields:
                free_kB += float(field_size[1])
                n += 1
                if n > 3:
                    # All four fields of interest have been found
                    break
        #--- End: for
    finally:
        _meminfo_file.seek(0)

    return free_kB
#--- End: def


# ====================================================================
#
# Data object
#
# ====================================================================

class Data(object):
    '''

An N-dimensional data array with units and masked values.

* Contains an N-dimensional, indexable and broadcastable array with
  many similarities to a numpy array.

* Contains the units of the array elements.

* Supports masked arrays, regardless of whether or not it was
  initialized with a masked array.

* Uses Large Amounts of Massive Arrays (LAMA) functionality to store
  and operate on arrays which are larger than the available memory.


**Indexing**

A data array is indexable in a similar way to numpy array but for two
important differences:

* Size 1 dimensions are never removed.

  An integer index i takes the i-th element but does not reduce the
  rank of the output array by one.

* When advanced indexing is used on more than one dimension, the
  advanced indices work independently.

  When more than one dimension's slice is a 1-d boolean array or 1-d
  sequence of integers, then these indices work independently along
  each dimension (similar to the way vector subscripts work in
  Fortran), rather than by their elements.

**Miscellaneous**

Data objects are hashable. Note that, since Data objects are mutable,
their hash values may change if created at different times.

**Examples**

>>> d.shape
(12, 19, 73, 96)
>>> d[0, :, [0,1], [0,1,2]].shape
(1, 19, 2, 3)

'''
    # Restrict instances to these attributes. The leading-underscore
    # names appear to be the storage behind like-named public
    # attributes (presumably exposed as properties elsewhere in the
    # class -- e.g. '_FillValueP' looks like the backing store for
    # '_FillValue'; verify against the rest of the module).
    __slots__ = ('_dtype',       # numpy data type of the array
                 '_FillValueP',  # fill value storage
                 '_hardmask',    # whether the mask is hard
                 '_ndim',        # number of array dimensions
                 '_shape',       # array shape (a tuple)
                 '_size',        # number of elements in the array
                 '_Units',       # Units object storage
                 'dimensions',   # ordered list of dimension identities
                 'partitions')   # PartitionArray holding the data
    
    def __init__(self, data=None, units=None, _FillValue=None, hardmask=True,
                 chunk=False,
                 json=None, shape=None, dimensions=None, dtype=None, _filename=None):
        '''

**Initialization**

:Parameters:

    data : array-like, optional
        The data for the array.

    units : str or Units, optional
        The units of the data. By default the array elements are
        dimensionless.

    _FillValue : optional 
        The fill value of the data. By default the numpy fill value
        appropriate to the array's data type will be used.

    hardmask : bool, optional
        If False then the mask is soft. By default the mask is hard.

    chunk : bool, optional
        If True then the data array will be partitioned if it is
        larger than the chunk size. By default the data array will be
        stored in a single partition.

**Examples**

>>> d = cf.Data(5)
>>> d = cf.Data([1,2,3], units='K')
>>> import numpy   
>>> d = cf.Data(numpy.arange(10).reshape(2,5), units=cf.Units('m/s'), _FillValue=-999)
>>> d = cf.Data(('f', 'l', 'y'))

'''      

        # Set units if they're not already a Units object
        if units is None:            
            units = Units()

        if isinstance(units, basestring):
            try:
                units = Units(units)
            except ValueError:
                raise ValueError("Unsupported unit in %s initialization: '%s'" %
                                 (self.__class__.__name__, units))
        #--- End: if

        self.Units      = units.copy()
        self._FillValue = _FillValue
        self.hardmask   = hardmask

        if json is not None:
            self._dtype     = dtype
            self._shape     = tuple(shape)
            self._ndim      = len(shape)
            self._size      = long(reduce(mul, shape, 1))
            self.dimensions = dimensions
        
            self._decode_json(json, _filename)
            return
        #--- End: if

        if data is None:
            return

        if isinstance(data, (list, float, tuple, int, long, basestring)):
            data = numpy.array(data)
        
        shape = data.shape
        ndim  = data.ndim
        
        dimensions = [str(i) for i in xrange(ndim)]
        
        if not ndim:
            # Scalar data
            direction = True
        else:
            # 1 or more dimensional data
            direction = dict([(dim, True) for dim in dimensions])

        self.partitions = PartitionArray(
            [Partition(location  = [(0,n) for n in shape],
                       shape     = list(shape),
                       dimensions     = dimensions[:],
                       direction = copy(direction),
                       Units     = units.copy(),
                       part      = [],
                       data      = data)
             ],
#            dimensions = dimensions,
            direction  = direction,
            dimensions = [],
            )

        self.dtype = data.dtype

        self._ndim  = ndim
        self._shape = shape
        self._size  = data.size
       
        self.dimensions = dimensions
        '''

An ordered list of identities for dimension of the data array.

**Examples**

>>> d.ndim
3
>>> d.dimensions
['dim2', 'dim0', 'dim3'

>>> d.ndim
0
>>> d.dimensions
[]

'''

        if chunk:
            self.chunk()
    #--- End: def

    def __deepcopy__(self, memo):
        '''

Used if copy.deepcopy is called on the variable.

''' 
        return self.copy()
    #--- End: def

    def __getstate__(self):
        '''

Called when pickling.

:Parameters:

    None

:Returns:

    out : dict
        A dictionary of the instance's attributes

**Examples**

'''
        return dict([(attr, getattr(self, attr))
                     for attr in self.__slots__ if hasattr(self, attr)])
    #--- End: def        

    def __setstate__(self, odict):
        '''

Called when unpickling.

:Parameters:

    odict : dict
        The output from the instance's `__getstate__` method.

:Returns:

    None

'''
        for attr, value in odict.iteritems():
            setattr(self, attr, value)
    #--- End: def

    def __hash__(self):
        '''
Return the hash value of the data array.

Note that generating the hash temporarily realizes the entire array in
memory, which may not be possible for large arrays.

The hash value is dependent on the data type, shape and mask and
unmasked values of the data array, but is independent of data array
values underlying any masked elements.

The hash value is not guaranteed to be portable across versions of
Python, numpy and cf.

**Examples**

>>> print d.array
[[0 1 2 3]]
>>> d.hash()
-8125230271916303273
>>> d[1, 0] = numpy.ma.masked
>>> print d.array
[[0 -- 2 3]]
>>> hash(d)
791917586613573563
>>> d.hardmask = False
>>> d[0, 1] = 999
>>> d[0, 1] = numpy.ma.masked
>>> d.hash()
791917586613573563
>>> d.squeeze()
>>> print d.array
[0 -- 2 3]
>>> hash(d)
-7007538450787927902
>>> d.dtype = float
>>> print d.array
[0.0 -- 2.0 3.0]
>>> hash(d)
-4816859207969696442

'''
        # A SHA1 digest is accumulated from the data type name, the
        # shape, the mask and the element values, and then folded into
        # an integer with the built-in hash
        h = hashlib_sha1()

        # Marshal the dtype name and shape so that the digest depends
        # on them as well as on the raw bytes of the array
        h.update(marshal_dumps(self.dtype.name))
        h.update(marshal_dumps(self.shape))

        # Realize the entire array in memory (may be costly for large
        # arrays -- see the docstring warning)
        array = self.array

        # NOTE(review): when no elements are masked, array.mask may be
        # numpy.ma.nomask (a 0-d value) rather than a boolean array --
        # confirm that hashlib's update accepts it via the buffer
        # interface
        h.update(array.mask)

        if numpy_ma_is_masked(array):
            # Replace masked elements with the fill value so that the
            # digest is independent of the data underlying the mask
            array.set_fill_value()
            h.update(array.filled())
        else:
            h.update(array.data)

        return hash(h.digest())
    #--- End: def

    def __nonzero__(self):
        '''
x.__nonzero__() <==> x != 0

'''
        if self.size == 1:
            return self.varray.__nonzero__()

        raise ValueError(
"The truth value of Data with more than one element is ambiguous. Use d.any() or d.all()")
    #--- End: def

    def __repr__(self):
        '''
x.__repr__() <==> repr(x)

'''
        return '<CF %s: Units = %s>' % (self.__class__.__name__, self.Units)
    #--- End: def

    def __str__(self):
        '''
x.__str__() <==> str(x)

'''
        return '%s: %s' % (self.__class__.__name__,
                           {'dtype'      : getattr(self, 'dtype', None),
                            'shape'      : self.shape,
                            'dimensions' : self.dimensions,
                            'ndim'       : self.ndim,
                            'size'       : self.size,
                            'direction'  : self.direction,
                            'pdimensions': self.pdimensions,
                            'Units'      : self.Units,
                            '_FillValue' : self._FillValue,
                            'hardmask'   : self.hardmask,
                           })
    #--- End: def

    def __getitem__(self, indices):
        '''
x.__getitem__(indices) <==> x[indices]

Size 1 dimensions are never removed from the result (see the class
docstring for the indexing rules).

'''
        # Map each dimension identity to its axis position
        dim2position = dict([(dim, i) for i, dim in enumerate(self.dimensions)])

        # Normalize the indices and find which dimensions are selected
        # with a reversed (negative step) slice; those are flipped at
        # the end
        indices, reverse_dimensions = _parse_indices(self, indices)

        # If it's a scalar, then just copy it. ??? dch trapped in _parse_indices??
        if not self.ndim and not indices:
            return self.copy()
        
        new            = type(self)()
        
        # Keep only the partitions which overlap the requested indices
        new.partitions = PartitionArray(_overlapping_partitions(self.partitions, 
                                                                indices, 
                                                                dim2position))

        # The rank is preserved: size 1 dimensions are never removed
        new._shape      = tuple(map(_size_of_index, indices, self.shape))
        new._size       = long(reduce(mul, new.shape, 1))
        new.dimensions  = self.dimensions[:]
        new.direction   = copy(self.direction)
        new.Units       = self.Units.copy()
        new.pdimensions = self.pdimensions[:]
        new._FillValue  = self._FillValue
        new._ndim       = self.ndim
        new.dtype       = self.dtype
        new.hardmask    = self.hardmask

#        if hasattr(self, '_dtype'):
#            new.dtype = self.dtype

        new._set_location_map()
        
        # Reverse dimensions with flipped slices
        if reverse_dimensions:
            new.flip(reverse_dimensions)

        # Remove size 1 partition dimensions
        new.partitions.squeeze()
        
        return new
    #--- End: def

    def __setitem__(self, indices, value):
        '''
x.__setitem__(indices, y) <==> x[indices]=y

'''            
        def _mirror_index(index, size):
            '''

Note that it assumed that the step of the input slice is 1.

:Parameters:

    index : slice

    size : int

:Returns:

    out : slice

**Examples**

>>> s = slice(2, 6)
>>> t = _mirror_index(slice(2, 6), 8)
>>> t
slice(5, 1, -1)
>>> range(8)[s]
[2, 3, 4, 5]
>>> range(8)[t]
[5, 4, 3, 2]
>>> range(7, -1, -1)[t]
[2, 3, 4, 5]

'''
            start, stop, step = index.indices(size)
            size -= 1
            start = size - start
            stop  = size - stop
            if stop < 0:
                stop = None
                
            return slice(start, stop, -1)
        #--- End: def

        conform_args = self.conform_args()

        # ------------------------------------------------------------
        # parse the indices
        # ------------------------------------------------------------
        indices, reverse_dimensions = _parse_indices(self, indices)

        scalar_value = False
        if value is numpy_ma_masked:
            scalar_value  = True
        else:
            if not isinstance(value, Data):
                # Convert to the value to a Data object
                value = type(self)(value, units=self.Units)
            else:
                value = value.copy()
                if value.Units and value.Units.equivalent(self.Units):
                    value.Units = self.Units
                elif not value.Units:
                    value.override_units(self.Units)
                else:
                    raise ValueError(" Some bad units %s, %s" %
                                     (repr(self.Units), repr(value.Units)))
                #--- End: if

            if value.size == 1:
                scalar_value = True
                value = value.first_datum
        #--- End: if

        if scalar_value:
            # --------------------------------------------------------
            # The value is logically scalar
            # --------------------------------------------------------           
            for partition in self.partitions.flat():
                p_indices, shape = _partition_overlaps(partition, indices)
                if not p_indices:
                    # This partition does not overlap the indices
                    continue
                
                array = partition.conform(**conform_args)
                array[p_indices] = value
                partition.close()
            #--- End: for
            
            return
        #--- End: if

        # ------------------------------------------------------------
        # Still here? Then the value is not logically scalar.
        # ------------------------------------------------------------
        self_shape  = self.shape
        value_shape = value.shape
        
        shape0 = map(_size_of_index, indices, self_shape)
        self_ndim  = self.ndim
        value_ndim = value.ndim
        align_offset = self_ndim - value_ndim
        if align_offset >= 0:
            # self has more dimensions than other
            shape0   = shape0[align_offset:]
            shape1   = value_shape
            ellipsis = None 
            
            reverse_dimensions = [i - align_offset
                                  for i in reverse_dimensions
                                  if i >= align_offset]                
        else:
            # value has more dimensions than self
            v_align_offset = -align_offset
            if value_shape[:v_align_offset] != [1] * v_align_offset:
                # Can only allow value to have more dimensions then
                # self if the extra dimensions all have size 1.
                raise ValueError("Can't broadcast shape %s across shape %s" %
                                 (repr(value_shape), repr(self_shape)))
            
            shape1       = value_shape[v_align_offset:]
            ellipsis     = Ellipsis
            align_offset = 0
        #--- End: if

        # Find out which of the dimensions of value are to be
        # broadcast, and those which are not. Note that, as in numpy,
        # it is not allowed for a dimension in value to be larger than
        # a size 1 dimension in self
        base_value_indices       = []
        non_broadcast_dimensions = []

        for i, (a, b) in enumerate(zip(shape0, shape1)):
            if b == 1:
                base_value_indices.append(slice(None))
            elif a == b and b != 1:
                base_value_indices.append(None)
                non_broadcast_dimensions.append(i)
            else:
                raise ValueError("Can't broadcast shape %s across shape %s" %
                                 (repr(shape1), repr(shape0)))
        #--- End: for

        previous_location = ((-1,),) * self_ndim
        start             = [0]      * value_ndim

        save = conform_args['save']

        value.to_memory()

        for partition in self.partitions.flat():
            p_indices, shape = _partition_overlaps(partition, indices)        

            if not p_indices:
                # This partition does not overlap the indices          
                continue

            # Find which elements of value apply to this partition
            value_indices = base_value_indices[:]
       
            for i in non_broadcast_dimensions:
                j                  = i + align_offset
                location           = partition.location[j][0]
                reference_location = previous_location[j][0]

                if location > reference_location:
                    stop             = start[i] + shape[j]
                    value_indices[i] = slice(start[i], stop)
                    start[i]         = stop

                elif location == reference_location:
                    value_indices[i] = previous_slice[i]

                elif location < reference_location:
                    stop             = shape[j]
                    value_indices[i] = slice(0, stop)
                    start[i]         = stop
            #--- End: for

            previous_location = partition.location
            previous_slice    = value_indices[:]
          
            for i in reverse_dimensions:
                value_indices[i] = _mirror_index(value_indices[i], shape1[i])

            if ellipsis:
                value_indices.insert(0, ellipsis)

            v = value[tuple(value_indices)].varray
                    
            if not save:
                v = v.copy()

            # Update the partition's data
            array = partition.conform(**conform_args)
            if not conform_args['hardmask']:
                # Soft mask
                array[p_indices] = v
            else:
                # Hard mask
                array.soften_mask()

                array = array[p_indices]

                if array.mask is numpy_ma_nomask:
                    mask = True
                else:
                    mask = ~numpy_ma_getmaskarray(array)

                both_unmasked = mask & ~numpy_ma_getmaskarray(v)

                partition.data[p_indices] = numpy_ma_where(both_unmasked,
                                                           v, array)

                partition.data.harden_mask()
            #--- End: if

            partition.close()
        #--- End: For
            
    #--- End: def
    
    def _decode_json(self, string, _filename):
        '''

Create the partition array from a JSON encoded string.

:Parameters:

    string : str
        The JSON encoding of the partition array.

    _filename : str
        The file name to use for partitions which do not specify one.

:Returns:

    None

**Examples**
'''

        x = json_loads(string)

        # ------------------------------------------------------------
        # Initialize an empty partition array
        # ------------------------------------------------------------
        dimensions = self.dimensions

        direction = x.pop('direction', True)
        if dimensions:
            direction = dict([(dim, dir) for dim, dir in zip(dimensions, direction)])
            
        partition_array = empty(x['pshape'],
#                                dimensions=dimensions[:],
                                direction=direction,
                                dimensions=x['pdimensions'])

        # ------------------------------------------------------------
        # Fill the partition array with partitions
        # ------------------------------------------------------------
        # Units/calendar defaults which apply to any partition which
        # does not specify its own
        master_units    = getattr(self.Units, 'units'   , None)
        master_calendar = getattr(self.Units, 'calendar', None)

        for attrs in x['Partitions']:
            partition = Partition(location=attrs.pop('location'))

            # Set the 'dimensions' attribute
            partition.dimensions = attrs.pop('dimensions', None)
            if partition.dimensions is None:
                # Take dimensions from the partition array 
                partition.dimensions = dimensions[:]

            # Set the 'direction' attribute
            if 'direction' not in attrs:
                # Take directions from the partition array 
                if not partition.dimensions:
                    partition.direction = True
                else:
                    partition.direction = dict([(dim, direction[dim])
                                                for dim in partition.dimensions])
            else:                
                # Use the directions specified for this partition
                d = attrs.pop('direction')
                if not partition.dimensions:
                    partition.direction = d
                else:
                    partition.direction = dict([(dim, dir)
                                               for dim, dir in zip(partition.dimensions, d)])
            #--- End: if

            # Set the 'Units' attribute from the partition array or as
            # specified for this partition
            units    = attrs.pop('units'   , master_units)
            calendar = attrs.pop('calendar', master_calendar)
            partition.Units = Units(units=units, calendar=calendar)

            # Set the 'part' attribute: tuples in the encoded part are
            # reconstituted as slice objects
            part = attrs.pop('part', [])
            if not part:
                partition.part = []
            else:
                partition.part = [(slice(*index)
                                   if isinstance(index, tuple) else
                                   index)
                                  for index in ast_literal_eval(part)]
            #--- End: if

            # Set the 'data' attribute
            format = attrs['data'].pop('format')     
            kwargs = attrs['data']
            kwargs['shape'] = tuple(kwargs['shape'])
            kwargs['ndim']  = len(kwargs['shape'])
            kwargs['size']  = long(reduce(mul, kwargs['shape'], 1))

            if 'dtype' not in kwargs or kwargs['dtype'] is None:
                # Take the data type from self
                kwargs['dtype'] = copy(self.dtype)
            else:
                # Use the data type specified for this partition
                # NOTE(review): netCDF_to_numpy_data_type is not
                # defined or imported in this section -- presumably
                # provided elsewhere in the module; verify.
                kwargs['dtype'] = netCDF_to_numpy_data_type(kwargs['dtype'])
            
            if '_filename' not in kwargs or kwargs['_filename'] is None:
                # Take the file name the one stored in self
                kwargs['_filename'] = _filename

            if format == 'netCDF':                
                partition.data = NetCDFFileArray(**kwargs)
            elif format == 'PP':
                partition.data = PPFileArray(**kwargs)
            else:
                raise ValueError("Unknown partition file format: %s" % format)

            # Put the partition into the partition array  
            index = tuple(attrs['index'])
            partition_array[index] = partition
        #--- End: for

        # Save the partition array
        self.partitions = partition_array
    #--- End: def

    def chunk(self, chunksize=None, extra_boundaries=None, chunk_dims=None):
        '''

Partition the data array

:Parameters:

    chunksize : int, optional

    extra_boundaries : sequence of lists or tuples, optional

    chunk_dims : sequence of lists or tuples, optional

:Returns:

    extra_boundaries, chunk_dims : list, list

**Examples**

>>> d.chunk()
>>> d.chunk(100000)
>>> d.chunk(extra_boundaries=([3, 6],), chunk_dims=['dim0'])
>>> d.chunk(extra_boundaries=([3, 6], [40, 80]), chunk_dims=['dim0', 'dim1'])

'''
        if extra_boundaries is None:

            extra_boundaries = []
            chunk_dims       = []

            if not self.save_to_disk():
                return [], []
            
#DCH - test on pndim == 0
            # Assumption: there is only one partition, hmmm, could do better
            if not chunksize:
                chunksize = CHUNKSIZE()

            n_chunks = int(self.size*(self.dtype.itemsize+1)/chunksize + 0.5)
            if n_chunks <= 1:
                return [], []

            for size, dim in zip(self.shape, self.dimensions):
                if size <= n_chunks:
                    extra_boundaries.append(range(1, size))
                    chunk_dims.append(dim)
                    n_chunks -= size
                else:
                    start = int(size/float(n_chunks))
                    extra_boundaries.append(range(start, size, start))
                    chunk_dims.append(dim)
                    break
            #--- End: for        
        #--- End: if
            
        for extra_bounds, dim in zip(extra_boundaries, chunk_dims):
             if dim not in self.pdimensions:
                self.expand_partition_dims(dim)            

             self.add_partitions(extra_bounds, dim)
        #--- End: for

        return extra_boundaries, chunk_dims
    #--- End: def
    
    def _combine(self, other, method):
        '''

'''
        if not self.Units and not other.Units:
            return self.Units

        if not other.Units or not self.Units:
            raise ValueError("Can't combine %s with %s" %
                             (repr(self.Units), repr(other.Units)))

        method_type  = method[-5:-2]

        if method_type in ('mul', 'div'):
            return getattr(self.Units, method)(other.Units)
          
        if method_type in ('sub', 'add'):
          if self.Units.equivalent(other.Units):
              other.Units = self.Units
              if self.Units.isreftime and other.Units.isreftime:
                  return getattr(self.Units, method)(other.Units)
              else:
                  return self.Units.copy()
          else:
              if self.Units.isreftime and other.Units.istime:
                  other.Units  = Units(self.Units._rtime.units)
                  return self.Units.copy()
              else:
                  # Raise an exception for an illegal combination of
                  # units
                  getattr(self.Units, method)(other.Units)
        #--- End: if

        if method_type in ('_eq', '_ne', '_lt', '_le', '_gt', '_ge'):
            if self.Units.equivalent(other.Units):
                other.Units = self.Units
                return Units()
            else: 
                raise ValueError("Can't compare %s to %s" %
                                 (repr(self.Units), repr(other.Units)))
        #--- End: if

        if method_type in ('and', '_or', 'xor', 'ior'):
            if self.Units.equivalent(other.Units):
                other.Units = self.Units
                return self.Units.copy()
            else: 
                raise ValueError("Can't compare %s to %s" %
                                 (repr(self.Units), repr(other.Units)))
        #--- End: if

        if method_type == 'pow':
            if other.size > 1:
                raise ValueError(
"Can't raise %s to the power of an array with more than one element" %
self.__class__.__name__)

            if other.Units.equivalent(Units('1')):
                other.Units = Units('1')
            else:
                # Raise an exception for an illegal combination of
                # units
                getattr(self.Units, method)(other.Units)

            return self.Units ** other.varray.item()                    
        #--- End: if

        raise ValueError("doo bee doo")
    #--- End: def

    def _binary_operation(self, other, method):
        '''

Implement binary arithmetic and comparison operations with the numpy
broadcasting rules.

It is intended to be called by the binary arithmetic and comparison
methods, such as __sub__() and __lt__().

:Parameters:

    other : object
        The object on the right hand side of the operator.

    method : str
        The binary arithmetic or comparison method name (such as
        "__imul__" or "__ge__").

:Returns:

    new : Data
        A new Data object, or if the operation was in place, the same
        Data object.

**Examples**

>>> d = cf.Data([0, 1, 2, 3])
>>> e = cf.Data([1, 1, 3, 4])

>>> f = d._binary_operation(e, '__add__')
>>> print f.array
[1 2 5 7]

>>> e = d._binary_operation(e, '__lt__')
>>> print e.array
[ True False  True  True]

>>> d._binary_operation(2, '__imul__')
>>> print d.array
[0 2 4 6]

'''
        inplace      = method[2] == 'i'
        method_type  = method[-5:-2]

        if isinstance(other, self.__class__):
            # --------------------------------------------------------
            # other is Data object
            # --------------------------------------------------------
            other = other.copy()            
            result_Units = self._combine(other, method)

        elif isinstance(other, (float, int, long)):
            # --------------------------------------------------------
            # other is a number
            # --------------------------------------------------------
            if method_type == 'pow':
                result_Units = self.Units ** other
            elif method_type in ('_eq', '_ne', '_lt', '_le', '_gt', '_ge'):
                result_Units = Units()              
            else:
                result_Units = self.Units.copy()

            other = type(self)(other)
            
        elif isinstance(other, numpy.ndarray):
            # --------------------------------------------------------
            # other is a numpy array
            # --------------------------------------------------------
            if method_type == 'pow':
                if other.size > 1:
                    raise ValueError(
"Can't raise %s to the power of an array with more than one element" %
self.__class__.__name__)

                result_Units = self.Units ** other.item()
            elif method_type in ('_eq', '_ne', '_lt', '_le', '_gt', '_ge'):
                result_Units = Units()
            else:
                result_Units = self.Units.copy()

            other = type(self)(other)
   
        elif isinstance(other, (list, tuple)):
            # --------------------------------------------------------
            #
            # --------------------------------------------------------
            if method_type == 'pow':
                if len(other) > 1:
                    raise ValueError(
"Can't raise %s to the power of a sequence with more than one element" %
self.__class__.__name__)

                result_Units = self.Units ** other.varray.item()
            elif method_type in ('_eq', '_ne', '_lt', '_le', '_gt', '_ge'):
                result_Units = Units()
            else:
                result_Units = self.Units.copy()

            other = type(self)(other)
   
        else:
            # --------------------------------------------------------
            # other is something else
            # --------------------------------------------------------
            return NotImplemented
        #--- End: if

        # ------------------------------------------------------------
        # Bring other into memory, if appropriate.
        # ------------------------------------------------------------
        other.to_memory()
        
        # ------------------------------------------------------------
        # Find which dimensions need to be broadcast in one or other
        # of the arrays.
        #
        # For each common dimension, the 'broadcast_indices' list will
        # have a value of None if there is no broadcasting required
        # (i.e. the two arrays have the same size along that
        # dimension) or a value of slice(None) if broadcasting is
        # required (i.e. the two arrays have the different sizes along
        # that dimension and one of the sizes is 1).
        #
        # For example, if c.shape is [7,1,6,1,5] and d.shape is
        # [6,4,1] then broadcast_indices will be
        # [None,slice(None),slice(None)].
        #
        # The indices to d which correspond to a partition of c, are
        # the relevant subset of partition.indices updated with the
        # non None elements of the broadcast_indices list.
        #
        # In the above example, if a partition of c has
        # partition.indices of
        # (slice(0,3),slice(0,1),slice(2,4),slice(0,1),slice(0,5)),
        # the relevant subset of these is partition.indices[2:] and
        # the corresponding indices to d are
        # (slice(2,4),slice(None),slice(None))
        # ------------------------------------------------------------
        align_offset = self.ndim - other.ndim
        if align_offset >= 0:
            # self has more dimensions than other
            shape0        = self.shape[align_offset:]
            shape1        = other.shape
            ellipsis      = None
            new_shape     = self.shape[:align_offset]
            new_dimensions     = self.dimensions[:]
            new_direction = copy(self.direction)
        else:
            # other has more dimensions than self
            align_offset  = -align_offset
            shape0        = self.shape
            shape1        = other.shape[align_offset:]
            ellipsis      = (Ellipsis,)
            new_shape     = other.shape[:align_offset]
            new_dimensions     = [str(i) for i in xrange(align_offset)] + self.dimensions[:]

            if self.isscalar:
                new_direction = {}
            else:
                new_direction = self.direction.copy()

            for dim in other.dimensions[:align_offset]:
                new_direction[dim] = other.direction[dim]

            align_offset  = 0
        #--- End: if
            
        broadcast_indices = []
        for a, b in zip(shape0, shape1):
            if a == b:
                new_shape += (a,)
                broadcast_indices.append(None)
                continue
            
            if a > 1 and b == 1:
                new_shape += (a,)
            elif b > 1 and a == 1:
                new_shape += (b,)
            else:
                raise ValueError("Can't broadcast shape %s against shape %s" %
                                 (repr(other.shape), repr(self.shape)))
            
            broadcast_indices.append(slice(None))
        #--- End: for

        # ------------------------------------------------------------
        # Create a Data object which just contains the metadata for
        # the result. If we're doing a binary arithmetic operation
        # then result will get filled with data and returned. If we're
        # an augmented arithmetic assignment then we'll use result's
        # metadata to update self.
        # ------------------------------------------------------------
        new_ndim = len(new_shape)

        if new_shape != self.shape:
            set_location_map = True
            dummy_location   = [None] * new_ndim
        else:
            set_location_map = False

        if not set_location_map:
            new_size = long(reduce(mul, new_shape, 1))
        else:
            new_size = self.size

        result             = self.copy()
        result._FillValue  = self._FillValue
        result.pdimensions = self.pdimensions[:]
        result._shape      = new_shape
        result._ndim       = new_ndim
        result._size       = new_size
        result.dimensions  = new_dimensions
        result.direction   = new_direction        

        # ------------------------------------------------------------
        # Set flags to control whether or not the data of result and
        # self should be kept in memory
        # ------------------------------------------------------------
        if method_type in ('_eq', '_ne', '_lt', '_le', '_gt', '_ge'):
            result_dtype = numpy_dtype(bool)
        else:
            result_dtype = numpy_result_type(self.dtype, other.dtype)

        save_result = result.save_to_disk(result_dtype.itemsize)

        if not inplace:
            # When doing a binary arithmetic operation (like
            # result=self+2), we need to decide whether or not to keep
            # self's data in memory.
            revert_to_file = True
            save_self      = self.save_to_disk()
        else:
            # When doing an augmented arithmetic assignment (like
            # self+=2), we don't need to keep self's original data in
            # memory.
            revert_to_file = False
            save_self      = False
        #--- End: if

#        dimensions     = self.dimensions
#        direction = self.direction
#        units     = self.Units

        conform_args = self.conform_args(save=save_self,
                                         revert_to_file=revert_to_file)

# Think about dtype, here.

        for partition_r, partition_s in zip(result.partitions.flat(),
                                            self.partitions.flat()):
            
            indices = tuple([                
                    (index 
                     if not broadcast_index else
                     broadcast_index) 
                    for index, broadcast_index in zip(partition_s.indices[align_offset:],
                                                      broadcast_indices)
                    ])
            
            # If other has more dimensions than self then we must
            # prepend an Ellipsis object to the indices
            if ellipsis:
                indices = ellipsis + indices

            array0 = partition_s.conform(**conform_args)
            # POSSIBLE ISSUE HERE: array1 could be much larger
            # than the chunk size.
            array1 = other[indices].varray

            if not inplace:
                partition = partition_r
                partition.update_from(partition_s)
            else:
                partition = partition_s

            # --------------------------------------------------------
            # Do the binary operation on this partition's data
            # --------------------------------------------------------
            partition.data = getattr(array0, method)(array1)

            partition.Units     = result_Units.copy()
            partition.dimensions     = new_dimensions[:]
            partition.direction = copy(new_direction)

            if set_location_map:
                partition.location = dummy_location[:]
                partition.shape    = list(array0.shape)
            #--- End: if

            partition.close(save_result)            

            if not inplace:
                partition_s.close()
        #--- End: for

        if result_dtype == bool:
            result_Units = Units()

        if not inplace:
            result.Units = result_Units
            result.dtype = result_dtype
            
            if set_location_map:
                result._set_location_map()
                
            return result
        else:
            # Update the metadata for the new master array in place
            self.Units      = result_Units
            self.dtype      = result_dtype
            self._shape     = new_shape
            self._ndim      = new_ndim
            self.dimensions      = new_dimensions
            self._size      = new_size
            self.direction  = new_direction        
                
            if set_location_map:
                self._set_location_map()

            return self
    #--- End: def

    def _set_location_map(self):
        '''

Recalculate the `location` attribute of each Partition object in the
partition array in place.

**Examples**

>>> d._set_location_map()

'''
        self.partitions.set_location_map(self.dimensions)
    #--- End: def

    def _insert_data(self, other, indices,
                     PDim, PDim_direction, dim_name_map):
        '''

'''
        data0 = self
        data1 = other
            
        # ------------------------------------------------------------
        # 1. Change the dimension names in data1 to match those in the
        # the space of data0.
        # ------------------------------------------------------------
        data1.change_dimension_names(dim_name_map)


        # ------------------------------------------------------------
        # 2. Insert (size 1) dimensions into data0 which are present
        # in data1 but not in data0
        #
        # Note that the dimensions to be inserted into data0 will have
        # size 1 in data1 (because the two spaces are aggregatable).
        #
        # Note that after this process data1.dimensions will be a
        # subset of data0.dimensions (because the two spaces are
        # aggregatable and so have the same rank).
        # ------------------------------------------------------------
        dimensions0     = data0.dimensions
        direction1 = data1.direction
        for dim1 in data1.dimensions[::-1]:
            if dim1 not in dimensions0:
                data0.expand_dims(0, dim1, direction1[dim1])
        #--- End: for
    
        # ------------------------------------------------------------
        # 3. If the Aggregating Dimension is missing from data0 then
        # insert it (with size 1 in the slowest varying position).
        # 
        # Note that after this process data1.dimensions will be a subset of
        # data0.dimensions
        # ------------------------------------------------------------
        if PDim not in data0.dimensions:
            data0.expand_dims(0, PDim, PDim_direction)

        # ------------------------------------------------------------
        # 4. Permute the dimensions of data1 to match data0.
        #
        # For example: if data0.dimensions=['dim0','dim1','dim2'] and
        #              data1.dimensions=['dim2','dim0'] to start with then
        #              data1.dimensions will become ['dim0','dim2']
        #
        # Note that by now, data1.dimensions is a subset of
        # data0.dimensions (this was was ensured by steps 2 and 3).
        # ------------------------------------------------------------
        dimensions1 = data1.dimensions
        axes   = [dimensions1.index(dim)
                  for dim in data0.dimensions if dim in dimensions1] 
        data1.transpose(axes)

        # ------------------------------------------------------------
        # 5. Insert (size 1) dimensions into data1 which are present in
        # data0 but not in data1.
        #
        # For example: if data0.dimensions=['dim0','dim1','dim2'] and
        #              data1.dimensions=['dim0','dim2'] to start with then
        #              data1.dimensions will become ['dim0','dim1','dim2']
        #
        # Note that after this process data0 and data1 will span the
        # same dimensions.
        #
        # Note that the orders of dimensions in data0 and data1 have
        # to be the same (so that their partitions' locations
        # conform).
        #
        # Note that after this process the Aggregating Dimension will
        # definitely be in both data0 and data1.
        # ------------------------------------------------------------
        direction0 = data0.direction
        dimensions1     = data1.dimensions
        for i, dim in enumerate(data0.dimensions):
            if dim not in dimensions1:
                data1.expand_dims(i, dim, direction0)
        #--- End: for

        # ------------------------------------------------------------
        # 6. Make sure that the partition dimensions of data0 are the
        # same as data1 (although, for now, they may have different
        # orders).
        #
        # Note that this may involve adding new partition dimensions
        # to either or both of data0 and data1.
        #            
        # Note that if the Aggregating Dimension needs to be added it
        # is inserted as the outer (slowest varying) dimension to
        # reduce the likelihood of having to transpose (expensively)
        # the partition array.
        # ------------------------------------------------------------
        for f, g in zip((data0, data1), 
                        (data1, data0)):

            f_pdimensions = f.pdimensions
            g_pdimensions = g.pdimensions[:]
            try:
                g_pdimensions.remove(PDim)
            except ValueError:
                pass
    
            for pdim in g_pdimensions[::-1]:
                if pdim not in f_pdimensions:
                    f.expand_partitions_dims(pdim)

            if PDim not in f.pdimensions:
                f.expand_partition_dims(PDim)
        #--- End: for

        # ------------------------------------------------------------
        # 7. Permute the partition dimensions of the partition array
        # of data0 so that the Aggregating Dimension is the outermost
        # (slowest varying) dimension.
        # ------------------------------------------------------------
        axis = data0.pdimensions.index(PDim)
        data0.partitions.rollaxis(axis, 0)
            
        # ------------------------------------------------------------
        # 8. Permute the partition dimensions of the partition array
        # of data1 to match data0.
        # ------------------------------------------------------------
        pdimensions1 = data1.pdimensions
        axes   = [pdimensions1.index(dim) for dim in data0.pdimensions]
        data1.partitions.transpose(axes)
            
        # ------------------------------------------------------------
        # 9. Create new partition boundaries in the partition arrays
        # of data0 and data1 so that their partition arrays may be
        # considered as different slices of a common, larger
        # hyperrectangular partition array.
        #
        # Note that there is no need to add any boundaries across the
        # Aggregating Dimension.
        # ------------------------------------------------------------
        boundaries0 = data0.partition_boundaries()        
        boundaries1 = data1.partition_boundaries()

        for dim in data0.pdimensions[1:]:
            
            # Still here? Then see if there are any partition
            # boundaries to be created for this partition dimension
            bounds0 = boundaries0[dim]
            bounds1 = boundaries1[dim]

            symmetric_diff = set(bounds0).symmetric_difference(bounds1)            
            if not symmetric_diff:
                # The partition boundaries for this partition
                # dimension are already the same in data0 and data1
                continue
            
            # Still here? Then there are some partition boundaries to
            # be created for this partition dimension in data0 and/or
            # data1.
            for f, g, bf, bg in zip((data0, data1, bounds0, bounds1), 
                                    (data1, data0, bounds1, bounds0)):                
                extra_bounds = [i
                                for i in bg
                                if i in symmetric_diff]
                
                f.add_partitions(extra_bounds, dim, boundaries=bf)
            #--- End: for
        #--- End: for
                
        # ------------------------------------------------------------
        # 10. Merge data1 into data0 along the Aggregating Dimension.
        #
        # If no indices have been provided then, along the Aggregating
        # Dimension, concatenate the partitions of data1 onto the end
        # of the partitions of data0.
        # ------------------------------------------------------------
        if indices is None:
            npartitions0 = data0.npartitions
            indices = xrange(npartitions0, npartitions0+data1.npartitions)
        #--- End: if

        partitions0 = data0.partitions
        partitions1 = data1.partitions
        for i, index in enumerate(indices):
            partitions0.insert(index, partitions1[i])
                                           
        # Update the size of data0
        data0._size += long(data1._size)

        # Update the shape of data0
        index  = data1.dimensions.index(PDim)
        shape0 = list(data0._shape)
        shape0[index] += data1._shape[index]
        data0._shape = tuple(shape0)

        # Update the location map of the partition array of data0
        data0._set_location_map()

        # Set the new dtype
        dtype0 = data0.dtype
        dtype1 = data1.dtype
        if dtype0 != dtype1:
            data0.dtype = numpy_result_type(dtype0, dtype1)        

        # ------------------------------------------------------------
        # If the new array is larger than the chunk size then save any
        # numpy arrays to disk
        # ------------------------------------------------------------
        if data0.save_to_disk():
            data0.to_disk()
        
        # ------------------------------------------------------------
        # Done
        # ------------------------------------------------------------
    #--- End: def

    def _parse_axes(self, axes, method, default=[], dims=None):
        '''
        
:Parameters:

    axes : (sequence of) int or str
        The axes of the data array. May be one of, or a sequence of
        any combination of:

            * The integer position of a dimension in the data array
              (negative indices allowed).
            * The internal name a dimension.

    method : str

:Returns:

    out : list

**Examples**

'''
        if not axes and axes is not 0:
            axes = default

        if isinstance(axes, (str, int, long)):
            # Convert an integer to a single element sequence
            axes = [axes]
        else:
            axes = list(axes)
            
        dimensions = self.dimensions
        for i, axis in enumerate(axes):            
            if axis in dimensions:
                axes[i] = dimensions.index(axis)
            elif isinstance(axis, (int, long)):
                if axis < 0:
                    axes[i] = axis + self.ndim
            else:
                raise ValueError(
                    "Can't %s: Axis must be integer or dimension name: %s" %
                    (method, axis))
        #--- End: for
            
        if axes:
            # Check for duplicate axes
            if len(axes) != len(set(axes)):
                raise ValueError("Can't %s: Repeated axis: %s" %
                                 (method, repr(axes)))
            
            # Check for out of range axes
            if max(axes) >= self.ndim:
                raise ValueError("Can't %s: Invalid axis for this array: %d" %
                                 (method, max(axes)))
        #--- End: if

        if dims:
            # Convert axes from integer positions to dimension identifiers
            dimensions = self.dimensions
            return axes, [dimensions[i] for i in axes]
        else:
            return axes
    #--- End: def

    def _unary_operation(self, operation):
        '''

Implement unary arithmetic operations.

It is intended to be called by the unary arithmetic methods, such as
__abs__().

:Parameters:

    operation : str
        The unary arithmetic method name (such as "__abs__").

:Returns:

    new : Data
        A new Data array.

**Examples**

>>> d = cf.Data([1, 2, -3, -4, -5])

>>> e = d._unary_operation('__abs__')
>>> print e.array
[1 2 3 4 5]]

>>> e = d.__abs__()
>>> print e.array
[[1 2 3 4 5]]

>>> e = abs(d)
>>> print e.array
[[1 2 3 4 5]]

'''
        self.to_memory()

        new = self.copy()

        conform_args = new.conform_args()

        for partition in new.partitions.flat():
            array = partition.conform(**conform_args)
            partition.data = getattr(array, operation)()
            partition.close()
        #--- End: for

        return new
    #--- End: def

    def __add__(self, other):
        '''
x.__add__(y) <==> x+y

'''
        return self._binary_operation(other, '__add__')
    #--- End: def

    def __iadd__(self, other):
        '''
x.__iadd__(y) <==> x+=y

'''
        return self._binary_operation(other, '__iadd__')
    #--- End: def

    def __radd__(self, other):
        '''
x.__radd__(y) <==> y+x

'''
        return self._binary_operation(other, '__radd__')
    #--- End: def

    def __sub__(self, other):
        '''
x.__sub__(y) <==> x-y

'''
        return self._binary_operation(other, '__sub__')
    #--- End: def

    def __isub__(self, other):
        '''
x.__isub__(y) <==> x-=y

'''
        return self._binary_operation(other, '__isub__')
    #--- End: def

    def __rsub__(self, other):
        '''
x.__rsub__(y) <==> y-x

'''
        return self._binary_operation(other, '__rsub__')
    #--- End: def

    def __mul__(self, other):
        '''
x.__mul__(y) <==> x*y

'''
        return self._binary_operation(other, '__mul__')
    #--- End: def

    def __imul__(self, other):
        '''
x.__imul__(y) <==> x*=y

'''
        return self._binary_operation(other, '__imul__')
    #--- End: def

    def __rmul__(self, other):
        '''
x.__rmul__(y) <==> y*x

'''
        return self._binary_operation(other, '__rmul__')
    #--- End: def

    def __div__(self, other):
        '''
x.__div__(y) <==> x/y

'''
        return self._binary_operation(other, '__div__')
    #--- End: def

    def __idiv__(self, other):
        '''
x.__idiv__(y) <==> x/=y

'''
        return self._binary_operation(other, '__idiv__')
    #--- End: def

    def __rdiv__(self, other):
        '''
x.__rdiv__(y) <==> y/x

'''
        return self._binary_operation(other, '__rdiv__')
    #--- End: def

    def __floordiv__(self, other):
        '''
x.__floordiv__(y) <==> x//y

'''
        return self._binary_operation(other, '__floordiv__')
    #--- End: def

    def __ifloordiv__(self, other):
        '''
x.__ifloordiv__(y) <==> x//=y

'''
        return self._binary_operation(other, '__ifloordiv__')
    #--- End: def

    def __rfloordiv__(self, other):
        '''
x.__rfloordiv__(y) <==> y//x

'''
        return self._binary_operation(other, '__rfloordiv__')
    #--- End: def

    def __truediv__(self, other):
        '''
x.__truediv__(y) <==> x/y

'''
        return self._binary_operation(other, '__truediv__')
    #--- End: def

    def __itruediv__(self, other):
        '''
x.__itruediv__(y) <==> x/=y

'''
        return self._binary_operation(other, '__itruediv__')
   #--- End: def

    def __rtruediv__(self, other):
        '''
x.__rtruediv__(y) <==> y/x

'''
        return self._binary_operation(other, '__rtruediv__')
    #--- End: def

    def __pow__(self, other, modulo=None):
        '''
x.__pow__(y) <==> x**y

'''  
        if modulo is not None:
            raise NotImplementedError("3-argument power not supported for '%s'" %
                                      self.__class__.__name__)

        return self._binary_operation(other, '__pow__')
    #--- End: def

    def __ipow__(self, other, modulo=None):
        '''
x.__ipow__(y) <==> x**=y

'''  
        if modulo is not None:
            raise NotImplementedError("3-argument power not supported for '%s'" %
                                      self.__class__.__name__)

        return self._binary_operation(other, '__ipow__')
    #--- End: def

    def __rpow__(self, other, modulo=None):
        '''
x.__ipow__(y) <==> x**=y

'''  
        if modulo is not None:
            raise NotImplementedError("3-argument power not supported for '%s'" %
                                      self.__class__.__name__)

        return self._binary_operation(other, '__rpow__')
    #--- End: def

    def __eq__(self, other):
        '''
x.__eq__(y) <==> x==y

'''
        return self._binary_operation(other, '__eq__')
    #--- End: def

    def __ne__(self, other):
        '''
x.__ne__(y) <==> x!=y

'''
        return self._binary_operation(other, '__ne__')
    #--- End: def

    def __ge__(self, other):
        '''
x.__ge__(y) <==> x>=y

'''
        return self._binary_operation(other, '__ge__')
    #--- End: def

    def __gt__(self, other):
        '''
x.__gt__(y) <==> x>y

'''
        return self._binary_operation(other, '__gt__')
    #--- End: def

    def __le__(self, other):
        '''
x.__le__(y) <==> x<=y

'''
        return self._binary_operation(other, '__le__')
    #--- End: def

    def __lt__(self, other):
        '''
x.__lt__(y) <==> x<y

'''
        return self._binary_operation(other, '__lt__')
    #--- End: def

    def __and__(self, other):
        '''
x.__and__(y) <==> x&y

'''
        return self._binary_operation(other, '__and__')
    #--- End: def

    def __iand__(self, other):
        '''
x.__iand__(y) <==> x&=y

'''
        return self._binary_operation(other, '__iand__')
    #--- End: def

    def __or__(self, other):
        '''
x.__or__(y) <==> x|y

'''
        return self._binary_operation(other, '__or__')
    #--- End: def

    def __ior__(self, other):
        '''
x.__ior__(y) <==> x|=y

'''
        return self._binary_operation(other, '__ior__')
    #--- End: def


    def __xor__(self, other):
        '''
x.__xor__(y) <==> x^y

'''
        return self._binary_operation(other, '__xor__')
    #--- End: def

    def __ixor__(self, other):
        '''
x.__ixor__(y) <==> x^=y

'''
        return self._binary_operation(other, '__ixor__')
    #--- End: def

    def __abs__(self):
        '''
x.__abs__() <==> abs(x)

'''
        return self._unary_operation('__abs__')
    #--- End: def

    def __neg__(self):
        '''
x.__neg__() <==> -x

'''
        return self._unary_operation('__neg__')
    #--- End: def

    def __invert__(self):
        '''
x.__invert__() <==> ~x

'''
        return self._unary_operation('__invert__')
    #--- End: def

    def __pos__(self):
        '''
x.__pos__() <==> +x

'''
        return self._unary_operation('__pos__')
    #--- End: def

    def _set(self, attr, value):
        '''

Set an attribute which actually belongs to the partition array. This
is used by the decorated properties.

:Parameters:

    attr : str
        The name of the attribute on the partition array.

    value :
        The new value of the attribute.

:Returns:

    None

'''
        setattr(self.partitions, attr, value)  
    #--- End: def

    def _get(self, attr):
        '''

Get an attribute which actually belongs to the partition array. This
is used by the decorated properties.

:Parameters:

    attr : str
        The name of the attribute on the partition array.

:Returns:

    out :
        The value of the attribute.

'''
        return getattr(self.partitions, attr)
    #--- End: def

    def _del(self, attr):
        '''

Delete an attribute which actually belongs to the partition
array. This is used by the decorated properties.

:Parameters:

    attr : str
        The name of the attribute on the partition array.

:Raises:

    AttributeError :
        If the partition array does not have the attribute.

'''
        try:
            delattr(self.partitions, attr)
        except AttributeError:
            # Re-raise with a message naming this class rather than
            # the partition array
            raise AttributeError("Can't delete '%s' attribute '%s'" %
                                 (self.__class__.__name__, attr))
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: Units
    # ----------------------------------------------------------------
    @property
    def Units(self):
        '''

The `Units` object containing the units of the data array.

Deleting the `Units` attribute is equivalent to setting the units to
the undefined units object, so the Data object is guaranteed to always
have the `Units` attribute.

**Examples**

>>> d.Units = cf.Units('m')
>>> d.Units
<CF Units: m>
>>> del d.Units
>>> d.Units
<CF Units: >

'''
        # Stored directly on this instance rather than on the
        # partition array
        return self._Units #self._get('Units')
    #--- End: def

    @Units.setter
    def Units(self, value): self._Units = value #self._set('Units', value)
    @Units.deleter
    def Units(self)       : self._Units = Units(None)

    # ----------------------------------------------------------------
    # Attribute: direction
    # ----------------------------------------------------------------
    @property
    def direction(self):
        '''

The direction of each dimension of the data array.

For a non-scalar data array this is a dictionary mapping dimension
names to booleans. For a scalar data array it is a single boolean.
(NOTE(review): inferred from the examples below and from
`expand_dims`; confirm against the partition array's 'direction'
attribute.)

**Examples**

>>> d.dimensions
['dim2', 'dim0', 'dim3']
>>> d.direction
{'dim0': True, 'dim1': False, 'dim2': True}

>>> d.dimensions
[]
>>> d.direction
True

>>> d.dimensions
[]
>>> d.direction
False

'''
        # Delegated to the partition array
        return self._get('direction')
    @direction.setter
    def direction(self, value): self._set('direction', value)
    @direction.deleter
    def direction(self)       : self._del('direction')

    # ----------------------------------------------------------------
    # Attribute: dtype
    # ----------------------------------------------------------------
    @property
    def dtype(self):
        '''

The numpy data type of the data array.

By default this is the data type with the smallest size and smallest
scalar kind to which all data array partitions may be safely cast
without loss of information. For example, if the partitions have data
types 'int64' and 'float32' then the data array's data type will be
'float64'; or if the partitions have data types 'int64' and 'int32'
then the data array's data type will be 'int64'.

Setting the data type to a numpy.dtype object, or any object
convertible to a numpy.dtype object, will cause the data array
elements to be recast to the specified type at the time that they are
next accessed. This does not immediately change the data array
elements, so, for example, reinstating the original data type prior to
data access results in no loss of information.

Deleting the data type forces the default behaviour. Note that if the
data type of any partitions has changed after `dtype` has been set
(which could occur if the data array is accessed) then the reinstated
default data type may be different to the data type prior to `dtype`
being set.

**Examples**

>>> d.dtype
dtype('float64')
>>> type(d.dtype)
<type 'numpy.dtype'>
>>> print d.array
[0.5 1.5 2.5]

>>> print d.array
[0.5 1.5 2.5]
>>> import numpy
>>> d.dtype = numpy.dtype(int)
>>> print d.array
[0 1 2]
>>> d.dtype = bool
>>> print d.array
[False True True]
>>> d.dtype = 'float64'
>>> print d.array
[0.5 1.5 2.5]

'''
        # Returns None if the data type has never been set or deleted
        if hasattr(self, '_dtype'):
            return self._dtype
    #--- End: def
    @dtype.setter
    def dtype(self, value):
        self._dtype = numpy_dtype(value)
    #--- End: def
    @dtype.deleter
    def dtype(self):
        # Recompute the default: the smallest type to which all
        # partitions' data may be safely cast
        self._dtype = numpy_result_type(*(partition.data
                                          for partition in self.partitions.flat()))
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: _FillValue
    # ----------------------------------------------------------------
    @property
    def _FillValue(self):
        '''

The _FillValue CF attribute.

If set to None then the default numpy fill value appropriate to the
data array's data type will be used.

Deleting the `_FillValue` attribute actually sets it to None, so the
Data object is guaranteed to always have the `_FillValue` attribute.

**Examples**

>>> f._FillValue = 9999.0
>>> f._FillValue
9999.0
>>> del d._FillValue
>>> d._FillValue
None

'''
        # Stored directly on this instance (in '_FillValueP') rather
        # than on the partition array
        return self._FillValueP #_get('_FillValue')
    #--- End: def
    @_FillValue.setter
    def _FillValue(self, value): self._FillValueP = value #self._set('_FillValue', value)
    @_FillValue.deleter
    def _FillValue(self)       : self._FillValueP = None #self._FillValue = None

    # ----------------------------------------------------------------
    # Attribute: hardmask
    # ----------------------------------------------------------------
    @property
    def hardmask(self):
        '''

Whether the mask is hard (True) or soft (False).

When the mask is hard, masked entries of the data array can not be
unmasked by assignment.

By default, the mask is hard.

**Examples**

>>> d.hardmask = False
>>> d.hardmask
False

'''
        # Stored directly on this instance rather than on the
        # partition array
        return self._hardmask #self._get('hardmask')
    @hardmask.setter
    def hardmask(self, value):
        self._hardmask = value #self._set('hardmask', value)
    @hardmask.deleter
    def hardmask(self):
        # Deletion is deliberately disallowed so that the attribute
        # always exists
        raise AttributeError("Won't delete %s attribute 'hardmask'" %
                             self.__class__.__name__)
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: ismasked (read only)
    # ----------------------------------------------------------------
    @property
    def ismasked(self):
        '''

True if the data array has any masked values.

**Examples**

>>> d.ismasked
True

'''
        nomask = numpy_ma_nomask

        conform_args = self.conform_args(revert_to_file=True) #, dtype=None)

        for partition in self.partitions.flat():
            array = partition.conform(**conform_args)
            partition.close()
            if (numpy_ma_is_masked(array) and 
                (array.mask is not nomask or array.mask.any())):
                return True
        #--- End: for

        return False
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: isscalar (read only)
    # ----------------------------------------------------------------
    @property
    def isscalar(self):
        '''

True if the data array is a 0-d scalar array.

**Examples**

>>> d.ndim
0
>>> d.isscalar
True

>>> d.ndim >= 1
True
>>> d.isscalar
False

'''
        return not self.ndim
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: ndim (read only)
    # ----------------------------------------------------------------
    @property
    def ndim(self):
        '''

Number of dimensions in the data array.

**Examples**

>>> d.shape
[73, 96]
>>> d.ndim
2

'''
        # Stored directly on this instance rather than on the
        # partition array
        return self._ndim #get('ndim')
    #--- End: def
#    @ndim.setter
#    def ndim(self, value): self._set('ndim', value)
#    @ndim.deleter
#    def ndim(self)       : self._del('ndim')

#    # ----------------------------------------------------------------
#    # Attribute: dimensions
#    # ----------------------------------------------------------------
#    @property
#    def dimensions(self):
#        '''
#
#'''
#        return self._get('dimensions')
#    @dimensions.setter
#    def dimensions(self, value): self._set('dimensions', value)
#    @dimensions.deleter
#    def dimensions(self)       : self._del('dimensions')

    # ----------------------------------------------------------------
    # Attribute: pdimensions
    # ----------------------------------------------------------------
    @property
    def pdimensions(self):
        '''

The names of the data array's *partitioned* dimensions, i.e. the
'dimensions' attribute of the partition array.

'''
        return self._get('dimensions')
    #--- End: def
    @pdimensions.setter
    def pdimensions(self, value): self._set('dimensions', value)
    @pdimensions.deleter
    def pdimensions(self)       : self._del('dimensions')

    # ----------------------------------------------------------------
    # Attribute: pndim
    # ----------------------------------------------------------------
    @property
    def pndim(self):
        '''

Number of *partition* dimensions, i.e. the 'ndim' attribute of the
partition array.

'''
        return self._get('ndim')
    #--- End: def
    @pndim.setter
    def pndim(self, value): self._set('ndim', value)
    @pndim.deleter
    def pndim(self)       : self._del('ndim')

    # ----------------------------------------------------------------
    # Attribute: psize
    # ----------------------------------------------------------------
    @property
    def psize(self):
        '''

Number of data array partitions.

**Examples**

>>> d.pshape
(73, 2)
>>> d.psize
146

'''
        # The 'size' attribute of the partition array
        return self._get('size')
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: pshape
    # ----------------------------------------------------------------
    @property
    def pshape(self):
        '''

List of the data array's *partition* dimension sizes.

Note that this attribute is a list, not a tuple.

**Examples**

>>> d.shape
(73, 96)
>>> d.pshape
[73, 2]

'''
        # The 'shape' attribute of the partition array
        return self._get('shape')
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: shape (read only)
    # ----------------------------------------------------------------
    @property
    def shape(self):
        '''

Tuple of the data array's dimension sizes.

**Examples**

>>> d.shape
(73, 96)

>>> d.shape
()

'''
        # Stored directly on this instance rather than on the
        # partition array
        return self._shape #get('shape')
    #--- End: def
#    @shape.setter
#    def shape(self, value): self._shape = value #_set('shape', value)
#    @shape.deleter
#    def shape(self)       : self._del('shape')

    # ----------------------------------------------------------------
    # Attribute: size (read only)
    # ----------------------------------------------------------------
    @property
    def size(self):
        '''

Number of elements in the data array.

**Examples**

>>> d.shape
(73, 96)
>>> d.size
7008

'''
        # Stored directly on this instance rather than on the
        # partition array
        return self._size #get('size')
    #--- End: def
#    @size.setter
#    def size(self, value): self._set('size', value)
#    @size.deleter
#    def size(self)       : self._del('size')

#    def save_to_disk(self, itemsize=None):
#        '''
#
#:Parameters:
#
#    itemsize : int, optional
#
#:Returns:
#
#    out : bool
#
#**Examples**
#
#>>> 
#>>>
#
#'''
#        return self.partitions.save_to_disk(itemsize)
#    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: array (read only)
    # ----------------------------------------------------------------
    @property
    def array(self):
        '''

A numpy array copy of the data array.

The data type of the array is as returned by the `dtype` attribute.

**Examples**

>>> a = d.array
>>> isinstance(a, numpy.ndarray)
True

'''
        if not self.pdimensions:
            # If there is only one partition we can speed things up:
            # conform it and return a copy of its data directly,
            # rather than allocating an empty output array and filling
            # it
            conform_args = self.conform_args(revert_to_file=True)
            partition = self.partitions[0]
            array_out = partition.conform(**conform_args)
            # Copy so that the caller cannot mutate the partition's
            # data
            array_out = array_out.copy()
            partition.close()
            return array_out
        #--- End: if

        array_out = numpy.ma.empty(self.shape, dtype=self.dtype)

        conform_args = self.conform_args(revert_to_file=True) #, dtype=None)

        # Fill the output array one partition at a time, so at most
        # one partition's data is in memory at once (in addition to
        # the output array)
        for partition in self.partitions.flat():
            data = partition.conform(**conform_args)
            array_out[partition.indices] = data
            partition.close()
        #--- End: for

        if self.hardmask:
            # Harden the mask of the output array
            array_out.harden_mask()

        # Shrink the mask
        array_out.shrink_mask()

        return array_out
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: varray (read only)
    # ----------------------------------------------------------------
    @property
    def varray(self):
        '''

A numpy array view of the data array.

Note that making changes to elements of the returned view changes the
underlying data.

**Examples**

>>> a = d.varray
>>> type(a)
<type 'numpy.ndarray'>
>>> a
array([0, 1, 2, 3, 4])
>>> a[0] = 999
>>> d.varray
array([999, 1, 2, 3, 4])

'''
        if not self.pdimensions:
            # If there is only one partition, then we can conform it
            # and return a view to the partition's data without having
            # to create an empty array and then filling it up
            # partition by partition.
            conform_args = self.conform_args(save=False)
            self.partitions[0].conform(**conform_args)
            # Note that there is no need to close the partition here.
            return self.partitions[0].data.view()
        #--- End: if

        # Still here?  
        conform_args = self.conform_args(save=False, dtype=None)
        shape = self.shape
        array = numpy.ma.empty(shape, dtype=self.dtype)

        for partition in self.partitions.flat():
            data = partition.conform(**conform_args)
            array[partition.indices] = data
            # Note that there is no need to close the partition here.
        #--- End: for

        array.shrink_mask()
        if self.hardmask:
            # Harden the mask of the output array
            array.harden_mask()

        # Replace the partition array with a single partition spanning
        # the whole data array, whose data is the array just built, so
        # that the returned view really does alias the underlying data
        self.partitions = PartitionArray(
            [Partition(data      = array,
                       direction = copy(self.direction),
                       location  = [(0,n) for n in shape],
                       dimensions     = self.dimensions[:],
                       part      = [],
                       shape     = list(shape),
                       Units     = self.Units.copy(),      
                       )
             ],
            dimensions  = self.dimensions,
            pdimensions      = [],
            direction   = self.direction,
            )

        return array.view()
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: mask (read only)
    # ----------------------------------------------------------------
    @property
    def mask(self):
        '''

The boolean missing data mask of the data array.

The boolean mask has True where the data array has missing data and
False otherwise.

**Examples**

>>> d.shape
(12, 73, 96)
>>> m = d.mask
>>> m
<CF Data: >
>>> m.dtype
dtype('bool')
>>> m.shape
(12, 73, 96])

'''
        self.to_memory()

        # Work on a deep copy so that the original data is untouched
        mask = self.copy()

        conform_args = mask.conform_args(
            save=self.save_to_disk(numpy_dtype(bool).itemsize)) #, dtype=None)

        # Replace each partition's data with its boolean mask
        for partition in mask.partitions.flat():
            array = partition.conform(**conform_args)
            
            if numpy_ma_is_masked(array):
                # data is a numpy.ma.array instance and has a mask
                # with at least one True element.
                partition.data = array.mask.copy()
            else:
                # data is not a numpy.ma.array instance or data is a
                # numpy.ma.array instance and has a mask of
                # numpy.ma.nomask.
                partition.data = numpy.zeros(array.shape, dtype=bool)

            # The mask is dimensionless
            partition.Units = Units()
            partition.close()
        #--- End: for

        mask.Units = Units()

        mask.hardmask = False

        return mask
    #--- End: def

    @mask.setter
    def mask(self, value):
        raise AttributeError(
            "Can't set the 'mask' attribute directly, use 'setmask'")
    #--- End: def
    @mask.deleter
    def mask(self):
        raise AttributeError(
            "Can't delete the 'mask' attribute directly, use 'setmask'")
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: first_datum (read only)
    # ----------------------------------------------------------------
    @property
    def first_datum(self):
        ''' 

The first element of the data array.

**Examples**

>>> print d.array
[[1 2 3 4]]
>>> d.first_datum
1

>>> print d.array
[[-- 2 3 4]]
>>> d.first_datum
--

'''
        array = self[(slice(0,1),) * self.ndim].array
        if numpy_ma_is_masked(array):
            return numpy.ma.masked
        else:
            return array.item()
    #--- End: def

    # ----------------------------------------------------------------
    # Attribute: last_datum (read only)
    # ----------------------------------------------------------------
    @property
    def last_datum(self):
        ''' 

The last element of the data array.

**Examples**

>>> print d.array
[[1 2 3 4]]
>>> d.last_datum
4

>>> print d.array
[[1 2 3 --]]
>>> d.last_datum
--

'''
        array = self[(slice(-1,None),) * self.ndim].array
        if numpy_ma_is_masked(array):
            return numpy.ma.masked
        else:
            return array.item()
    #--- End: def

    def add_partitions(self,
                       extra_boundaries, 
                       pdim,
                       existing_boundaries=None):
        '''

Add partition boundaries.

**Examples**

>>> d.add_partitions(    )

'''            
        self.partitions.add_partitions(self.dimensions,
                                       extra_boundaries, 
                                       pdim,
                                       existing_boundaries=existing_boundaries)
    #--- End: def

    def all(self):
        '''

Test whether all data array elements evaluate to True.

Performs a logical and over the data array and returns the
result. Masked values are considered as True during computation.

**Examples**

>>> print d.array
[[0 3 0]]
>>> d.all()
False

>>> print d.array
[[1 3 --]]
>>> d.all()
True

'''
        conform_args = self.conform_args(revert_to_file=True)
              
        for partition in self.partitions.flat():
            array = partition.conform(**conform_args)
            partition.close()
            if not array.all():
                return False
        #--- End: for

        return True
    #--- End: def

    def any(self):
        '''

Test whether any data array elements evaluate to True.

Performs a logical or over the data array and returns the
result. Masked values are considered as False during computation.

**Examples**

>>> print d.array
[[0 0 0]]
>>> d.any()
False

>>> print d.array
[[-- 0 0]]
>>> d.any()
False

>>> print d.array
[[0 3 0]]
>>> d.any()
True

'''         
        conform_args = self.conform_args(revert_to_file=True)

        for partition in self.partitions.flat():
            array = partition.conform(**conform_args)
            partition.close()
            if array.any():
                return True
        #--- End: for

        return False
    #--- End: def

    def binary_mask(self):
        '''

Return a binary missing data mask of the data array.

The binary mask's data array comprises dimensionless 8-bit integers
and has 0 where the data array has missing data and 1 otherwise.

:Returns:

    out : Data
        The binary mask.

**Examples**

>>> print d.mask.array
[[ True False  True False]]
>>> b = d.binary_mask().array
>>> print b
[[0 1 0 1]]

'''
        self.to_memory()

        # Work on a deep copy so that the original data is untouched
        binary_mask = self.copy()

        conform_args = binary_mask.conform_args(
            save=self.save_to_disk(numpy_dtype('int8').itemsize), dtype=None)

        # Replace each partition's data with its inverted mask as
        # int8: 1 where valid, 0 where missing
        for partition in binary_mask.partitions.flat():            
            array = partition.conform(**conform_args)        
            if numpy_ma_is_masked(array):
                # data is a numpy.ma.array instance and has a mask
                # with at least one True element.
                partition.data = numpy.ma.array(~array.mask, dtype='int8')
            else:
                # data is not a numpy.ma.array instance or data is a
                # numpy.ma.array instance and has a mask of
                # numpy.ma.nomask.
                partition.data = numpy.ma.ones(array.shape, dtype='int8')

            # The binary mask is dimensionless ('1')
            partition.Units = Units('1')
            partition.close()
        #--- End: for

        binary_mask.Units = Units('1')

        return binary_mask
    #--- End: def

    def clip(self, a_min, a_max, units=None):
        '''

Clip (limit) the values in the data array in place.

Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of [0, 1] is specified
then values smaller than 0 become 0 and values larger than 1 become 1.

:Parameters:
 
    a_min : scalar
        The lower limit of the interval.

    a_max : scalar
        The upper limit of the interval.

    units : str or Units, optional
        The units of *a_min* and *a_max*. If set then the limits are
        converted to the data array's units before clipping.

:Returns: 

    None

**Examples**

'''
        if units is not None:
            # Convert the limits to the same units as the data array
            # (basestring: this module targets Python 2)
            if isinstance(units, basestring):
                units = Units(units)

            a_min = Units.conform(a_min, units, self.Units)
            a_max = Units.conform(a_max, units, self.Units)
        #--- End: if
            
        conform_args = self.conform_args()
            
        # Clip each partition's data in place
        for partition in self.partitions.flat():            
            array = partition.conform(**conform_args)            
            array.clip(a_min, a_max, out=array)
            partition.close()
        #--- End: for
    #--- End: def

    def close(self):
        '''

Close all referenced open files.

:Returns:

    None

**Examples**

>>> d.close()

'''    
        for partition in self.partitions.flat():
            partition.file_close()
    #--- End: def

            
    def copy(self):
        '''

Return a deep copy.

Equivalent to ``copy.deepcopy(d)``.

:Returns:

    out : 
        The deep copy.

**Examples**

>>> e = d.copy()

'''
        new            = type(self)()

        new.partitions = self.partitions.copy()

        new.dtype      = self.dtype
        new._FillValue = self._FillValue
        new.hardmask   = self.hardmask
        new._ndim      = self._ndim
        new._shape     = self._shape
        new._size      = self._size
        new.dimensions = self.dimensions[:]
        new.Units      = self.Units.copy()

        return new
    #--- End: def

    def cos(self):
        '''

Take the trigonometric cosine of the data array in place.

Units are accounted for in the calculation, so that the cosine of 90
degrees_east is 0.0, as is the cosine of 1.57079632 radians. If the
units are not equivalent to radians (such as Kelvin) then they are
treated as if they were radians.

The Units are changed to '1' (nondimensional).

:Returns:

    None

**Examples**

>>> d.Units
<CF Units: degrees_east>
>>> print d.array
[[-90 0 90 --]]
>>> d.cos()
>>> d.Units
<CF Units: 1>
>>> print d.array
[[0.0 1.0 0.0 --]]

>>> d.Units
<CF Units: m s-1>
>>> print d.array
[[1 2 3 --]]
>>> d.cos()
>>> d.Units
<CF Units: 1>
>>> print d.array
[[0.540302305868 -0.416146836547 -0.9899924966 --]]

'''
        # Convert to radians first if the units allow it, so that the
        # values passed to cos are in radians
        radians = Units('radians')
        if self.Units.equivalent(radians):
            self.Units = radians

        # Apply numpy.ma.cos element-wise to every partition
        self.func(numpy.ma.cos)

        # The result is dimensionless
        self.override_units('1')
    #--- End: def
    
    def dump(self, id=None):
        '''

Return a string containing a full description of the instance.

:Parameters:

    id : str, optional
       Set the common prefix of component names. By default the
       instance's class name is used.

:Returns:

    out : str
        A string containing the description.

**Examples**

>>> x = d.dump()
>>> print d.dump()
>>> print d.dump(id='data1')

'''
        if id is None:
            id = self.__class__.__name__
            
        string = ['%s.shape = %s' % (id, self.shape)]

        if self.size == 1:
            string.append('%s.first_datum = %s' % (id, self.first_datum))
        else:
            string.append('%s.first_datum = %s' % (id, self.first_datum))
            string.append('%s.last_datum  = %s' % (id, self.last_datum))
        #-- End: if
            
        for attr in ('_FillValue', 'Units'):
            string.append('%s.%s = %s' % (id, attr,
                                          repr(getattr(self, attr))))
        #--- End: for

        return '\n'.join(string)
    #--- End: def

    def iterindices(self):
        '''

Return an iterator over indices of the data array.

:Returns:

    out : generator
        An iterator over indices of the data array.

**Examples**

>>> d.shape
(2, 1, 3)
>>> for index in d.iterindices():
...     print index
...
(0, 0, 0)
(0, 0, 1)
(0, 0, 2)
(1, 0, 0)
(1, 0, 1)
(1, 0, 2)

'''
        return iterindices([(0, n) for n in self.shape])
    #--- End: def

    def equals(self, other, rtol=None, atol=None, traceback=False):
        '''

True if two data arrays are logically equal, False otherwise.

:Parameters:

    other : 
        The object to compare for equality.

    atol : float, optional
        The absolute tolerance for all numerical comparisons, By
        default the value returned by the `ATOL` function is used.

    rtol : float, optional
        The relative tolerance for all numerical comparisons, By
        default the value returned by the `RTOL` function is used.

    traceback : bool, optional
        If True then print a traceback highlighting where the two
        instances differ.

:Returns: 

    out : bool
        Whether or not the two instances are equal.

**Examples**

>>> d.equals(d)
True
>>> d.equals(d + 1)
False

'''
        # Check each instance's id
        if self is other:
            return True
 
        # Check that each instance is the same type
        if self.__class__ != other.__class__:
            return False

        # Check that each instance has the same shape
        if self.shape != other.shape:
            if traceback:
                print("%s: Different shape: %s, %s" %
                      (self.__class__.__name__, self.shape, other.shape))
            return False
        #--- End: if

        # Check that each instance has the same units
        if self.Units != other.Units:
            if traceback:
                print("%s: Different Units: %s, %s" %
                      (self.__class__.__name__,
                       repr(self.Units), repr(other.Units)))
            return False
        #--- End: if

        # Check that each instance has the same fill value
        if self._FillValue != other._FillValue:
            if traceback:
                print("%s: Different _FillValue: %s, %s" %
                      (self.__class__.__name__,
                       self._FillValue, other._FillValue))
            return False
        #--- End: if

        # ------------------------------------------------------------
        # Check that each instance has the array values
        # ------------------------------------------------------------
        # Set default tolerances
        if rtol is None:
            rtol = RTOL()
        if atol is None:
            atol = ATOL()        

        conform_args = self.conform_args(revert_to_file=True)

        # Bring other's data into memory so that the per-partition
        # indexing below is cheap
        other.to_memory()

        # Compare partition by partition, so at most one of self's
        # partitions is in memory at once
        for partition in self.partitions.flat():
            array0 = partition.conform(**conform_args)
            array1 = other[partition.indices].varray
            partition.close()
            if not _allclose(array0, array1, rtol=rtol, atol=atol):
                if traceback:
                    print("%s: Different data arrays" %
                          self.__class__.__name__)
                return False
        #--- End: for

        # ------------------------------------------------------------
        # Still here? Then the two instances are equal
        # ------------------------------------------------------------
        return True            
    #--- End: def

    def expand_dims(self, axis=0, dim='None', direction=True):
        '''

Insert a new size-one dimension into the data array in place.

No check is done for *dim* already being in self.dimensions.

Not to be confused with the `expand_partitions_dims` method.

:Parameters:

    axis : int, optional
        The position at which to insert the new dimension.

    dim : str, optional
        The name of the new dimension.
        NOTE(review): the default is the *string* ``'None'``, not the
        ``None`` object — confirm this is intentional before relying
        on it.

    direction : bool, optional
        The direction of the new dimension.

:Returns:

    None

'''
        # Expand direction
        if self.isscalar:
            self.direction = {dim: direction}
        else:
            self.direction[dim] = direction

        # Expand dimensions, shape and ndim
        self.dimensions.insert(axis, dim)
        self._ndim += 1
        shape = list(self._shape)
        shape.insert(axis, 1)   
        self._shape = tuple(shape)

        # Expand the locations map: each partition spans the whole of
        # the new size-one dimension
        range01 = (0, 1)
        for partition in self.partitions.flat():
            partition.location.insert(axis, range01)
            partition.shape.insert(axis, 1)
    #--- End: def

    def flat(self, ignore_masked=True):
        '''

Return a flat iterator over elements of the data array.

:Parameters:

    ignore_masked : bool, optional
        If False then masked and unmasked elements will be returned. By
        default only unmasked elements are returned

:Returns:

    out : generator
        An iterator over elements of the data array.

**Examples**

>>> print d.array
[[1 -- 3]]
>>> for x in d.flat():
...     print x
...
1
3

>>> for x in d.flat(False):
...     print x
...
1
--
3

'''
        self.to_memory()

        mask = self.mask

        if ignore_masked:
            for index in self.iterindices():
                if not mask[index]:
                    yield self[index].array.item()
        else:
            for index in self.iterindices():
                if not mask[index]:
                    yield self[index].array.item()
                else:
                    yield numpy.ma.masked
    #--- End: def

    def override_units(self, units):
        '''

Override the data array units in place.

Not to be confused with setting the `Units` attribute to units which
are equivalent to the original units. This is different because in
this case the new units need not be equivalent to the original ones
and the data array elements will not be changed to reflect the new
units.

:Parameters:

    units : str or Units
        The new units for the data array.

:Returns:

    None

**Examples**

>>> d.Units
<CF Units: hPa>
>>> d.first_datum
1012.0
>>> d.override_units('km')
>>> d.Units
<CF Units: km>
>>> d.first_datum
1012.0
>>> d.override_units(cf.Units('watts'))
>>> d.Units
<CF Units: watts>
>>> d.first_datum
1012.0

'''
        # basestring: this module targets Python 2
        if isinstance(units, basestring):
            units = Units(units)
 
        conform_args = self.conform_args()

        for partition in self.partitions.flat():            
            if partition.Units == units:
                # No need to conform the array if its original units
                # match the original Data units
                partition.Units = units.copy()
                continue
            #--- End: if

            # Conform the partition to the current Data units before
            # relabelling it with the new units, so that its values
            # are not rescaled twice
            partition.conform(**conform_args)
            partition.Units = units.copy()
            partition.close()
        #--- End: for

        # Each partition holds its own copy of the units object
        self.Units = units.copy()
    #--- End: def

    def to_disk(self):
        '''

Store the data array on disk in place.

There is no change to sub-arrays with data that are already on disk.

:Returns:

    None

**Examples**

>>> d.to_disk()

'''
        # NOTE(review): this definition is shadowed by a second
        # `to_disk` defined later in the class body, so this version
        # is never bound at run time. Consider removing one of the two
        # definitions.
        self.partitions.to_disk()
    #--- End: def

    @property
    def in_memory(self):
        '''

**Examples**

>>> 

'''
        for partition in self.partitions.flat():
            if not partition.in_memory:
                return False
        #--- End: for
        return True
    #--- End: def

    def to_disk(self):
        '''

Store each partition's data on disk in place.

There is no change to partitions with data that are already on disk.


:Returns:

    None

**Examples**

>>> d.to_disk()

'''
        # NOTE(review): this is the second definition of `to_disk` in
        # the class body; it silently replaces the earlier one.
        for partition in self.partitions.flat():
            if partition.in_memory:
                partition.to_disk()
    #--- End: def

#    def to_memory(self, regardless=False):
#        '''
#
#Store the data array in memory if it is smaller than the chunk size.
#
#There is no change to sub-arrays with data that are already in memory.
#
#:Parameters:
#    
#    regardless : bool, optional
#        If True then store the data array in memory regardless of its
#        size. By default only store the data array in memory if it is
#        smaller than the chunk size.
#
#:Returns:
#
#    None
#
#**Examples**
#
#>>> d.to_memory()
#>>> d.to_memory(True)
#
#'''
#        self.partitions.to_memory(regardless)
#    #--- End: def

    def to_memory(self, regardless=False):
        '''

Store each partition's data in memory in place if the master array is
smaller than the chunk size.

There is no change to partitions with data that are already in memory.

:Parameters:
    
    regardless : bool, optional
        If True then store all partitions' data in memory regardless
        of the size of the master array. By default only store all
        partitions' data in memory if the master array is smaller than
        the chunk size.

:Returns:

    None

**Examples**

>>> pa.to_memory()
>>> pa.to_memory(True)

'''
        if not self.save_to_disk() or regardless:
            conform_args = self.conform_args(save=False, dtype=None)

            for partition in self.partitions.flat():
                if partition.on_disk:
                    partition.conform(**conform_args)
                    # Note: No need to close the partition
    #--- End: def

    def equivalent(self, other, rtol=None, atol=None, 
                   transpose=False, squeeze=True, use_directions=False):                   
        '''

Equivelence is defined as both instances having the same data arrays
after accounting for different but equivalent units, size one
dimensions (if `squeeze` is True), different dimension directions (if
`use_directions` is True) and different dimension orders (if
`transpose` is set).

:Parameters:

    other : Data

    transpose : dict, optional

    squeeze : bool, optional

    atol : float, optional
        The absolute tolerance for all numerical comparisons, By
        default the value returned by the `ATOL` function is used.

    rtol : float, optional
        The relative tolerance for all numerical comparisons, By
        default the value returned by the `RTOL` function is used.

    use_directions : bool, optional

:Returns:

    out : bool
        Whether or not the two variables are equivalent.

'''     
        if not self.Units.equivalent(other.Units):
            return 
       
        data1 = other.copy()
        
        if squeeze:
            data0 = self.copy()
            data0.squeeze()
            data1.squeeze()
        else:
            data0 = self

        if data0.ndim != data1.ndim:
            return
 
        data0_dimensions = data0.dimensions
        data1_dimensions = data1.dimensions        

        if transpose:
            axes = [data0_dimensions.index(transpose[dim1]) for dim1 in data1_dimensions]
            data1.transpose(axes)
        #--- End: if

        if data0.shape != data1.shape:
            return
 
        if use_directions and self.size > 1:            
            data0_direction = data0.direction
            data1_direction = data1.direction
            
            axes = [dim1
                    for dim1, dim0 in zip(data0_dimensions, data1_dimensions)
                    if data1_direction[dim1] != data0_direction[dim0]]
            data1.flip(axes)
        #--- End: if
        
        data1.Units      = data0.Units.copy()
        data1._FillValue = data0._FillValue

        return data0.equals(data1, rtol=rtol, atol=atol, traceback=False)
    #--- End: def

    def change_dimension_names(self, dim_name_map):
        '''

Change the dimension names.

The dimension names are arbitrary (though unique), so mapping them to
another arbitrary (though unique) set does not change the data array
values, units, dimension directions nor dimension order.

**Examples**

>>> d.dimensions
['dim0', 'dim1', 'dim2']
>>> dim_name_map
{'dim0': 'dim1',
 'dim1': 'dim0',
 'dim2': 'dim2',
 'dim3': 'dim3'}
>>> d.change_dimension_names(dim_name_map)
>>> d.dimensions
['dim1', 'dim0', 'dim2']

'''

        # Dimension order
        self.dimensions = [dim_name_map[dim] for dim in self.dimensions]

        self.partitions.change_dimension_names(dim_name_map)
    #--- End: def

    def conform_args(self, save=False, **kwargs):
        '''
        
Return a dictionary of arguments for the Partition object's `conform`
method.

The values are inferred from the state of the Data object and any
keyword arguments.

:Parameters:

    save : bool, optional
        Set the 'save' key. By default set to `False`.
    
    kwargs :
    

:Returns:

    out : dict

**Examples**


'''
        conform_args = {'dimensions': self.dimensions,
                        'direction' : self.partitions.direction,
                        'units'     : self.Units,
                        'hardmask'  : self.hardmask}

        if save is None:
            save = self.save_to_disk()
            
        conform_args['save'] = save
            
        conform_args['dtype'] = self.dtype

        if kwargs:
            conform_args.update(kwargs)

        return conform_args
    #--- End: def

#    def equivalent(self, other, rtol=None, atol=None):
#        '''
#
#Equivelence is defined as both instances having the same data arrays
#after accounting for different but equivalent units, different
#dimension directions (unless `use_directions` is False) and different
#dimension orders. Equivalent data arrays must have equal numbers of
#dimensions, unless both have one or fewer dimensions.
#
#:Parameters:
#
#    other : Data
#
#    atol : float, optional
#        The absolute tolerance for all numerical comparisons, By
#        default the value returned by the `ATOL` function is used.
#
#    rtol : float, optional
#        The relative tolerance for all numerical comparisons, By
#        default the value returned by the `RTOL` function is used.
#
#:Returns:
#
#    out : bool
#        Whether or not the two variables are equivalent.
#
#'''
#        if self.size != other.size:
#            return False
#        
#        if not self.Units.equivalent(other.Units):
#            return False
#
#        data0 = self.copy()
#        data1 = other.copy()
#
#        data0.squeeze()
#        data1.squeeze()
#        
#        data1.Units = data0.Units.copy()
#
#        data1._FillValue = data0._FillValue
#
#        return data0.equals(data1, rtol=rtol, atol=atol, traceback=False)
#    #--- End: def

    def expand_partition_dims(self, pdim):
        '''

Insert a new size 1 partition dimension in place.

The new partition dimension is inserted at position 0.

Not to be confused with the `expand_dims` method.

:Parameters:

    pdim : str
        The name of the new partition dimension.

:Returns:

    None

**Examples**

>>> d.pdimensions
['dim0', 'dim1']
>>> d.expand_partition_dims('dim2')
>>> d.pdimensions
['dim2', 'dim0', 'dim1']

'''
        self.partitions.expand_dims(pdim)
    #--- End: def
        
    def new_dimension_name(self):
        '''

Return a dimension name not being used by the data array.

Note that a partition of the data array may have dimensions which
don't belong to the data array itself.

:Returns:

    out : str
        The new dimension name.

**Examples**

>>> d.dimensions
['dim1', 'dim0']
>>> d.partitions.info('dimensions')
[['dim0', 'dim0'],
 ['dim1', 'dim0', 'dim2']]
>>> d.new_dimension_name()
'dim3'

'''
        all_dim_names = set()
        for partition in self.partitions.flat():
            all_dim_names.update(set(partition.dimensions))

        N   = len(all_dim_names)
        dim = 'dim%d' % N
        while dim in all_dim_names:
            N += 1
            dim = 'dim%d' % N
        #--- End: while

        return dim
    #--- End: def

    def partition_boundaries(self):
        '''

Return the partition boundaries of the data array.

Delegates to the partition matrix's `partition_boundaries` method,
passing this Data object's dimension order.

:Returns:

    out :
        The partition boundaries, as returned by the partition
        matrix's `partition_boundaries` method.

'''            
        return self.partitions.partition_boundaries(self.dimensions)
    #--- End: def

    def flip(self, axes=None):
        '''

Flip dimensions of the data array in place.

.. note:: NOTE(review): when `axes` is None (or an empty sequence)
   this method currently returns immediately without flipping
   anything, which contradicts the "By default all dimensions are
   flipped" statement below (the ``default=range(self.ndim)`` passed
   to `_parse_axes` is unreachable for ``axes=None``) — confirm which
   behaviour is intended.

:Parameters:

    axes : int or sequence of ints
        Flip the dimensions whose positions are given. By default all
        dimensions are flipped.

:Returns:

    out : list of ints
        The axes which were flipped, in arbitrary dimensions.

**Examples**

>>> d.flip()
>>> d.flip(1)

>>> e = d[::-1, :, ::-1]
>>> d.flip([2, 0]).equals(e)
True

'''
        def _reverse_across_partitions(partitions, pdimensions, pdim):
            '''
            Reverse, in place, the nested partition lists along the
            partition dimension `pdim`.
            '''
            if not pdimensions:
                return    
            if pdim == pdimensions[-1]:
                # pdim is the partition dimension at this nesting
                # level, so reverse this list directly
                partitions.reverse()
            else:
                # Descend one nesting level and recurse
                pdimensions = pdimensions[0:-1]
                if not pdimensions:
                    return    
                for nested_partitions in partitions:
                    # Recursive call
                    _reverse_across_partitions(nested_partitions, pdimensions, pdim)
        #--- End: def

        # Return immediately for axes=None or an empty sequence
        # (axes=0 deliberately falls through and is treated as a real
        # axis)
        if axes is None or (not axes and axes != 0):
            return

        # Scalar data. Allow axis in (0, None) to reverse the
        # direction
        if self.isscalar:
            if axes:
                raise ValueError(
                    "Can't reverse: Invalid axis for this array: %s" % (axes,))
            self.direction = not self.direction
            return False  # DCH
        #--- End: if

        dimensions = self.dimensions       

        # Map each requested axis position to its dimension name
        axes = self._parse_axes(axes, 'flip', default=range(self.ndim))
        axes2 = dict([(i, dimensions[i]) for i in axes])

        # Reverse each of the requested dimensions    

        partitions  = self.partitions
        direction   = self.direction
        shape       = self.shape
        pdimensions = self.pdimensions
        reversed_partition_array = False

        # NOTE: dict.iteritems is Python 2 only
        for i, dim in axes2.iteritems():
            if shape[i] == 1:
                # Flipping a size 1 dimension is a no-op
                continue

            # Toggle the recorded direction of this dimension
            direction[dim] = not direction[dim]
            if dim in pdimensions:
                # The dimension spans partitions, so the partition
                # matrix itself must also be reversed along it
                reversed_partition_array = True
                _reverse_across_partitions(partitions, pdimensions, dim)
        #--- End: for

        # Recreate the location map
        if reversed_partition_array:
            self._set_location_map()

        return axes2.keys()
    #--- End: def

    def save_to_disk(self, itemsize=None):
        '''

Put the data array on disk.

:Parameters:

    itemsize : int, optional

:Returns:

    out : bool

'''
        if not itemsize:
            if not hasattr(self, 'dtype'):
                raise ValueError(
                    "save_to_disk: Must set itemsize if there is no dtype")

            itemsize = self.dtype.itemsize
        #--- End: if

        if self.size > CHUNKSIZE()/(itemsize + 1.0):
            # The size of the data array (including a mask) is greater
            # than the chunk size, so return True
            return True

        if free_memory() < FM_THRESHOLD():
            # The available memory is less than the minimum amount
            # that we want to try to keep free, so return True
            return True

        # Still here? Then this array is small enough to remain in memory
        return False
    #--- End: def

    def setitem(self, value, indices=Ellipsis, condition=None, masked=None,
                ref_mask=None, hardmask=None):
        '''

Set selected elements of the data array in place.

At most one of `condition`, `masked` and `ref_mask` may be set. If
none is set then the elements identified by `indices` are assigned
`value` directly (equivalent to ``self[indices] = value``).

:Parameters:

    value :
        The value to assign. If it is not ``numpy.ma.masked`` then it
        is converted to a Data object, must be logically scalar
        (size 1) and must have units absent or equivalent to this
        instance's units.

    indices : optional
        Indices of the data array to operate on. By default the
        entire array is considered.

    condition : optional
        If set, assign `value` only to elements which compare equal
        to `condition`.

    masked : bool, optional
        If False then assign `value` to the unmasked elements. If
        True then unmask the masked elements and assign `value` to
        them.

    ref_mask : optional
        If set, assign `value` to elements selected by comparison of
        the data's mask with this reference mask.

    hardmask : bool, optional
        Temporarily override the instance's hardmask setting for this
        operation. The original setting is restored before returning.

:Returns:

    None

'''
        # Remember the hardmask setting so it can be restored on exit
        original_hardmask = self.hardmask
        if hardmask is None:
            hardmask = self.hardmask
        else:
            self.hardmask = hardmask        

        # Count how many of the mutually exclusive options were given
        args = [condition, masked, ref_mask]
        n_set = len(args) - args.count(None)
        
        if n_set > 1:
            raise ValueError("can't set more than one thing")

        if not n_set:
            # --------------------------------------------------------
            # Set elements described by indices
            # --------------------------------------------------------
            self[indices] = value
        
        else:
            conform_args = self.conform_args()
        
            if value is not numpy_ma_masked:
                if not isinstance(value, Data):
                    # Convert the value to a Data object with this
                    # instance's units
                    value = type(self)(value, units=self.Units)
                else:
                    value = value.copy()
                    if value.Units and value.Units.equivalent(self.Units):
                        value.Units = self.Units
                    elif not value.Units:
                        value.override_units(self.Units)
                    else:
                        raise ValueError(" Some bad units %s, %s" %
                                         (repr(self.Units), repr(value.Units)))
                #--- End: if

                # The following options all require a logically scalar
                # value
                if value.size > 1:
                    raise ValueError("not size 1 oooooh")

                value = value.first_datum
            #--- End: if

            if indices is not Ellipsis:
                indices, dummy = _parse_indices(self, indices)
        
            if masked is not None:
                if not masked:
                    # ------------------------------------------------
                    # Set unmasked elements
                    # ------------------------------------------------
                    # A hard mask means assignment leaves masked
                    # elements untouched
                    self.hardmask = True
                    self[indices] = value

#                    if indices is Ellipsis:
#                        # Do this everywhere
#                        for partition in self.partitions.flat():
#                            partition.conform(**conform_args)
#                            partition.data[...] = value
#                            partition.close()
#                        #--- End: for
#                
#                    else:
#                        # Do this only for the specified indices
#                        for partition in self.partitions.flat():
#                            p_indices, shape = _partition_overlaps(partition, indices)
#                            if not p_indices:
#                                # This partition does not overlap the indices
#                                continue
#                            
#                            array = partition.conform(**conform_args)
#                            partition.data[p_indices] = value
#                            partition.close()
#                        #--- End: for    
#
                else:
                    # ------------------------------------------------
                    # Unmask masked elements and set them
                    # ------------------------------------------------
                    # A soft mask is needed so that masked elements
                    # may be overwritten
                    conform_args['hardmask'] = False
                    if indices is Ellipsis:
                        # Do this everywhere
                        for partition in self.partitions.flat():
                            array = partition.conform(**conform_args)
                            mask = numpy.ma.getmaskarray(array)
                            partition.data = numpy_ma_where(mask==True, value, array)
                            partition.close()
                        #--- End: for

                    else:
                        # Do this only for the specified indices
                        for partition in self.partitions.flat():
                            p_indices, shape = _partition_overlaps(partition, indices)
                            if not p_indices:
                                # This partition does not overlap the indices
                                continue

                            array = partition.conform(**conform_args)
                            array = array[p_indices]
                            mask = numpy.ma.getmaskarray(array)
                            partition.data[p_indices] = numpy_ma_where(mask==True,
                                                                       value, array)
                            partition.close()
                        #--- End: for    
                #--- End: if

            elif ref_mask is not None:
                # ----------------------------------------------------
                # Set elements which correspond to False elements of
                # another array-like object.
                # ----------------------------------------------------
                self_mask = self.mask
                self_mask[indices] = ref_mask

                if indices is Ellipsis:
                    # Do this everywhere
                    for partition in self.partitions.flat():
                        array = partition.conform(**conform_args)
                        mask  = self_mask[partition.indices].varray
                        # NOTE(review): the result of numpy_ma_where
                        # is bound to the local `array` but never
                        # written back to partition.data (compare the
                        # other branches) — confirm whether this
                        # should be `partition.data = ...`
                        array = numpy_ma_where(array.mask==mask, value, array)
                        partition.close()
                    #--- End: for
                        
                else:
                    # Do this only for the specified indices
                    for partition in self.partitions.flat():
                        p_indices, shape = _partition_overlaps(partition, indices)
                        if not p_indices:
                            # This partition does not overlap the indices
                            continue
                        
                        array = partition.conform(**conform_args)
                        array = array[p_indices]
                        mask  = self_mask[partition.indices]
                        mask  = mask[p_indices].varray
                        partition.data[p_indices] = numpy_ma_where(array.mask==mask,
                                                                   value, array)
                        partition.close()
                    #--- End: for
        
            elif condition is not None:
                # ----------------------------------------------------
                # Set elements whose values pass a condition.
                # ----------------------------------------------------
                if indices is Ellipsis:
                    # Do this everywhere
                    for partition in self.partitions.flat():
                        array = partition.conform(**conform_args)
                        partition.data = numpy_ma_where(array==condition,
                                                        value, array)
                        partition.close()
                    #--- End: for

                else:
                    # Do this only for the specified indices
                    for partition in self.partitions.flat():
                        p_indices, shape = _partition_overlaps(partition, indices)
                        if not p_indices:
                            # This partition does not overlap the indices
                            continue
                        
                        array = partition.conform(**conform_args)
                        array = array[p_indices]
                        partition.data[p_indices] = numpy_ma_where(array==condition,
                                                                   value, array)
                        partition.close()
            #--- End: if

        #--- End: if

        # Reset hardmask
        self.hardmask = original_hardmask
    #--- End: def

    def setmask(self, value, indices=Ellipsis):
        '''

Set selected elements of the data array's mask in place.

The value to which the selected elements of the mask will be set may
be any object which is broadcastable across the selected elements. The
broadcasted value may be of any data type but will be evaluated as
boolean.

Unmasked elements are set to the fill value.

The mask may be effectively removed by setting every element to False
with ``f.setmask(False)``.

Note that if and only if the value to be assigned is logically scalar
and evaluates to True then ``f.setmask(value, indices)`` is equivalent
to ``f.setitem(cf.masked, indices)``. This is consistent with the
behaviour of numpy masked arrays.

:Parameters:

    value : array-like
        The value to which the selected elements of the mask will be
        set. Must be an object which is broadcastable across the
        selected elements.

    indices : optional
        Indices of the data array. Only elements of the mask described
        by the indices are set to `value`. By default, the entire mask
        is considered.

:Returns:

    None

**Examples**

'''
        # The mask must be soft for the duration of this operation;
        # the original setting is restored on exit
        original_hardmask = self.hardmask
        self.hardmask = False

        conform_args = self.conform_args() #dtype=None)
             
        if value is not numpy_ma_masked:
            if not isinstance(value, Data):
                value = type(self)(value)

            # Collapse all-False and all-True values to the special
            # numpy.ma singletons so the cheap branches below apply
            if not value.any():
                # Every element is False => remove the mask
                value = numpy_ma_nomask
            elif value.all():
                # Every element is True => mask everything
                value = numpy_ma_masked
        #--- End: if

        if indices is not Ellipsis:
            indices, dummy = _parse_indices(self, indices)

        if value is numpy_ma_nomask:
            # ------------------------------------------------
            # Unmask elements
            # ------------------------------------------------
            if indices is Ellipsis:
                # Set the mask to False everywhere
                for partition in self.partitions.flat():
                    array = partition.conform(**conform_args)
                    if numpy_ma_is_masked(array):
                        # Replace masked elements with the fill value
                        # and drop the now-redundant mask
                        array[...] = array.filled()
                        array.shrink_mask()
                    #--- End: if

                    partition.close()
                #--- End: for
                  
            else:
                # Set the mask to False at the selected indices
                for partition in self.partitions.flat():
                    p_indices, shape = _partition_overlaps(partition, indices)
                    if not p_indices:
                        # This partition does not overlap the indices
                        continue
                    
                    array = partition.conform(**conform_args)
                    
                    a = array[p_indices]
                    if numpy_ma_is_masked(a):
                        array[p_indices] = a.filled()
                        array.shrink_mask()
                    #--- End: if

                    partition.close()
                #--- End: for    

        elif value is numpy_ma_masked:
            # --------------------------------------------------------
            # Mask elements
            # --------------------------------------------------------  
            if indices is Ellipsis:
                # Set the mask to True everywhere
                for partition in self.partitions.flat():
                    array = partition.conform(**conform_args)
                    array.mask = numpy_ma_masked
                    partition.close()
                #--- End: for
                  
            else:
                # Set the mask to True at the selected indices
                for partition in self.partitions.flat():
                    p_indices, shape = _partition_overlaps(partition, indices)
                    if not p_indices:
                        # This partition does not overlap the indices
                        continue
                    
                    array = partition.conform(**conform_args)
                    array[p_indices] = numpy_ma_masked
                    partition.close()
                #--- End: for   
                
        else:
            # --------------------------------------------------------
            # Set mask from an array
            # --------------------------------------------------------
            # Build the full new mask first, then distribute the
            # relevant slice of it to each partition
            mask = self.mask
            mask[indices] = value

            if indices is Ellipsis:
                # Set the mask everywhere
                for partition in self.partitions.flat():
                    array = partition.conform(**conform_args)
                    array.mask = mask[partition.indices].varray
                    array.shrink_mask()
                    partition.close()
                #--- End: for
            
            else:
                # Set the mask at the selected indices
                for partition in self.partitions.flat():
                    p_indices, shape = _partition_overlaps(partition, indices)
                    if not p_indices:
                        # This partition does not overlap the indices
                        continue
                    
                    array = partition.conform(**conform_args) 
                    array.mask = mask[partition.indices].varray
                    array.shrink_mask()
                    partition.close()
                #--- End: for    
        #--- End: if

        # Reset hardmask
        self.hardmask = original_hardmask
    #--- End: def

    def sin(self):
        '''
Take the trigonometric sine of the data array in place.

Units are accounted for in the calcualtion, so that the the sine of 90
degrees_east is 1.0, as is the sine of 1.57079632 radians. If the
units are not equivalent to radians (such as Kelvin) then they are
treated as if they were radians.

The Units are changed to '1' (nondimensionsal).

:Returns:

    None

**Examples**

>>> d.Units
<CF Units: degrees_north>
>>> print d.array
[[-90 0 90 --]]
>>> d.sin()
>>> d.Units
<CF Units: 1>
>>> print d.array
[[-1.0 0.0 1.0 --]]

>>> d.Units
<CF Units: m s-1>
>>> print d.array
[[1 2 3 --]]
>>> d.sin()
>>> d.Units
<CF Units: 1>
>>> print d.array
[[0.841470984808 0.909297426826 0.14112000806 --]]

'''
        radians = Units('radians')
        if self.Units.equivalent(radians):
            self.Units = radians

        self.func(numpy.ma.sin)

        self.override_units('1')
    #--- End: def

    def squeeze(self, axes=None):
        '''

Remove size 1 dimensions from the data in place.

:Parameters:

    axes : (sequence of) int or str
        The axes to be squeezed. May be one of, or a sequence of any
        combination of:

            * The integer position of a dimension in the data array
              (negative indices allowed).
            * The internal name a dimension.

:Returns:

    out : list of ints
        The axes which were squeezed as a tuple of their positions.

**Examples**

>>> v.shape
(1,)
>>> v.squeeze()
>>> v.shape
()

>>> v.shape
(1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1)
>>> v.squeeze((0,))
>>> v.shape
(2, 1, 3, 1, 4, 1, 5, 1, 6, 1)
>>> v.squeeze(1)
>>> v.shape
(2, 3, 1, 4, 1, 5, 1, 6, 1)
>>> v.squeeze([2, 4])
>>> v.shape
(2, 3, 4, 5, 1, 6, 1)
>>> v.squeeze()
>>> v.shape
(2, 3, 4, 5, 6)

'''
        if self.isscalar:
            if axes or axes == 0:
                raise ValueError(
                    "Can't squeeze: Can't remove a dimension from a scalar %s" %
                    self.__class__.__name__)
            return []
        #--- End: if

        shape = list(self.shape)

        axes = self._parse_axes(axes, 'squeeze')
        if not axes:
            axes = [i for i in range(self.ndim) if shape[i] == 1]

            # Check the squeeze dimensions
            for i in axes:
                if shape[i] > 1:
                    raise ValueError(
                        "Can't squeeze: Can't remove dimension of size > 1" %
                        self.__class__.__name__)
        #--- End: if

        if not axes:
            return axes

        # Still here? Then the data array is not scalar and at least
        # one size 1 dimension needs squeezing.
        dimensions     = self.dimensions
        direction = self.direction

        dims = [dimensions[i] for i in axes]

        for dim in dims:
            i = dimensions.index(dim)
            shape.pop(i)
            dir = direction.pop(dimensions.pop(i))
            for partition in self.partitions.flat():
                partition.location.pop(i)
                partition.shape.pop(i)
        #--- End: for

        self._ndim  = len(shape)
        self._shape = tuple(shape)

        # Set the direction if the data has been squeezed to a scalar
        if self.isscalar:
            self.direction = dir

        # Remove size 1 partition dimensions       
        self.partitions.squeeze()

        return axes
    #--- End: def

    def transpose(self, axes=None):
        '''
        
Permute the dimensions of the data array in place.

:Parameters:

    axes : sequence, optional
        The new order of the data array. By default, reverse the
        dimensions' order, otherwise the axes are permuted according
        to the values given. The values of the sequence may be any
        combination of:

            * The integer position of a dimension in the data array.
            * The internal name a dimension.

:Returns:

    None

**Examples**

>>> d.ndim
3
>>> d.transpose()
>>> d.transpose([1, 0, 2])
>>> d.transpose(['dim2', 'dim0', 'dim1'])
>>> d.transpose((1, 0, 'dim2'))

'''
        ndim  = self.ndim
        dimensions = self.dimensions
        
        if ndim <= 1:
            return
        
        # Parse the axes. By default, reverse the dimensions
        axes = self._parse_axes(axes, 'transpose',
                                default=range(ndim-1, -1, -1))

        # Return unchanged if axes are in the same order as the data
        if axes == range(ndim):
            return

        if len(axes) != ndim:
            raise ValueError(
                "Can't tranpose: Axes don't match array: %s" % (axes,))

        # Permute the order
        self.dimensions = [dimensions[i] for i in axes]

        # Permute the shape
        shape = self.shape
        self._shape = tuple([shape[i] for i in axes])
        
        # Permute the locations map
        for partition in self.partitions.flat():
            location = partition.location
            shape    = partition.shape
            partition.location = [location[i] for i in axes]
            partition.shape    = [shape[i]    for i in axes]
    #--- End: def

    def func(self, f, *args, **kwargs):
        '''

Apply an element-wise array operation to the data array in place.

:Parameters:

    f : function
        The function to be applied.

    args, kwargs : 
        Any arguments and keyword arguments passed to the function
        given by the `f` paramaeter.

:Returns:

   None

**Examples**

>>> print d.array
[[ 0.          1.57079633]
 [ 3.14159265  4.71238898]]
>>> import numpy
>>> d.func(numpy.cos)
>>> print d.array
[[ 1.0  0.0]
 [-1.0  0.0]]
>>> def f(x, y, a=0):
...     return x*y + a
...
>>> d.func(f, 2, a=10)
>>> print d.array
[[ 12.0  10.0]
 [-12.0  10.0]]

'''      
        conform_args = self.conform_args()

        for partition in self.partitions.flat():
            array = partition.conform(**conform_args)
            partition.data = f(array, *args, **kwargs)
            partition.close()
        #--- End: for
    #--- End: def

#--- End: class

    
def _size_of_index(index, size=None):
    '''

Return the number of elements resulting in applying an index to a
sequence.

:Parameters:

    index : slice or list of ints
        The index being applied to the sequence.

    size : int, optional
        The number of elements in the sequence being indexed. Only
        required of `index` is a slice object.

:Returns:

    out : int
        The length of the sequence resulting from applying the index.

**Examples**

>>> _size_of_index(slice(None, None, -2), 10)
5
>>> _size_of_index([1, 4, 9])
3

'''
    if isinstance(index, slice):
        # Index is a slice object
        start, stop, step = index.indices(size)
        div, mod = divmod(stop-start, step)
        if mod != 0:
            div += 1
        return div
    else:
        # Index is a list of integers
        return len(index)
#--- End: def

def _parse_indices(data, indices):
    '''

Parse indices into a form in which every slice has a positive step and
every integer-list index is strictly monotonically increasing.

Dimensions whose indices had to be reversed to achieve this are
returned separately so that the caller can undo the reversal on the
result.

:Parameters:

   data : 
       The object being indexed. Must have a `shape` attribute.

   indices : sequence of indices

:Returns:

    parsed_indices, reverse_dimensions : {list, list} 
        The parsed indices and the positions of the dimensions which
        must be reversed after the parsed indices have been applied.

:Raises:

    ValueError :
        If an integer-list index is not strictly monotonic.

**Examples**

'''
    parsed_indices = parse_indices(data, indices)

    reverse_dimensions = []

    for i, index in enumerate(parsed_indices):      

        if isinstance(index, slice):
            size = data.shape[i]
            # NOTE(review): assumes parse_indices returns slices with a
            # non-None step -- confirm against parse_indices
            if index.step < 0:              
                # If the slice step is negative, then transform the
                # original slice to a new slice with a positive step
                # such that the result of the new slice is the reverse
                # of the result of the original slice.
                #
                # For example, if the original slice is slice(6,0,-2)
                # then the new slice will be slice(2,7,2):
                #
                # >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
                # >>> a[slice(6, 0, -2)]
                # [6, 4, 2]
                # >>> a[slice(2, 7, 2)]
                # [2, 4, 6]
                # >>> a[slice(6, 0, -2)] == list(reversed(a[slice(2, 7, 2)]))
                # True
                start, stop, step = index.indices(size)
                step    *= -1
                div, mod = divmod(start-stop-1, step)
                div_step = div*step
                start   -= div_step
                stop     = start + div_step + 1
                
                index = slice(start, stop, step)
                reverse_dimensions.append(i)         
            #--- End: if      

            # If step is greater than one, make sure that index.stop isn't
            # bigger than it needs to be.
            if index.step > 1:
                start, stop, step = index.indices(size)
                div, mod = divmod(stop-start-1, step)
                stop     = start + div*step + 1
                index    = slice(start, stop, step)
            #--- End: if

            parsed_indices[i] = index    

        elif len(index) > 1:
            # --------------------------------------------------------
            # Check that an integer list is strictly monotonic and if
            # it's descending reverse it so that it's ascending. A
            # list of fewer than two integers is trivially monotonic
            # and is left unchanged.
            # --------------------------------------------------------
            step = index[1] - index[0]

            if step > 0:                        
                if not all(x < y for x, y in zip(index, index[1:])):
                    raise ValueError("Bad slice (not strictly monotonic): %s" % index)

            elif step < 0:
                if not all(x > y for x, y in zip(index, index[1:])):
                    raise ValueError("Bad slice (not strictly monotonic): %s" % index)
                
                # Reverse the list in place so that it's strictly
                # monotonically increasing and make a note that this
                # dimension will need reversing later
                index.reverse()
                reverse_dimensions.append(i)
            else:
                # Step is 0, so at least two elements are equal
                raise ValueError("Bad slice (not strictly monotonic): %s" % index)
        #--- End: if
    #--- End: for
    
    return parsed_indices, reverse_dimensions
#--- End: def

def _partition_overlaps(partition, indices):
    '''

Find where, if anywhere, a partition overlaps a subset of the master
array.

:Parameters:

   partition : Partition
       The partition to test. Its `location` attribute gives, for
       each dimension, the (start, stop) range occupied by the
       partition in the master array.

   indices : tuple
       A tuple of indices describing a subset of the master array,
       one index (a slice or a list of integers) per dimension.
       NOTE(review): the arithmetic below assumes slices have a
       positive, non-None step and integer lists are strictly
       ascending, as produced by `_parse_indices` -- confirm that all
       callers normalize their indices first.

:Returns:

    p_indices, shape : {tuple, list}
        If the partition overlaps the indices then return a tuple of
        indices which will subset the partition's data to where it
        overlaps the master indices and the subsetted partition's
        shape as a list. Otherwise return `([], [])`.

**Examples**

>>> indices = (slice(None), slice(5, 1, -2), [1,3,4,8])
>>> _parse_indices must be applied to indices first; then
>>> _partition_overlaps(p, indices)

'''
    p_indices = []
    shape    = []

    # Walk the dimensions in lockstep: one master index and one
    # (start, stop) location range per dimension
    for index, (r0, r1) in zip(indices, partition.location):
        
        if isinstance(index, slice):
            
            # Size of the partition along this dimension
            size = r1 - r0
            # Clip the slice's stop to the partition's extent and
            # shift to the partition's local coordinates
            stop = size
            if index.stop < r1:
                stop -= (r1 - index.stop)
              
            # Shift the slice's start to the partition's local
            # coordinates. If it falls before the partition, advance
            # it by whole steps until it lands inside: for a negative
            # local start, start % step is the first local element on
            # the slice's stride.
            start = index.start - r0
            if start < 0:
                start %= index.step   # start is now +ve
            
            if start >= stop:
                # This partition is not in the slice
                return [], []
                
            # Still here?
            index      = slice(start, stop, index.step)
            index_size = _size_of_index(index, size)

        else:
            # Index is a list of integers
            if not set(index).intersection(range(r0, r1)):
                # This partition is not in the slice
                return [], []
            
            # Still here? Keep only the integers which lie inside this
            # partition, shifted to the partition's local coordinates
            index = [x - r0
                     for x in index
                     if r0 <= x < r1]
            index_size = len(index)
            # Replace a contiguous (or contiguous reversed) run of
            # integers with an equivalent, cheaper slice.
            # NOTE(review): these list == range comparisons rely on
            # Python 2, where range returns a list -- they are always
            # False under Python 3
            if index == range(index_size):
                index = slice(0, index_size, 1)
            else:
                if index == range(index_size-1,-1,-1):
                    index = slice(index_size-1, None, -1)
        #--- End: if

        # Still here?
        p_indices.append(index)
        shape.append(index_size)
    #--- End: for
        
    # Still here? Then the elements of this partition specified by
    # p_indices are in the slice
    return tuple(p_indices), shape
#--- End: def

def _overlapping_partitions(partitions, indices, dim2position):
    '''

Return the nested list of (modified) partitions which overlap the
indices.

Each overlapping partition in the output is a copy of the original
with its `part` and `shape` attributes adjusted to the overlapping
region.

:Parameters:

    partitions : PartitionArray

    indices : tuple
        A tuple of indices describing a subset of the master array.

    dim2position : dict
        A mapping of dimension names to their positions.

:Returns:

    out : list
        A (possibly nested) list of Partition objects.

**Examples**

>>> type f.Data
<class 'cf.data.Data'>
>>> d.dimensions
['dim1', 'dim2', 'dim0']
>>> dim2position = {'dim0': 2, 'dim1': 0, 'dim2' : 1}
>>> indices = (slice(None), slice(5, 1, -2), [1,3,4,8])
>>> x = _overlapping_partitions(d.partitions, indices, dim2position)
>>> type(x)
list

'''
    result = []

    if not isinstance(partitions[0], Partition):
        # Not yet at the innermost level, so recurse into each nested
        # sequence of partitions, keeping only non-empty results
        for nested in partitions:
            overlapping = _overlapping_partitions(nested, indices,
                                                  dim2position)
            if overlapping:
                result.append(overlapping)
        #--- End: for
        return result
    #--- End: if

    # Innermost level: a flat sequence of Partition objects
    for original in partitions:
        # Find out if this partition overlaps the original slice
        p_indices, shape = _partition_overlaps(original, indices)

        if not p_indices:
            # This partition is not in the slice
            continue

        # Still here? Then make a copy of the partition restricted to
        # the overlapping region
        new = original.copy()
        new.part  = new.new_part(list(p_indices), dim2position,
                                 partitions.direction)
        new.shape = shape
        result.append(new)
    #--- End: for

    return result
#--- End: def
