import numpy
import time
from operator   import attrgetter, itemgetter
from hashlib    import sha1 as hashlib_sha1
from copy       import deepcopy
from .                   import __version__
from .ancillaryvariables import AncillaryVariables
from .fieldlist          import FieldList
from .transform          import Transform
from .units              import Units
from .functions          import flat, RTOL, ATOL, equals

# Properties that apply to a whole file rather than to an individual
# data variable, as defined in Appendix A of the CF conventions.
_global_properties = {'comment',
                      'Conventions',
                      'history',
                      'institution',
                      'references',
                      'source',
                      'title',
                      }

# Simple data-variable properties from Appendix A of the CF
# conventions (excluding the non-simple ones, and 'long_name').
_data_properties = {'add_offset',
                    'cell_methods',
                    '_FillValue',
                    'flag_masks',
                    'flag_meanings',
                    'flag_values',
                    'missing_value',
                    'scale_factor',
                    'standard_error_multiplier',
                    'standard_name',
                    'units',
                    'valid_max',
                    'valid_min',
                    'valid_range',
                    }

# Canonical "unspecified" units, shared by all users of this module
_no_units = Units()

class _Meta(object):
    '''

A summary of a field's metadata.

'''
    _canonical_units = {}
    '''
doof
'''

    _canonical_cell_methods = []
    '''
woof
'''

    def __init__(self, f, rtol=None, atol=None, messages=False,
                 strict_units=True, strict_identity=True,
                 equal_all=False, equal_ignore=(), equal=(),
                 exist_all=False, exist_ignore=(), exist=()):
        '''

**initialization**

:Parameters:

    f : Field

    messages : bool

    strict_units : bool, optional
        If False then assume that fields or their components (such as
        coordinates) with the same identity but missing units all have
        equivalent (but unspecified) units, so that aggregation may
        occur. By default such fields are not aggregatable.

    strict_identity : bool, optional
        If False then treat fields with data arrays but no identities
        as having equal identities, and so are potentially
        aggregatable. By default fields with data arrays but no
        identities are not aggregatable.

**Examples**

'''
        self._nonzero = False

        self.messages = messages
        
        # ------------------------------------------------------------
        # Field
        # ------------------------------------------------------------
        self.field    = f
        self.hasData  = f.hasData
        self.identity = f.identity()

        if self.identity is None:
            if strict_identity and self.hasData:
                if messages:
                    print(
"Field has data but no identity and strict_identity=%s: %s" %
(strict_identity, repr(f)))
                return
        else:
            if not self.hasData:
                if messages:
                    print(
"Field has identity but no data and strict_identity=%s: %s" %
(strict_identity, repr(f)))
                return
        #--- End: if

        space = f.space
        self.space = space
 
        self.units = self.canonical_units(f, self.identity,
                                          strict_units=strict_units)

        self.cell_methods = self.canonical_cell_methods(rtol=rtol, atol=atol)

        # ------------------------------------------------------------
        # Formula_terms
        # ------------------------------------------------------------
        self.formula_terms = {}
        self.transform_ids = {}
        for transform_id, transform in space.transforms.iteritems():
            if not transform.isgrid_mapping:
                name = transform.name
                self.formula_terms[name] = transform
                self.transform_ids[name] = transform_id
        #--- End: for

        # ------------------------------------------------------------
        # Ancillary variables
        # ------------------------------------------------------------
        self.ancillary_variables = self.find_ancillary_variables()
        if self.ancillary_variables is None:            
            return
           
        # ------------------------------------------------------------
        # Coordinate and cell measure arrays
        # ------------------------------------------------------------
        self.hash_values  = {}
        self.first_values = {}
        self.last_values  = {}
        self.first_bounds = {}
        self.last_bounds  = {}

        self.id_to_dim = {}
        self.dim_to_id = {}

        aux_coords_1d = {}
        aux_coords_Nd = {}
        for aux, coord in space.aux_coords().iteritems():
            if coord.ndim <= 1:
                aux_coords_1d[aux] = coord
            else:
                aux_coords_Nd[aux] = coord
        #--- End: for
            
        # ----------------------------------------------------------------
        # 1-d dimension and auxiliary coordinates
        # ----------------------------------------------------------------
        self.all_identities = set()
        self.dim = {}
        self.sort_indices = {}
        self.sort_keys    = {}
    
        for dim in space.dimension_sizes:
    
            # List some information about each 1-d coordinate which spans
            # this dimension. The order of elements in this is arbitrary,
            # as ultimately it will get sorted by the 'name' key values.
            #
            # For example:
            # >>> info
            # [{'name': 'time', 'key': 'dim0', 'units': <CF Units: ...>},
            #  {'name': 'forecast_ref_time', 'key': 'aux0', 'units': <CF Units: ...>}]
            info = []
    
            if dim in space: 
                # This dimension of the space has a dimension coordinate
                coord = space[dim]

                identity = self.coord_has_identity_and_data(coord)
                if identity is None:
                    return

                # Find the canonical units for this dimension
                # coordinate
                units = self.canonical_units(coord, identity,
                                             strict_units=strict_units)
    
                info.append({'identity'  : identity,
                             'key'       : dim,
                             'units'     : units,
                             'isbounded' : coord.isbounded,
                             'transforms': self.find_transforms(coord)})

                dim_coord_id = identity
    
            else:
                # This dimension of the space does not have a dimension
                # coordinate
                dim_coord_id = None
            #--- End: if
    
            # Find the 1-d auxiliary coordinates which span this
            # dimension
            aux_coords = {}
            for aux in aux_coords_1d.keys():
                if dim in space.dimensions[aux]:
                    aux_coords[aux] = aux_coords_1d.pop(aux)
            #--- End: for
    
            for aux, coord in aux_coords.iteritems():
                
                identity = self.coord_has_identity_and_data(coord)
                if identity is None:
                    return
    
                # Find the canonical units for this 1-d auxiliary
                # coordinate
                units = self.canonical_units(coord, identity,
                                             strict_units=strict_units)

                info.append({'identity'  : identity,
                             'key'       : aux,
                             'units'     : units,
                             'isbounded' : coord.isbounded,
                             'transforms': self.find_transforms(coord)})
            #--- End: for
    
            if not info:
                if messages:
                    print(
"Can't aggregate %s: A dimension has no 1-d coordinates" % repr(f))
                return
            #--- End: if
    
            # Sort the info by identity
            info.sort(key=itemgetter('identity'))
    
            # Find the canonical identity for this dimension
            identity = info[0]['identity']

            self.dim[identity] = \
                {'ids'       : tuple([i['identity']   for i in info]),
                 'keys'      : tuple([i['key']        for i in info]),
                 'units'     : tuple([i['units']      for i in info]),
                 'isbounded' : tuple([i['isbounded']  for i in info]),
                 'transforms': tuple([i['transforms'] for i in info])}
            
            d = self.dim[identity]
            if dim_coord_id is not None:
                d['dim_coord_index'] = d['ids'].index(dim_coord_id)
            else:
                d['dim_coord_index'] = None
    
            # Map the space's dimensions to their canonical names
            self.id_to_dim[identity] = dim
            self.dim_to_id[dim]      = identity
        #--- End: for
    
        # Create a sorted list of the canonical dimension names
        self.dim_ids = sorted(self.dim)
    
        self.calculate_hash_values = set(self.dim_ids)

        # ------------------------------------------------------------
        # N-d auxiliary coordinates
        # ------------------------------------------------------------
        self.aux = {}
        for aux, coord in aux_coords_Nd.iteritems():
           
            identity = self.coord_has_identity_and_data(coord)
            if identity is None:
                return

            # Find this N-d auxiliary coordinate canonical units
            units = self.canonical_units(coord, identity,
                                         strict_units=strict_units)
            
            # Find this N-d auxiliary coordinate's canonical dimension
            # names
            dim_ids = [self.dim_to_id[dim] for dim in space.dimensions[aux]]
            dim_ids = tuple(sorted(dim_ids))

            self.aux[identity] = {'key'       : aux,
                                  'units'     : units,
                                  'dim_ids'   : dim_ids,
                                  'isbounded' : coord.isbounded,
                                  'transforms': self.find_transforms(coord)}
        #--- End: for
    
        # ------------------------------------------------------------
        # Cell measures
        # ------------------------------------------------------------
        self.cm = {}
        info    = {}
        for key, cm in space.cell_measures().iteritems():
            
            if not self.cell_measure_has_data_and_units(cm):
                return

            # Find the canonical units for this cell measure
            units = self.canonical_units(cm, cm.identity(),
                                         strict_units=strict_units)
            
            # Find this cell measure's canonical dimension names
            dim_ids = [self.dim_to_id[dim] for dim in space.dimensions[key]]
            dim_ids = tuple(sorted(dim_ids))
            
            if units in info:
                # Check for ambiguous cell measures, i.e. those which have
                # the same units and span the same dimensions
                for value in info[units]:
                    if dim_ids == value['dim_ids']:
                        if messages:
                           print(
"Can't aggregate %s: Ambiguous '%s' cell measures" % (repr(f), repr(units)))
                        return
            else:
                info[units] = []
            #--- End: if
    
            info[units].append({'key'    : key,
                                'dim_ids': dim_ids})
        #--- End: for
    
        # For each cell measures' canonical unit, sort the info by
        # dimension names
        for units, value in info.iteritems():
            value.sort(key=itemgetter('dim_ids'))        
            self.cm[units] = {'keys'   : tuple([i['key']     for i in value]),
                              'dim_ids': tuple([i['dim_ids'] for i in value]),
                              'hash_values': (None,) * len(value)}
        #--- End: for

        # ------------------------------------------------------------
        # Properties and attributes
        # ------------------------------------------------------------
        properties = set(f.properties)
        properties.difference_update(_data_properties)

        # Properties which must exist in both fields and are equal
        x = []

        if equal:            
            p = dict([(prop, f.getprop(prop)) for prop in equal
                      if prop not in equal_ignore and prop in properties])
            properties.difference_update(p)
            x.append(tuple(sorted(p.items())))
        else:
            x.append(None)

        if equal_all:        
            p = dict([(prop, f.getprop(prop))
                      for prop in (properties.difference(('long_name',)) - 
                                   _global_properties.union(exist_ignore))])
            properties.difference_update(p)
            x.append(tuple(sorted(p.items())))
        else:
            x.append(None)

        self.equal_properties = tuple(x)

        # Properties which might not exist in either field and are not
        # necessarily equal
        self.properties = properties        

        # Properties which must exist in both fields but are not
        # necessarily equal
        if exist_all:
            exist_properties = properties.difference(('long_name',))
            exist_properties.difference_update(_global_properties)
        else:
            exist_properties = set()

        if exist:
            exist_properties.update(exist)

        exist_properties.difference_update(exist_ignore)

        self.exist_properties = tuple(sorted(exist_properties))

        # Attributes
        self.attributes = set(('file',))

        # ----------------------------------------------------------------
        # Still here? Then create the structural signature.
        # ----------------------------------------------------------------
        self.structural_signature()

        self._nonzero = True
    #--- End: def

    def __nonzero__(self):
        '''
x.__nonzero__() <==> bool(x)

'''
        return self._nonzero
    #--- End: if

    def __repr__(self):
        '''
x.__repr__() <==> repr(x)

'''
        return '<CF %s: %s>' % (self.__class__.__name__,
                                repr(getattr(self, 'field', None)))
    #--- End: def

    def __str__(self):
        '''
x.__str__() <==> str(x)

'''
        strings = []
        for attr in sorted(self.__dict__):
            strings.append('%s.%s = %s' % (self.__class__.__name__, attr,
                                           repr(getattr(self, attr))))
            
        return '\n'.join(strings)
    #--- End: def

    def canonical_units(self, variable, identity, strict_units=True):
        '''

Updates the `_canonical_units` attribute.

:Parameters:

    variable : Variable

    identity : str

    strict_units : bool
        If False then assume that variables with the same identity but
        missing units all have equal units. By default fields
        containing such variables are not aggregatable.

    strict_units : bool, optional
        If False then assume that fields or their components (such as
        coordinates) with the same identity but missing units all have
        equivalent (but unspecified) units, so that aggregation may
        occur. By default such fields are not aggregatable.

    strict_identity : bool, optional
        If False then treat fields with data arrays but no identities
        as having equal identities, and so are potentially
        aggregatable. By default fields with data arrays but no
        identities are not aggregatable.

:Returns:

    out : Units or None

**Examples**

'''
        var_units = variable.Units

        _canonical_units = self._canonical_units

        if identity in _canonical_units:
            if var_units:
                for u in _canonical_units[identity]:
                    if var_units.equivalent(u):
                        return u
                #--- End: for
    
                # Still here?
                _canonical_units[identity].append(var_units)

            elif not strict_units or variable.dtype.kind == 'S':
                var_units = _no_units
        else:
            if var_units:
                _canonical_units[identity] = [var_units]                
            elif not strict_units or variable.dtype.kind == 'S':
                var_units = _no_units
        #--- End: if

        # Still here?
        return var_units
    #--- End: def

    def canonical_cell_methods(self, rtol=None, atol=None):
        '''

Updates the `_canonical_cell_methods` attribute.

:Parameters:

    atol : float

    rtol : float

:Returns:

    out : CellMethods or None

**Examples**

'''
        cell_methods = getattr(self.field, 'cell_methods', None)

        if cell_methods is None:
            return None

        _canonical_cell_methods = self._canonical_cell_methods

        for cm in _canonical_cell_methods:
            if cell_methods.equivalent(cm, rtol=rtol, atol=atol):
                return cm
        #--- End: for
               
        # Still here?
        _canonical_cell_methods.append(cell_methods)

        return cell_methods
    #--- End: def

    def cell_measure_has_data_and_units(self, cm):
        '''

:Parameters:

    cm : CellMethods

:Returns:

    out : bool

**Examples**

'''
        if cm.Units and cm.hasData:
            return True
        
        if self.messages:
            print("Cell measure has no units or no data: %s" %
                  repr(self.field))
    #--- End: def

    def coord_has_identity_and_data(self, coord):
        '''

:Parameters:

    coord : Coordinate

:Returns:

    out : str or None

**Examples**

'''
        identity = coord.identity()


        if identity is not None:
            all_identities = self.all_identities

            if identity in all_identities:
                if self.messages:
                    print("Field has duplicate '%s' coordinates: %s" %
                          (identity, repr(self.field)))
                return None
            #--- End: if

            if coord.hasData or (coord.isbounded and coord.bounds.hasData):
                all_identities.add(identity)
                return identity
        #--- End: if

        if self.messages:
            print("Coordinate has no identity or no data: %s" %
                  repr(self.field))
            
        # Return None
        return None
    #--- End: def

    def find_ancillary_variables(self):
        '''

:Returns:

    out : dict or None

**Examples**

'''
        field = self.field

        if not hasattr(field, 'ancillary_variables'):
            return {}

        ancillary_variables = {}
        for f in field.ancillary_variables:
            identity = f.identity()
            if identity in ancillary_variables:
                if self.messages:
                    print("Field has duplicate '%s' ancillary variables: %s" %
                          (identity, repr(self.field)))

                return None
            #--- End: if
            ancillary_variables[identity] = f
        #--- End: for

        return ancillary_variables
    #--- End: def

    def structural_signature(self, global_properties=None,
                             ignore_properties=None):
        '''

:Returns:

    out : tuple

**Examples**

'''
        f = self.field    
      
        # Initialize the structual signature the presence of a data array,
        # the identity, the canonical units and the canonical cell methods
        signature = [self.hasData, self.identity, self.units, self.cell_methods]
        
        # Add properties
        signature.append(self.equal_properties)
        
        signature.append(self.exist_properties)

        # Add Flags
        signature.append(getattr(f, 'Flags', None))
        
        # Add valid_min, valid_max, valid_range 
        signature.extend((getattr(f, 'valid_min'  , None),
                          getattr(f, 'valid_max'  , None),
                          getattr(f, 'valid_range', None)))

        # Add standard_error_multiplier
        signature.append(getattr(f, 'standard_error_multiplier', None))
        
        # Add ancillary variables to the structural signature
        if self.ancillary_variables:
            signature.append(tuple(sorted(self.ancillary_variables)))
        else:
            signature.append(None)
        
        # Add 1-d dimensions and 1-d auxiliary coordinates
        x = [(self.dim[identity]['ids'],
              self.dim[identity]['units'],
              self.dim[identity]['isbounded'],
              self.dim[identity]['transforms']) for identity in self.dim_ids]
        signature.append(tuple(x))
        
        x = [False if self.dim[identity]['dim_coord_index'] is None else True
             for identity in self.dim_ids]
        signature.append(tuple(x))
        
        # Add N-d auxiliary coordinates
        x = [(identity,
              self.aux[identity]['units'],
              self.aux[identity]['dim_ids'],
              self.aux[identity]['isbounded'],
              self.aux[identity]['transforms']) for identity in sorted(self.aux)]
        signature.append(tuple(x))
        
        # Add cell measures
        x = [(units,
              self.cm[units]['dim_ids']) for units in sorted(self.cm)]
        signature.append(tuple(x))
        
        self.signature = tuple(signature)
    #--- End: def

    def find_transforms(self, coord):
        '''

:Parameters:

    coord : Coordinate

:Returns:

    out : tuple or None

**Examples**

'''
        if not hasattr(coord, 'transforms'):
            return None
        
        transforms = self.space.transforms
        transforms = [transforms[t] for t in coord.transforms]

        # Formula terms        
        formula_terms = [t for t in transforms if not t.isgrid_mapping]
        if formula_terms:
            formula_terms = tuple(sorted([t.name for t in formula_terms]))
        else:
            formula_terms = ()

        # Grid mappings
        grid_mappings = [t for t in transforms if t.isgrid_mapping]
        if grid_mappings:
            grid_mappings.sort(key=attrgetter('name'))
            grid_mappings = tuple([tuple(sorted(t.items())) for t in grid_mappings])
            return formula_terms + grid_mappings
        #--- End: if

        return formula_terms
    #--- End: def

#--- End: class

##def _structural_signature(f, messages=False, rtol=None, atol=None,
##                          strict_units=True, strict_identity=True):
##    '''
##
##:Parameters:
##
##    f : Field
##        Create a structural signature for this field.
##    
##    messages : bool
##        If True then print informative messages. By default, don't.
##
##    strict_units : bool
##
##    strict_units : bool, optional
##        If False then assume that fields or their components (such as
##        coordinates) with the same identity but missing units all have
##        equivalent (but unspecified) units, so that aggregation may
##        occur. By default such fields are not aggregatable.
##
##    strict_identity : bool, optional
##        If False then treat fields with data arrays but no identities
##        as having equal identities, and so are potentially
##        aggregatable. By default fields with data arrays but no
##        identities are not aggregatable.
##
##:Returns:
##
##    out : _Meta
##
##'''
##    # Create the metadata summary
##    m = _Meta(f, messages=messages, rtol=rtol, atol=atol,
##              strict_units=strict_units, strict_identity=strict_identity)
##    if not m:
##        return    
##
##    # ----------------------------------------------------------------
##    # Still here? Then create the structural signature.
##    # ----------------------------------------------------------------
##
##    # Initialize the structual signature the presence of a data array,
##    # the identity, the canonical units and the canonical cell methods
##    signature = [m.hasData, m.identity, m.units, m.cell_methods]
##    
##    # Add ancillary variables
##    if m.ancillary_variables:
##        signature.append(tuple(sorted(m.ancillary_variables)))
##    else:
##        signature.append(None)
##    
##    # Add 1-d dimensions and 1-d auxiliary coordinates
##    x = [(m.dim[identity]['ids'],
##          m.dim[identity]['units'],
##          m.dim[identity]['isbounded'],
##          m.dim[identity]['transforms']) for identity in m.dim_ids]
##    signature.append(tuple(x))
##    
##    x = [False if m.dim[identity]['dim_coord_index'] is None else True
##         for identity in m.dim_ids]
##    signature.append(tuple(x))
##    
##    # Add N-d auxiliary coordinates
##    x = [(identity,
##          m.aux[identity]['units'],
##          m.aux[identity]['dim_ids'],
##          m.aux[identity]['isbounded'],
##          m.aux[identity]['transforms']) for identity in sorted(m.aux)]
##    signature.append(tuple(x))
##    
##    # Add cell measures
##    x = [(units,
##          m.cm[units]['dim_ids']) for units in sorted(m.cm)]
##    signature.append(tuple(x))
##    
##    m.signature = tuple(signature)
##
##    return m
###--- End: def

def _create_hash_and_first_values(meta):
    '''

Updates each field's _Meta object.

For each field, create hash values (and first/last values and bounds)
for its 1-d coordinates, N-d auxiliary coordinates and cell measures,
after sorting each dimension into a canonical order, so that fields
may be cheaply compared for aggregatability.

:Parameters:

    meta : list of _Meta

:Returns:

    None

'''
    for m in meta:

        calculate_hash_values = m.calculate_hash_values
        if not calculate_hash_values:
            # This field's hash values are already up to date, but the
            # remaining fields must still be processed. (A 'return'
            # here would silently skip them.)
            continue

        space = m.space

        # --------------------------------------------------------
        # Create a hash value for each metadata array
        # --------------------------------------------------------
        
        # --------------------------------------------------------
        # 1-d coordinates
        # --------------------------------------------------------
        for identity in calculate_hash_values:

            m_dim_identity = m.dim[identity]

            # Find the sort indices for this dimension ...
            dim = m.id_to_dim[identity]
            if dim in space:
                # ... which has a 1-d dimension coordinate
                has_dimension_coord = True
                m.sort_keys[dim] = dim
                if not space[dim].Data.direction[dim]:
                    # Reverse a decreasing dimension coordinate
                    sort_indices = slice(None, None, -1)
                else:
                    sort_indices = slice(None)
            else:
                # ... which doesn't have a dimension coordinate but
                #     does have one or more 1-d auxiliary coordinates
                has_dimension_coord = False
                aux = m_dim_identity['keys'][0]
                sort_indices = numpy.argsort(space[aux].array)
                m.sort_keys[dim] = aux
            #-- End: if
            m.sort_indices[dim] = sort_indices
            
            hash_values  = []
            first_values = []
            last_values  = []
            first_bounds = []
            last_bounds  = []

            for key, canonical_units in zip(m_dim_identity['keys'],
                                            m_dim_identity['units']):
                
                if has_dimension_coord:
                    coord = space[key].subspace[sort_indices]
                else:
                    # The sort indices are not a strictly monotonic
                    # sequence, so sort a copy's data in place.
                    coord = space[key].copy()
                    varray = coord.varray
                    varray[...] = varray[sort_indices]
                #--- End: if

                coord.Units = canonical_units

                if coord.dtype.kind != 'S':
                    # Hash on a common dtype so that equal values
                    # compare equal regardless of the stored dtype
                    coord.dtype = numpy.dtype(float)

                hash_value = hash(coord.Data)

                first_values.append(coord.Data[0])
                last_values.append(coord.Data[-1])

                if coord.isbounded:
                    hash_value = (hash_value, hash(coord.bounds.Data))

                    if key[:3] == 'dim':
                        # Record the bounds of the first and last
                        # (sorted) cells of a dimension coordinate.
                        m.first_bounds[identity] = sorted(coord.bounds.Data[ 0, :].array[0,:])
                        m.last_bounds[identity]  = sorted(coord.bounds.Data[-1, :].array[0,:])
                #--- End: if
                    
                hash_values.append(hash_value)
            #--- End: for
                
            m.hash_values[identity]  = hash_values
            m.first_values[identity] = first_values
            m.last_values[identity]  = last_values
        #--- End: for

        # ------------------------------------------------------------
        # N-d auxiliary coordinates
        # ------------------------------------------------------------
        for identity in m.aux:
            aux = m.aux[identity]
            dim_ids = aux['dim_ids']
            if not calculate_hash_values.intersection(dim_ids):
                # No dimension of this coordinate needs rehashing
                continue

            key = aux['key']
            
            sort_indices = tuple(
                [m.sort_indices[dim] for dim in space.dimensions[key]])
            try:
                coord = space[key].subspace[sort_indices]
            except ValueError:
                # At least one dimension's sort indices is not a
                # strictly monotonic sequence.
                coord = space[key].copy()
                varray = coord.varray
                varray[...] = varray[sort_indices]
            #--- End: try
                
            coord.Units = aux['units']

            # Transpose to the canonical (sorted) dimension order
            axes = [m.id_to_dim[identity] for identity in dim_ids]
            coord.transpose(axes)

            if coord.dtype.kind != 'S':
                coord.dtype = numpy.dtype(float)

            hash_value = hash(coord.Data)
            if coord.isbounded:
                hash_value = (hash_value, hash(coord.bounds.Data))
                
            aux['hash_value'] = hash_value
        #--- End: for
            
        # ------------------------------------------------------------
        # Cell measures
        # ------------------------------------------------------------
        for units in m.cm:
            cm = m.cm[units]

            hash_values = []
            for key, dim_ids, hash_value in zip(cm['keys'],
                                                cm['dim_ids'],
                                                cm['hash_values']):
                if not calculate_hash_values.intersection(dim_ids):
                    # Keep the existing hash value
                    hash_values.append(hash_value)
                    continue

                sort_indices = tuple(
                    [m.sort_indices[dim] for dim in space.dimensions[key]])
                try:
                    cmv = space[key].subspace[sort_indices]
                except ValueError:
                    # At least one dimension's sort indices is not a
                    # strictly monotonic sequence, so sort the copy's
                    # own data in place. (Note: this must operate on
                    # cmv, not on a stale 'coord' from the loops
                    # above.)
                    cmv = space[key].copy()
                    varray = cmv.varray
                    varray[...] = varray[sort_indices]
                #--- End: try

                cmv.Units = units

                # Transpose to the canonical (sorted) dimension order
                axes = [m.id_to_dim[identity] for identity in dim_ids]
                cmv.transpose(axes)

                if cmv.dtype.kind != 'S':
                    cmv.dtype = numpy.dtype(float)

                hash_values.append(hash(cmv.Data))
            #--- End: for

            cm['hash_values'] = hash_values
        #--- End: for

        # All of this field's hash values are now up to date. This
        # must be reset inside the loop so that every processed field
        # is marked, not only the last one.
        m.calculate_hash_values = set()
    #--- End: for

#--- End: def

def _group_fields(meta):
    '''
:Parameters:

    meta : list of _Meta

:Returns:

    out : list of FieldLists

'''
    if meta[0].dim_ids:
        sort_by_dim_ids = itemgetter(*meta[0].dim_ids)
        def _hash_values(m):
            return sort_by_dim_ids(m.hash_values)

        meta.sort(key=_hash_values)
    #--- End: if

    # Create a new group of potentially aggregatable fields (which
    # contains the first field in the sorted list)
    groups_of_fields = [[meta[0]]]

    for m0, m1 in zip(meta[:-1], meta[1:]):

        #-------------------------------------------------------------
        # Count the number of dimensions which are different between
        # the two fields
        # -------------------------------------------------------------
        count = 0
        hash0 = m0.hash_values
        hash1 = m1.hash_values
        for identity in m0.dim_ids:
            if hash0[identity] != hash1[identity]:
                count += 1
                a_identity = identity                
        #--- End: for

        ok = True
        if count == 1:
            # --------------------------------------------------------
            # Exactly one dimension has different 1-d coordinate
            # values
            # --------------------------------------------------------
            # Check N-d auxiliary coordinates
            for identity in m0.aux:
                if a_identity in m0.aux[identity]['dim_ids']:
                    continue

                if m0.aux[identity]['hash_value'] != m1.aux[identity]['hash_value']:
                    # There are N-d auxiliary coordinates which do not
                    # span the aggregating dimension and have
                    # different values
                    ok = False
                    break
            #--- End: for

            if not ok:
                break

            # Check cell measures
            for units in m0.cm:
                if a_identity in m0.cm[units]['dim_ids']:
                    continue
                
                if m0.cm[units]['hash_values'] != m1.cm[units]['hash_values']:
                    # There are cell measures which do not span the
                    # aggregating dimension and have different values
                    ok = False
                    break
            #--- End: for

            if not ok:
                break

            m0.a_identity = a_identity
            m1.a_identity = a_identity


        elif not count:   
            # --------------------------------------------------------
            # No dimensions have different 1-d coordinate values
            # --------------------------------------------------------
            ok = False
            if m0.messages:
                print("'%s' fields have identical 1-d coordinate values" %
                      m0.identity)

        else:   
            # --------------------------------------------------------
            # 2 or more dimensions have different 1-d coordinate
            # values
            # --------------------------------------------------------
            ok = False
        #--- End: if

        if ok:
            # Append field1 to this group of potentially aggregatable
            # fields
            groups_of_fields[-1].append(m1)
        else:
            # Create a new group of potentially aggregatable fields
            # (which contains field1)
            groups_of_fields.append([m1])
    #--- End: for

    return groups_of_fields
#--- End: def

def _sorted_by_first_values(meta):
    '''

Sort fields inplace

:Parameters:

    meta : list of _Meta

:Returns:

    None

''' 
    sort_by_dim_ids = itemgetter(*meta[0].dim_ids)
    def _first_values(m):
        return sort_by_dim_ids(m.first_values)

    meta.sort(key=_first_values)
#--- End: def

def _ok_coordinate_arrays(meta, overlap, contiguous):
    '''

Return True if the aggregating dimension's 1-d coordinates are all
aggregatable.

It is assumed that the input metadata objects have already been sorted
by the canonical first values of their 1-d coordinates.

:Parameters:

    meta : list of _Meta

    overlap : bool

    contiguous : bool

:Returns:

    out : bool

**Examples**

>>> if not _ok_coordinate_arrays(meta, True, False)
...     print "Don't aggregate"


'''
    m = meta[0]

    # Find the canonical identity of the aggregating dimension
    a_identity = m.a_identity

    dim_coord_index = m.dim[a_identity]['dim_coord_index']
    if dim_coord_index is not None:
        # ------------------------------------------------------------
        # The aggregating dimension has a dimension coordinate
        # ------------------------------------------------------------
        # Check for overlapping dimension coordinate cell centres
        dim_coord_index0 = dim_coord_index
        for m0, m1 in zip(meta[:-1], meta[1:]):
            dim_coord_index1 = m1.dim[a_identity]['dim_coord_index']
            if (m0.last_values[a_identity][dim_coord_index0] >=
                m1.first_values[a_identity][dim_coord_index1]):
                # Found overlap
                return

            dim_coord_index0 = dim_coord_index1        
        #--- End: for

        if a_identity in m.first_bounds:
            # --------------------------------------------------------
            # The dimension coordinates have bounds
            # --------------------------------------------------------
            if not overlap:
                for m0, m1 in zip(meta[:-1], meta[1:]):
                    if (m1.first_bounds[a_identity][0] <
                        m0.last_bounds[a_identity][1]):
                        # Do not aggregate anything in this group
                        # because overlapping has been disallowed and
                        # the first cell from field1 overlaps with the
                        # last cell from field0.
                        if m.messages:
                            print(
"Won't aggregate %s: overlap=False and group has overlapping '%s' coordinates" %
(repr(m.field), m.dim[a_identity]['ids'][dim_coord_index]))
                        return
                #--- End: for

            else:
                for m0, m1 in zip(meta[:-1], meta[1:]):
                    m0_last_bounds  = m0.last_bounds[a_identity]        
                    m1_first_bounds = m1.first_bounds[a_identity]
                    if (m1_first_bounds[0] <= m0_last_bounds[0] or
                        m1_first_bounds[1] <= m0_last_bounds[1]):
                        # Do not aggregate anything in this group
                        # because, even though overlapping has been
                        # allowed, the first cell from field1 overlaps
                        # in an unreasonable way with the last cell
                        # from field0.
                        if m.messages:
                            print(
"Can't aggregate %s: Group has unreasonably overlapping '%s' coordinates" %
(repr(m.field), m.dim[a_identity]['ids'][dim_coord_index]))
                        return
                #--- End: for
            #--- End: if

            if contiguous:
                for m0, m1 in zip(meta[:-1], meta[1:]):
                    if (m0.last_bounds[a_identity][1] <
                        m1.first_bounds[a_identity][0]):
                        # Do not aggregate anything in this group
                        # because contiguous coordinates have been
                        # specified and the first cell from field1 is
                        # not contiguous with the last cell from
                        # field0.
                        if m.messages:
                            print(
"Won't aggregate %s: contiguous=True and group has noncontiguous '%s' coordinates" %
(repr(m.field), m.dim[a_identity]['ids'][dim_coord_index]))
                        return
                #--- End: for

        elif contiguous:
            # --------------------------------------------------------
            # The dimension coordinates have no bounds and contiguous
            # coordinates have been specified, so do not aggregate
            # anything in this group.
            # --------------------------------------------------------
            if m.messages:
                print(
"Won't aggregate %s: contiguous=True and group has noncontiguous '%s' coordinates" %
(repr(m.field), m.dim[a_identity]['ids'][dim_coord_index]))
            return
        #--- End: if

    else:
        # ------------------------------------------------------------
        # The aggregating dimension does not have a dimension
        # coordinate but does have at least one 1-d auxiliary
        # coordinate
        # ------------------------------------------------------------
        # Check for duplicate auxiliary coordinate values
        for i in xrange(len(m.dim[a_identity]['keys'])):
            set_of_1d_aux_coord_values    = set()
            number_of_1d_aux_coord_values = 0
            for field in fields:
                aux = field._meta.dim[a_identity]['keys'][i]
                array = field.space[aux].array
                set_of_1d_aux_coord_values.update(array)
                number_of_1d_aux_coord_values += array.size
            #--- End: for

            if len(set_of_1d_aux_coord_values) != number_of_1d_aux_coord_values:
                # Found duplicate auxiliary coordinate values
                return
    #--- End: if
 
    # Still here? Then the aggregating dimension does not overlap
    # between any of the fields.
    return True
#--- End: def

def _aggregate_2_fields(m0, m1, rtol=None, atol=None, messages=False,    
                        strict_units=True, overlap=True, contiguous=False,
                        concatenate=True):
    '''

Aggregate the field of m1 into the field of m0 along their common
aggregating dimension (m0.a_identity), updating m0 in place:
transforms, ancillary variables, coordinates, cell measures, the data
array, dimension sizes, properties and attributes are all merged.

:Parameters:

    m0 : _Meta

    m1 : _Meta

    overlap : bool, optional
        See the `aggregate` function for details.

    contiguous : bool, optional
        See the `aggregate` function for details.
   
    rtol : float, optional
        See the `aggregate` function for details.

    atol : float, optional
        See the `aggregate` function for details.
   
    messages : bool, optional
        See the `aggregate` function for details.
   
    strict_units : bool, optional
        See the `aggregate` function for details.

    concatenate : bool, optional
        If True then join differing property/attribute values with an
        ' :AGGREGATED: ' separator; otherwise delete the differing
        property/attribute from the aggregated field.

:Returns:

    out : _Meta or None
        The updated m0 on success, or None if the transforms or
        ancillary variables could not be aggregated.

''' 
    # The canonical identity of the dimension being aggregated over
    a_identity = m0.a_identity
    
    # ----------------------------------------------------------------
    # Aggregate transforms. A falsy result aborts the whole
    # aggregation of this pair of fields.
    # ----------------------------------------------------------------
    if m0.formula_terms:
        t = _aggregate_transforms(m0, m1, rtol=rtol, atol=atol,
                                  strict_units=strict_units,
                                  overlap=overlap, messages=messages,
                                  contiguous=contiguous)
        if not t:
            return
    else:
        t = None
  
    # ----------------------------------------------------------------
    # Aggregate ancillary variables. A falsy result aborts the whole
    # aggregation of this pair of fields.
    # ----------------------------------------------------------------
    if m0.ancillary_variables:
        av = _aggregate_ancillary_variables(m0, m1, rtol=rtol,
                                            atol=atol,
                                            strict_units=strict_units,
                                            overlap=overlap, messages=messages,
                                            contiguous=contiguous)
        if not av:
            return
    else:
        av = None
 
    # Still here? Then both aggregations succeeded (or were not
    # needed), so commit their results to field0.
    if t:
        m0.space.transforms.update(t)

    if av:
        m0.field.ancillary_variables = av

    space0 = m0.space
    space1 = m1.space

    # ----------------------------------------------------------------
    # Map the dimensions of field1 to those of field0 via the
    # canonical identities shared by both fields. 'bounds' maps to
    # itself (presumably a pseudo-dimension name used for coordinate
    # bounds -- TODO confirm).
    # ----------------------------------------------------------------
    dim1_name_map = {'bounds': 'bounds'}
    for identity in m0.dim_ids:
        dim1_name_map[m1.id_to_dim[identity]] = m0.id_to_dim[identity]
    
    # ----------------------------------------------------------------
    # In each field, find the key of the aggregating dimension and its
    # direction
    # ----------------------------------------------------------------
    adim0 = m0.id_to_dim[a_identity]
    adim1 = m1.id_to_dim[a_identity]

    adim_direction0 = space0.direction(adim0)
    adim_direction1 = space1.direction(adim1)

    # ----------------------------------------------------------------
    # Find the insertion indices into field0 for each aggregating
    # dimension partition of field1. If field0 is increasing, field1's
    # partitions are appended; otherwise they are prepended, reversing
    # field1's partition order when its direction also differs.
    # NOTE(review): psize appears to be the number of partitions along
    # the aggregating dimension -- confirm against the Data class.
    # ----------------------------------------------------------------
    psize1 = space1[m1.sort_keys[adim1]].Data.psize
    if adim_direction0:
        psize0 = space0[m0.sort_keys[adim0]].Data.psize
        if adim_direction1:
            indices = range(psize0, psize0+psize1)
        else:
            indices = (psize0,) * psize1
    else:
        if not adim_direction1:
            indices = range(psize1)
        else:
            indices = (0,) * psize1
    #--- End: if
            
    # ----------------------------------------------------------------
    # Find matching pairs of coordinate and cell measure variables
    # which span the aggregating dimension
    # ----------------------------------------------------------------
    # 1-d coordinates of the aggregating dimension
    spanning_variables = [(space0[key0], space1[key1])
                          for key0, key1 in zip(m0.dim[a_identity]['keys'],
                                                m1.dim[a_identity]['keys'])]
    
    # N-d auxiliary coordinates which span the aggregating dimension
    for identity in m0.aux:
        if a_identity in m0.aux[identity]['dim_ids']:
            key0 = m0.aux[identity]['key']
            key1 = m1.aux[identity]['key']
            spanning_variables.append((space0[key0], space1[key1]))
    #--- End: for
    
    # Cell measures which span the aggregating dimension
    for units in m0.cm:
        for dim_ids, key0, key1 in zip(m0.cm[units]['dim_ids'],
                                       m0.cm[units]['keys'],
                                       m1.cm[units]['keys']):
            if a_identity in dim_ids:
                spanning_variables.append((space0[key0], space1[key1]))
    #--- End: for

                
    # ----------------------------------------------------------------
    # Mark the aggregating dimension as changed. This is important, as
    # it is the flag for the hash values of everything spanning the
    # aggregating dimension to be recalculated on the next pass.
    # ----------------------------------------------------------------
    m0.calculate_hash_values.add(a_identity)
               
    # ----------------------------------------------------------------
    # For each matching pair of coordinates and cell measures which
    # spans the aggregating dimension, insert the one from field1 into
    # the one from field0
    # ----------------------------------------------------------------
    for variable0, variable1 in spanning_variables:
        variable0._insert_data(variable1, indices,
                               adim0, adim_direction0, dim1_name_map)
    #--- End: for        
        
    # ----------------------------------------------------------------
    # Insert the data array from field1 into the data array of field0
    # ----------------------------------------------------------------
    field0 = m0.field
    field1 = m1.field

    if m0.hasData:
        field0._insert_data(field1, indices,
                            adim0, adim_direction0, dim1_name_map)
        
        # Update the data dimensions in field0
        space0.dimensions['data'] = field0.Data.dimensions[:]
    #--- End: if

    # Update the Aggregating Dimension size in field0
    space0.dimension_sizes[adim0] += space1.dimension_sizes[adim1]

    # Make sure that field0 has a standard_name, if possible: if
    # field0 only has an 'id' and field1 has a standard_name, adopt
    # field1's standard_name and drop the id.
    if getattr(field0, 'id', None) is not None:
        standard_name = getattr(field1, 'standard_name', None)
        if standard_name is not None:
            field0.standard_name = standard_name
            del field0.id
    #--- End: if

    # Update the properties in field0. Equal values are kept as-is;
    # differing values are either concatenated with an
    # ' :AGGREGATED: ' marker or removed, depending on 'concatenate'.
    for prop in m0.properties | m1.properties:
        value0 = field0.getprop(prop, None)
        value1 = field1.getprop(prop, None)
        if equals(value0, value1):
            continue

        if concatenate:
            if value1 is not None:
                if value0 is not None:
                    field0.setprop(prop, '%s :AGGREGATED: %s' % (value0, value1))
                else:
                    field0.setprop(prop, ' :AGGREGATED: %s' % value1)
        else:
            m0.properties.discard(prop)
            if value0 is not None:
                field0.delprop(prop)
    #--- End: for

    # Update the attributes in field0, with the same merge policy as
    # for the properties above.
    for attr in m0.attributes | m1.attributes:
        value0 = getattr(field0, attr, None)
        value1 = getattr(field1, attr, None)
        if equals(value0, value1):
            continue

        if concatenate:
            if value1 is not None:
                if value0 is not None:
                    setattr(field0, attr, '%s :AGGREGATED: %s' % (value0, value1))
                else:
                    setattr(field0, attr, ' :AGGREGATED: %s' % value1)
        else:
            m0.attributes.discard(attr)
            if value0 is not None:
                delattr(field0, attr)
    #--- End: for

    # ----------------------------------------------------------------
    # Return the aggregated field
    # ----------------------------------------------------------------
    return m0
#--- End: def

def _aggregate_transforms(m0, m1, rtol=None, atol=None, 
                          strict_units=True, overlap=True,
                          messages=False, contiguous=False):
    '''
Aggregate the transforms (formula_terms) of two fields.

Note: the original implementation built the result mapping from
whichever transform name happened to be processed last, silently
dropping all other aggregated transforms. The mapping is now
accumulated across all transforms.

:Parameters:

    m0 : _Meta

    m1 : _Meta

    overlap : bool, optional
        See the `aggregate` function for details.

    contiguous : bool, optional
        See the `aggregate` function for details.
   
    rtol : float, optional
        See the `aggregate` function for details.

    atol : float, optional
        See the `aggregate` function for details.
   
    messages : bool, optional
        See the `aggregate` function for details.
   
    strict_units : bool, optional
        See the `aggregate` function for details.

:Returns:

    out : dict or None
        A mapping of transform identifiers to aggregated Transform
        objects on success, or None (falsy) on failure.

'''
    a_identity = m0.a_identity

    # Accumulate the aggregated transforms for every formula_terms
    # name, not just the last one processed.
    new_transforms = {}

    # Check formula_terms fields
    for name, transform0 in m0.formula_terms.iteritems():
        transform1 = m1.formula_terms[name]

        new_transform = Transform()
        new_transform.name = transform0.name

        # Every term of transform1 must either exist in transform0 or
        # be a scalar zero (which may be safely ignored).
        for term, value1 in transform1.iteritems():
            if (term not in transform0 and
                not (value1.isscalar and value1.first_datum == 0.0)):
                if messages:
                    print("Won't aggregate %s: Incompatible '%s' transform" %
                          (repr(m0.field), name))
                return
        #--- End: for

        for term, value0 in transform0.iteritems():

            # Symmetric check: every term of transform0 must either
            # exist in transform1 or be a scalar zero.
            if (term not in transform1 and
                not (value0.isscalar and value0.first_datum == 0.0)):
                if messages:
                    print(
"Won't aggregate %s: Incompatible '%s' transform term '%s'" %
(repr(m0.field), name, term))
                return
            #--- End: if

            value1 = transform1[term]
  
            if (isinstance(value0, basestring) and
                isinstance(value1, basestring)):
                # ----------------------------------------------------
                # Both transform values are pointers to coordinates in
                # their respective fields
                # ----------------------------------------------------
                new_transform[term] = value0
                continue
            #--- End: if
           
            if isinstance(value0, basestring) or isinstance(value1, basestring):
                # One transform value is a pointer to a coordinate and
                # the other is not
                if messages:
                    print(
"Won't aggregate %s: Incompatible '%s' transform term '%s'" %
(repr(m0.field), name, term))
                return
            #--- End: if
                        
            # --------------------------------------------------------
            # Both transform values are fields
            # --------------------------------------------------------
            x0 = _Meta(value0, messages=messages, strict_units=strict_units,
                       strict_identity=False)
            x1 = _Meta(value1, messages=messages, strict_units=strict_units,
                       strict_identity=False)

            if not (x0 and x1):
                # At least one field doesn't have a structural
                # signature.
                if messages:
                    print(
"Won't aggregate %s: Incompatible '%s' transform term '%s'" %
(repr(m0.field), name, term))
                return
            #--- End: if

            if a_identity not in x0.dim and a_identity not in x1.dim:
                # Neither field spans the aggregating dimension ...
                if value0.equivalent_data(value1, rtol=rtol, atol=atol):
                    # ... and the fields have equivalent data
                    new_transform[term] = value0
                    continue
                else:
                    # ... and the fields do not have equivalent data
                    if messages:
                        print(
"Won't aggregate %s: Incompatible '%s' transform term '%s'" %
(repr(m0.field), name, term))
                    return
            #--- End: if

            if not (a_identity in x0.dim and a_identity in x1.dim):
                # Only one of the fields spans the aggregating
                # dimension
                if messages:
                    print(
"Won't aggregate %s: Incompatible '%s' transform term '%s'" %
(repr(m0.field), name, term))
                return
            #--- End: if
            
            # Both fields span the aggregating dimension. So let's
            # aggregate them.
            new_value = aggregate((value0, value1), messages=messages,
                                  overlap=overlap, contiguous=contiguous,
                                  strict_units=strict_units,
                                  strict_identity=False)
            
            if len(new_value) == 2:
                # Couldn't aggregate the two formula_terms fields (because we
                # got two back instead of one).
                if messages:
                    print(
" X Won't aggregate %s: Incompatible '%s' transform term '%s'" %
(repr(m0.field), name, term))
                return
            #--- End: if

            # Successfully aggregated the transform fields
            transform0[term] = new_value[0]
            new_transform[term] = new_value[0]
        #---End: for

        new_transforms[m0.transform_ids[name]] = new_transform
    #---End: for

    return new_transforms
#--- End: def

def _aggregate_ancillary_variables(m0, m1, rtol=None, atol=None,
                                   strict_units=True, overlap=True,
                                   messages=False, contiguous=False):
    '''
Aggregate two fields' ancillary variables.

Note: the failure return for a matching pair of non-spanning,
inequivalent ancillary variables was previously inside the
'if messages' block, so with messages=False the inequivalence was
silently ignored. The return now happens unconditionally, matching
the other failure paths in this function.

:Parameters:

    m0 : _Meta

    m1 : _Meta

    overlap : bool, optional
        See the `aggregate` function for details.

    contiguous : bool, optional
        See the `aggregate` function for details.
   
    rtol : float, optional
        See the `aggregate` function for details.

    atol : float, optional
        See the `aggregate` function for details.
   
    messages : bool, optional
        See the `aggregate` function for details.
   
    strict_units : bool, optional
        See the `aggregate` function for details.

:Returns:

    out : AncillaryVariables or None
        The aggregated ancillary variables on success, or None
        (falsy) on failure.

'''
    a_identity = m0.a_identity

    new_ancillary_variables = AncillaryVariables()

    # Check each matching pair of ancillary variables
    for identity, field0 in m0.ancillary_variables.iteritems():
        field1 = m1.ancillary_variables[identity]

        x0 = _Meta(field0, messages=messages, strict_units=strict_units,
                   strict_identity=True)
        x1 = _Meta(field1, messages=messages, strict_units=strict_units,
                   strict_identity=True)
        
        if not (x0 and x1):
            # At least one field doesn't have a structural signature.
            if messages:
                print(
                    "Won't aggregate %s: Incompatible '%s' ancillary variable" %
                    (repr(m0.field), identity))
            return
        #--- End: if
            
        if a_identity not in x0.dim and a_identity not in x1.dim:
            # Neither field spans the aggregating dimension ...
            if field0.equivalent_data(field1, rtol=rtol, atol=atol):
                # ... and the fields are equivalent
                new_ancillary_variables.append(field0)
                continue

            # ... and the fields are not equivalent
            if messages:
                print(
"A matching pair of ancillary variables which don't span the aggregating dimension have different values")
                print(
                    "Won't aggregate %s: Incompatible '%s' ancillary variable" %
                    (repr(m0.field), identity))
            # Fail regardless of whether messages are being printed
            return
        #--- End: if

        if not (a_identity in x0.dim and a_identity in x1.dim):
            # Only one of a matching pair of ancillary variables spans
            # the aggregating dimension
            if messages:
                print(
"Only one of a matching pair of ancillary variables spans the aggregating dimension")
                print(
                    "Won't aggregate %s: Incompatible '%s' ancillary variable" %
                    (repr(m0.field), identity))
            return
        #--- End: if
            
        # Both fields span the aggregating dimension. So let's
        # aggregate them.
        new_value = aggregate((field0, field1), messages=messages, 
                              overlap=overlap, contiguous=contiguous,
                              strict_units=strict_units, strict_identity=True)
        
        if len(new_value) == 2:
            # Couldn't aggregate the two ancillary variable fields
            # (because we got two back instead of one).
            if messages:
                print(
                    "Won't aggregate %s: Incompatible '%s' ancillary variable" %
                    (repr(m0.field), identity))
            return
        #--- End: if

        m0.ancillary_variables[identity] = new_value[0]
        new_ancillary_variables.append(new_value[0])
    #---End: for

    return new_ancillary_variables
#--- End: def

def aggregate(fields, messages=False, strict_units=True, overlap=True,
              contiguous=False, _delete_meta=True, strict_identity=True,
              equal_all=False, equal_ignore=(), equal=(),
              exist_all=False, exist_ignore=(), exist=(),
              concatenate=True, debug=0):
    '''

Aggregate fields into as few fields as possible.

The aggregation of fields may be thought of as the combination of
fields into each other to create a new field that occupies a larger
space.

Using the CF aggregation rules, input fields are separated into
aggregatable groups and each group (which may contain just one field)
is then aggregated to a single field. These aggregated fields are
returned in a field list.


:Parameters:

    fields : (sequence of) Field or FieldList
        The field or fields to be aggregated.

    messages : bool, optional
        If True then print messages giving reasons why a field has not
        been aggregated.

    overlap : bool, optional
        If False then a necessary condition for the aggregation of two
        fields is that their two adjacent dimension coordinate cells
        do not overlap each other. By default aggregation may occur in
        this case. See the `contiguous` parameter.

    contiguous : bool, optional
        If True then a necessary condition for the aggregation of two
        fields is that their two adjacent dimension coordinate cells
        partially overlap or share a common boundary value. By default
        aggregation may occur if this is not the case. See the
        `overlap` parameter.

    strict_units : bool, optional
        If False then assume that fields or their components (such as
        coordinates) with the same identity but missing units all have
        equivalent (but unspecified) units, so that aggregation may
        occur. By default such fields are not aggregatable.

    strict_identity : bool, optional
        If False then treat fields with data arrays but no identities
        as having equal identities, and so are potentially
        aggregatable. By default fields with data arrays but no
        identities are not aggregatable.

    equal_all : bool, optional
        If True then a necessary condition for the aggregation of two
        fields is that they have the same set of non-standard CF
        properties, with pair-wise equal values. By default the
        non-standard CF properties do not affect whether or not two
        fields may be aggregated. See the `concatenate` parameter.

    equal_ignore : (sequence of) str, optional
        Specify CF properties to omit from any properties specified by
        or implied by the `equal_all` and `equal` parameters.

    equal : (sequence of) str, optional
        Specify CF properties so that for each property, a necessary
        condition for the aggregation of two fields is that each
        property is present in either neither or both of the fields
        and in the latter case the property values are equal. By
        default the non-standard CF properties do not affect whether
        or not two fields may be aggregated. See the `concatenate`
        parameter.

    exist_all : bool, optional
        If True then a necessary condition for the aggregation of two
        fields is that they have the same set of non-standard CF
        properties, with no restriction on their values. By default
        the non-standard CF properties do not affect whether or not
        two fields may be aggregated. See the `concatenate` parameter.

    exist_ignore : (sequence of) str, optional
        Specify CF properties to omit from the properties specified by
        or implied by the `exist_all` and `exist` parameters.

    exist : (sequence of) str, optional
        Specify CF properties so that for each property, a necessary
        condition for the aggregation of two fields is that each
        property is present in either neither or both of the fields,
        with no restriction on their values if they exist. By default
        the non-standard CF properties do not affect whether or not
        two fields may be aggregated. See the `concatenate` parameter.

    concatenate : bool, optional
        If False then a CF property is omitted from an aggregated
        field if the property has unequal values across constituent
        fields or is missing from at least one constituent field. By
        default a CF property in an aggregated field is the
        concatenated collection of the distinct values from the
        constituent fields, delimited with the string:

        " :AGGREGATED: "

    debug : int, optional
        Print debugging information for each input field. If 1 then
        print each field's structural signature. If 2 the print the
        entire metadata summary for each field, which includes the
        structural signature. By default no debugging information is
        printed.

:Returns:

    out : FieldList
        The aggregated fields.
    
**Examples**

The following six fields comprise eastward wind at two different times
and for three different atmospheric heights for each time:

>>> f
[<CF Field: eastward_wind(latitude(73), longitude(96)>,
 <CF Field: eastward_wind(latitude(73), longitude(96)>,
 <CF Field: eastward_wind(latitude(73), longitude(96)>,
 <CF Field: eastward_wind(latitude(73), longitude(96)>,
 <CF Field: eastward_wind(latitude(73), longitude(96)>,
 <CF Field: eastward_wind(latitude(73), longitude(96)>]
>>> cf.aggregate(f)
[<CF Field: eastward_wind(height(3), time(2), latitude(73), longitude(96)>]

'''
    # Work on copies so the caller's fields are never modified
    fields = deepcopy(fields)
    
    output_fields = FieldList()

    atol = ATOL()
    rtol = RTOL()

    # NOTE(review): 'history' is computed but never used in this
    # function - presumably intended to be set on the aggregated
    # fields; confirm against the rest of the module.
    history = '%s Aggregated by cf-python v%s' % \
        (time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime()),
         __version__)

    # ----------------------------------------------------------------
    # Parse the CF-property selection parameters. Each parameter maps
    # to its own input value (previously 'equal_ignore' and
    # 'exist_ignore' were wrongly mapped to 'equal' and 'exist',
    # silently discarding the ignore lists).
    # ----------------------------------------------------------------
    properties = {'equal': equal, 'equal_ignore': equal_ignore,
                  'exist': exist, 'exist_ignore': exist_ignore}

    for key, value in properties.iteritems():
        if not value:
            continue

        if isinstance(value, basestring):
            # A lone string is converted to a single element sequence.
            # (Previously 'equal' was tested here instead of 'value',
            # so only the type of the 'equal' parameter was honoured.)
            properties[key] = (value,)
        else:
            # Otherwise insist on a sequence that supports indexing
            try:
                value[0]
            except TypeError:
                raise TypeError("Bad type of '%s' parameter: %s" % (key, value))
    #--- End: for

    equal        = properties['equal']
    exist        = properties['exist']
    equal_ignore = properties['equal_ignore']
    exist_ignore = properties['exist_ignore']

    # ----------------------------------------------------------------
    # Group fields with the same structural signature
    # ----------------------------------------------------------------
    signatures = {}
    for f in flat(fields):

        # Create the metadata summary
        meta = _Meta(f, messages=messages, rtol=rtol, atol=atol,
                     strict_units=strict_units, strict_identity=strict_identity,
                     equal_all=equal_all, equal_ignore=equal_ignore,
                     equal=equal,
                     exist_all=exist_all, exist_ignore=exist_ignore,
                     exist=exist)

        if not meta:
            # Can't find a structural signature for this field, so it
            # can't be aggregated. Put it straight into the output
            # list and move on to the next input field.
            output_fields.append(f)
            continue
        #--- End: if

        # Append this field to the list of fields with the same
        # structural signature
        signature = meta.signature
        if signature not in signatures:
            signatures[signature] = []

        signatures[signature].append(meta)

        if debug:
            if hasattr(meta.field, 'file'):
                print('FILE:', meta.field.file)
            else:
                print('NO FILE')

            if debug == 1:
                print('Structural signature:', signature, '\n')
            elif debug == 2:
                print(meta, '\n')
        #--- End: if
    #--- End: for    

    # ----------------------------------------------------------------
    # Within each group of fields with the same structural signature,
    # aggregate as many fields as possible. Sort the signatures so
    # that independent aggregations of the same set of input fields
    # return fields in the same order.
    # ----------------------------------------------------------------
    for signature in sorted(signatures):

        meta = signatures[signature]

        if len(meta) == 1:
            # There's only one field with this signature, so we can
            # add it straight to the output list and move on to the
            # next signature.
            output_fields.append(meta[0].field) 
            if messages:
                print("Field has unique structural signature: %s" %
                      repr(meta[0].field))

            continue
        #--- End: if

        # ------------------------------------------------------------
        # Still here? Then there are 2 or more fields with this
        # signature which may be aggregatable. These fields need to be
        # passed through until no more aggregations are possible. With
        # each pass, the number of fields in the group will reduce by
        # one for each aggregation that occurs. Each pass represents
        # an aggregation in another dimension
        # ------------------------------------------------------------
        while 1:
            start_len_fields = len(meta)
            if start_len_fields == 1:
                break
            
            # --------------------------------------------------------
            # For each dimension's 1-d coordinates, create the
            # canonical hash value and first (and last) datum.
            # --------------------------------------------------------
            _create_hash_and_first_values(meta)
            
            # --------------------------------------------------------
            # Separate the fields with the same structural signature
            # into groups such that either within each group the
            # fields' spaces differ in only one dimension or each
            # group contains only one field.
            # --------------------------------------------------------
            grouped_meta = _group_fields(meta)

            # --------------------------------------------------------
            # Within each group, aggregate as many fields as possible.
            # --------------------------------------------------------
            for m in grouped_meta:

                if len(m) == 1:
                    continue
                
                # ----------------------------------------------------
                # Sort the fields in place by the canonical first
                # values of their 1-d coordinates.
                # ----------------------------------------------------
                _sorted_by_first_values(m)

                # ----------------------------------------------------
                # Check that the aggregating dimension's 1-d
                # coordinates don't overlap, and don't aggregate
                # anything in this group if any do.
                # ----------------------------------------------------
                if not _ok_coordinate_arrays(m, overlap, contiguous):
                    continue

                # ----------------------------------------------------
                # Still here? Then pass through the fields
                # ----------------------------------------------------
                while len(m) >= 2:
                    new = _aggregate_2_fields(m[0], m[1],
                                              rtol=rtol, atol=atol,
                                              contiguous=contiguous,
                                              overlap=overlap,
                                              strict_units=strict_units,
                                              messages=messages,
                                              concatenate=concatenate)

                    if new:
                        # Successfully aggregated these two fields
                        m[0:2] = [new] 
                    else:
                        # Couldn't aggregate these two fields, so move the
                        # first one onto the list of output fields
                        output_fields.append(m.pop(0).field)
                #--- End: while

            #--- End: for

            # Reassemble the fields as a single list
            meta = [m for gm in grouped_meta for m in gm]

            if len(meta) == start_len_fields:
                # ----------------------------------------------------
                # No aggregation took place on this pass, so we're
                # done and can add these fields to the output list.
                # ----------------------------------------------------
                break            
        #--- End: while

        output_fields.extend((m.field for m in meta))
    #--- End: for

    return output_fields
#--- End: def

