from glob import iglob
from os   import SEEK_SET
import struct
from .utils     import _files
from .fieldlist import FieldList
from .netcdf    import read      as netcdf_read
from .pp        import read      as pp_read
from .aggregate import aggregate as cf_aggregate

def _read_a_file(filename,
                 aggregate_options=None,
                 ignore_ioerror=False,
                 verbose=False,
                 close=False,
                 squeeze=1,
                 prop=None,
                 attr=None,
                 coord=None,
                 cellsize=None):
    '''

Read the contents of a single file into a field list.

:Parameters:

    filename : str
        The file name or an OPeNDAP URL (starting 'http://').

    aggregate_options : dict, optional
        The keys and values of this dictionary may be passed as
        keyword parameters to an external call of the aggregate
        function. Note that when a dictionary is given it may be
        updated in place (for PP files 'strict_units' is set to False
        if not already present), so the caller sees the change.

    ignore_ioerror : bool, optional
        If True then return an empty field list if reading the file
        produces an IOError, as would be the case for an empty file,
        unknown file format, etc. By default the IOError is raised.

    verbose : bool, optional
        If True then print information to stdout.
    
    close : bool, optional
        If True then close the file after it has been read.

    squeeze : int, optional
        If 1 (the default) then squeeze the fields, if -1 then
        unsqueeze them, if 0 then leave them as read.

    prop, attr, coord, cellsize : dict, optional
        Conditions passed to the field list's `subset` method to
        select which of the file's fields are returned.

:Returns:

    out : FieldList
        The fields in the file.

'''
    # Replace mutable default arguments (a shared default dict would
    # leak state between calls -- aggregate_options in particular is
    # mutated below). When the caller supplies a dictionary it is
    # deliberately used, and possibly updated, in place.
    if aggregate_options is None:
        aggregate_options = {}
    if prop is None:
        prop = {}
    if attr is None:
        attr = {}
    if coord is None:
        coord = {}
    if cellsize is None:
        cellsize = {}

    # Find this input file's format
    if filename.startswith('http://'):
        # Assume that a URL is netCDF served by OPeNDAP
        fmt = 'nc'
        openfile = filename
    else:        
        try:
            openfile = open(filename, 'rb')             
            _files(openfile)
        except IOError as error:
            if ignore_ioerror:
                if verbose:
                    print('Ignoring IOError: %(error)s' % locals())
                return FieldList()
            #--- End: if
            # Bare raise preserves the original exception and traceback
            raise
        #--- End: try

        try:
            fmt = _file_format(openfile)        
        except IOError as error:
            if ignore_ioerror: 
                if verbose:
                    print('Ignoring IOError: %(error)s' % locals())
                return FieldList()
            #--- End: if
            raise
    #--- End: if

    # ----------------------------------------------------------------
    # Read the file into fields
    # ----------------------------------------------------------------
    if fmt == 'nc':
        fields = netcdf_read(openfile, close=close)
        
    elif fmt == 'pp':        
        fields = pp_read(openfile, close=close)

        # For PP fields, the default is strict_units=False
        if 'strict_units' not in aggregate_options:
            aggregate_options['strict_units'] = False
    
    # Developers: Add more file formats here ...

    else:
        # _file_format currently returns only 'nc' or 'pp', so this is
        # reachable only if a new format is added there but not here.
        # Previously this case fell through to a NameError on 'fields'.
        raise IOError("File %s has unsupported format: %s" %
                      (filename, fmt))

    # ----------------------------------------------------------------
    # Subset matching fields before aggregation
    # ----------------------------------------------------------------
    fields = fields.subset(prop=prop, attr=attr,
                           coord=coord, cellsize=cellsize)

    # ----------------------------------------------------------------
    # Do one of:
    # 
    # 1) Squeeze the fields, i.e. remove all size one dimensions from
    # all field data arrays and turn any corresponding size 1, 1-d
    # coordinates and cell measures into scalars.
    #
    # 2) Unsqueeze the fields, i.e. Include all size 1 dimensions in
    # the data array and make any scalar coordinates and cell measures
    # size 1, 1-d.
    #
    # 3) Nothing
    # ----------------------------------------------------------------
    if squeeze == 1:
        fields.squeeze()
    elif squeeze == 0:
        pass
    elif squeeze == -1:
        fields.unsqueeze()
    else:
        raise ValueError("Incorrect setting of squeeze: %s" % squeeze)

    # ----------------------------------------------------------------
    # Return the fields
    # ----------------------------------------------------------------
    return fields
#--- End: def

def read(files,
         verbose=False,
         index=None, 
         ignore_ioerror=False,
         close=False,
         aggregate=True,
         squeeze=1,
         prop=None,
         attr=None,
         coord=None,
         cellsize=None):
    '''

Read fields from files from disk or from an OPeNDAP server.

Currently supports netCDF and PP formats.

Each field contains its file name in its `file` attribute and, where
appropriate, its netCDF variable name in its `ncvar` attribute. Note
that fields may be selected by netCDF variable name by setting a value
(or values) of 'ncvar' via the `attr` keyword.

Fields referenced by formula_terms transforms within other fields are
not included in the returned list of fields.

:Parameters:

    files : str or sequence of strs
        A string or sequence of strings giving the file names or
        OPeNDAP URLs from which to read fields. For files on disk, the
        file names may contain wild cards as understood by the python
        glob module.
    
    index : int, optional
        Only return the field with this non-negative index in the
        otherwise returned full list of fields. By default return all
        (otherwise selected) fields from the input files.
    
    verbose : bool, optional
        If True then print information to stdout.
    
    close : bool, optional
        If True then close each file after it has been read.
        
    ignore_ioerror : bool, optional
        If True then ignore any file which raises an IOError whilst
        being read, as would be the case for an empty file, unknown
        file format, etc. By default the IOError is raised.
    
    aggregate : bool or dict, optional
        If True or a dictionary then aggregate the fields read in from
        all input files into as few fields as possible using the CF
        aggregation rules. If a dictionary then provide these
        parameters to the aggregate function.
    
    prop : dict, optional
        Only return fields matching the given conditions on their CF
        attributes. Refer to the field's `match` method for details.

    attr : dict, optional
        Only return fields matching the given conditions on their
        attributes. Refer to the field's `match` method for details.

    coord : dict, optional
        Only return fields matching the given conditions on their
        coordinates. Refer to the field's `match` method for details.

    cellsize : dict, optional
        Only return fields matching the given conditions on their coordinates'
        cellsizes. Refer to the field's `match` method for details.

:Returns:
    
    out : FieldList
        A list of fields.

:Raises:

    IOError :
        Raised if `ignore_ioerror` is False and there was an I/O
        related failure, including unknown file format.


**Examples**

>>> f = cf.read('file*.nc')
>>> type(f)
<class 'cf.field.FieldList'>
>>> f
[<CF Field: pmsl(30, 24)>,
 <CF Field: z-squared(17, 30, 24)>,
 <CF Field: temperature(17, 30, 24)>,
 <CF Field: temperature_wind(17, 29, 24)>]

>>> cf.read('file*.nc')[0:2]
[<CF Field: pmsl(30, 24)>,
 <CF Field: z-squared(17, 30, 24)>]

>>> cf.read('file*.nc', index=0)
[<CF Field: pmsl(30, 24)>]

>>> cf.read('file*.nc')[-1]
<CF Field: temperature_wind(17, 29, 24)>

>>> cf.read('file*.nc', prop={'units': 'K'})
[<CF Field: temperature(17, 30, 24)>,
 <CF Field: temperature_wind(17, 29, 24)>]

>>> cf.read('file*.nc', attr={'ncvar': 'ta'})
[<CF Field: temperature(17, 30, 24)>]

>>> cf.read('file*.nc', prop={'standard_name': '.*pmsl*', 'units':'K|Pa'})[0]
<CF Field: pmsl(30, 24)>

>>> cf.read('file*.nc', prop={'units':['K', 'Pa']})
[<CF Field: pmsl(30, 24)>,
 <CF Field: temperature(17, 30, 24)>,
 <CF Field: temperature_wind(17, 29, 24)>]

'''
    # Replace mutable default arguments (a shared default dict would
    # leak state between calls)
    if prop is None:
        prop = {}
    if attr is None:
        attr = {}
    if coord is None:
        coord = {}
    if cellsize is None:
        cellsize = {}

    # Initialize the output list of fields
    field_list = FieldList()

    if isinstance(aggregate, dict):
        # Copy so that _read_a_file's in-place updates (e.g.
        # strict_units for PP) do not modify the caller's dictionary
        aggregate_options = aggregate.copy()
        aggregate         = True
    else:
        aggregate_options = {}

    # Count the number of fields (in all files) and the number of
    # files
    field_counter = -1
    file_counter  = 0

    if isinstance(files, basestring):
        files = (files,)

    for file_glob in files:

        if file_glob.startswith('http://'):
            # Do not glob a URL
            files2 = (file_glob,)
        else:
            # Do glob files on disk
            files2 = iglob(file_glob)

        for filename in files2:

            # Print some informative messages
            if verbose and index is None:
                print('File: %(filename)s' % locals())

            # --------------------------------------------------------
            # Read the file into fields
            # --------------------------------------------------------
            fields = _read_a_file(filename,
                                  ignore_ioerror=ignore_ioerror,
                                  verbose=verbose,
                                  close=close,
                                  aggregate_options=aggregate_options,
                                  squeeze=squeeze, 
                                  prop=prop,
                                  attr=attr,
                                  coord=coord,
                                  cellsize=cellsize)
            
            # --------------------------------------------------------
            # Add this file's fields to those already read from other
            # files
            # --------------------------------------------------------
            field_list.extend(fields)
   
            field_counter = len(field_list)
            file_counter += 1

            # Print some informative messages
            if verbose and index is None:
                i = field_counter - len(fields)
                for f in fields:
                    print('%d: %s' % (i, repr(f)))
                    i += 1
            #--- End: if

            # --------------------------------------------------------
            # If we only want one field from all input files then
            # break now if we have got it. Field index is available
            # once at least index+1 fields have been read.
            # --------------------------------------------------------
            if index is not None and field_counter >= index+1:
                break
        #--- End: for
            
        # ------------------------------------------------------------
        # If we only want one field from all input files then break
        # now if we have got it. The same index+1 threshold as the
        # inner loop: breaking at field_counter == index would stop
        # before the requested field had been read.
        # ------------------------------------------------------------
        if index is not None and field_counter >= index+1:
            break
    #--- End: for     

    # Error check
    if not ignore_ioerror:
        if not file_counter:
            raise RuntimeError('No files found')
        if not field_list:
            raise RuntimeError('No fields found from '+str(file_counter)+' files')
    #--- End: if

    # The 'index is not None' guard keeps this a pure int comparison
    # (comparing None with an int is invalid in python 3)
    if index is not None and index >= len(field_list):
        raise IndexError('FieldList index='+str(index)+' is out of range')

    # Print some informative messages
    if verbose:
        if index is None:  
            print("Read %d field%s from %d file%s" % 
                  (field_counter, ('s' if field_counter!=1 else ''),
                   file_counter , ('s' if file_counter !=1 else '')))
        else:
            print('File: %(filename)s' % locals())
            print('%d: %s' % (0, repr(field_list[index])))
            print('Read 1 field from 1 file')
    #--- End: if
    
    # ----------------------------------------------------------------
    # Select the single requested field. When index is None the
    # subscript raises TypeError and the full list is kept.
    # ----------------------------------------------------------------
    try:
        field_list = field_list[index]
    except TypeError:
        pass

    # ----------------------------------------------------------------
    # Aggregate the output fields
    # ----------------------------------------------------------------    
    if len(field_list) > 1 and aggregate:   
        field_list = cf_aggregate(field_list, **aggregate_options)
        if verbose:
            nfields = len(field_list)
            print('Fields aggregated into %d field%s' %
                  (nfields, ('' if nfields==1 else 's')))
    #--- End: if

    return field_list
#--- End: def

def _file_format(openfile):
    '''

Read and interpret a file's magic number.

Takes a file object as input and assumes that we're currently pointing
to the beginning of the file. The file is rewound to the beginning
after reading the magic number.

Developers: For each new file format that is supported, another 'if'
clause needs to be added to this function.

:Parameters:

    openfile : file
        A python file object.

:Returns:

    out : str
        The format of the file.

:Raises:
 
    IOError :
        If the file has an unknown format.

**Examples**

>>> try:
...     format = _file_format(openfile)        
... except IOError as error:
...     # Do something
... else:
...     # Do something else

''' 
    if openfile.name.startswith('http://'):
        # Assume that a URL is netCDF
        return 'nc'

    # Read the magic number
    try:
        magic_number = struct.unpack('=L', openfile.read(4))[0]
    except struct.error:
        raise IOError("File %s is empty (contains fewer than 4 words)" %
                      openfile.name)

    # Reset the pointer to the beginning of the file
    openfile.seek(0, SEEK_SET)

    # ----------------------------------------------------------------
    # netCDF
    # ----------------------------------------------------------------
    if magic_number in (21382211, 1128547841, 1178880137, 38159427):
        return 'nc'

    # ----------------------------------------------------------------
    # PP
    # ----------------------------------------------------------------
    if magic_number in (256, 65536):
        return 'pp'

    # ----------------------------------------------------------------
    # Developers: Add more file formats here ...
    # ----------------------------------------------------------------

    # Still here?
    raise IOError("File %s has unsupported format: Magic number=%d" % 
                  (openfile.name, magic_number))
#--- End: def

