# Joao Lencart e Silva: j.lencart@ua.pt
import csv
import numpy
import netCDF4
from netCDF4 import Dataset
from odcutils._etc import _parts
from datetime import datetime, timedelta
import netcdftime
import operator

# Set default encoding
netCDF4.default_encoding = 'utf-8'

def loadSBE(dump, fname, gfile):
    """Loads fname netcdf file with the dump from a cnv SBE output file"""
    # Read timestamp
    for line in dump:
        if line.startswith('# start_time'):
            c = line.split()
            c = ''.join(c[3:])
    # Create datetime object
    do = datetime.strptime(c, '%b%d%Y%H:%M:%S')
    # Extract pre-industrial from gregorian
    gdate = _parts.dTime2Loch(do)
    # Find header end and cut it
    # Try in windows and unix formats
    try:
        hd = dump.index('*END*\r\n')
    except ValueError:
        hd = dump.index('*END*\n')
    dump = dump[hd + 1:]
    # Initialise data array
    data = []
    # Convert list of strings to number
    for line in dump:
        numbers = map(float, line.split())
    # append to data array
        data.append(numbers)
    # open ntcdf file
    fdataset = Dataset(fname, 'a')
    # time (convert to seconds from pre-industrial datum)
    fdataset.variables['time'][:] = gdate * 3600 * 24
    # Lat, Lon  
    fdataset.variables['lat'][:] = gfile['lat_dat']
    fdataset.variables['lon'][:] = gfile['lon_dat']
    # Make numpy array 
    data = numpy.vstack(data)
    # Transpose for looping over single variable
    data = data.transpose()
    # Remove last row (flag)
    data = data[0:-1,:]
    # Try to dump data and if it fails return the and variables files
    # which are  wrongly dimensioned
    try:
        for key, line in \
            zip(reversed(fdataset.variables), reversed(data)):
    # Find fill value for the variable
            fv = fdataset.variables[key]._FillValue
    # Create masked array
            mma = numpy.ma.masked_where(line == fv, line)
    # Change fill_value of masked array to nc fill_value
            mma.set_fill_value(fv)
            fdataset.variables[key][:] = mma
    except IndexError:
        print 'Exception while loading', key, 'into', fname
        print 'Original data has length', len(line),\
              'new FISUA netCDF variable has shape', \
              list(fdataset.variables[key][:].shape)
    # Write history
    do = datetime.now()
    date = do.strftime('%Y%m%d%H%M%S')
    hist = '\t* ' + date + ' - Converted to FISUA v0.0 format\n'
    fdataset.history = fdataset.history + hist
    fdataset.modification_date = date
    # Close netcdf file
    fdataset.close()

def loadIDR(dump, fname, gfile):
    """Loads fname netcdf file with the dump from a cnv SBE output file.
    """
    # open ntcdf file
    fdataset = Dataset(fname, 'a')
    fv = fdataset._FillValue
    # Read timestamp
    for line in dump:
        if line.split()[0] == 'DATA:':
            c = line.split()
            c = c[1]+c[3]
    # Create datetime object
            do = datetime.strptime(c, '%Y-%m-%d%H:%M')
            date = do.strftime('%Y%m%d%H%M%S')
    # Extract pre-industrial from gregorian
    gdate = _parts.dTime2Loch(do)
    # Find header end and cut it
    for l in range(0,len(dump)):
        if dump[l].split()[0] == 'hh:mm:ss.sss':
            break
    dump = dump[l + 1:]
    # Initialise data array
    data = []
    # Convert list of strings to number
    for line in dump:
        numbers = []
    # Ignore first column with the date and last column with number of scans 
    # for salinity calc.
        for val in line.split()[1:]:
            try:
                numbers.append(float(val))
    # If the the number has an * for invalid
            except ValueError:
                numbers.append(fv)
    # append to data array
        data.append(numbers)
    # time (convert to seconds from pre-industrial datum)
    fdataset.variables['time'][:] = gdate * 3600 * 24
    # Lat, Lon  
    fdataset.variables['lat'][:] = gfile['lat_dat']
    fdataset.variables['lon'][:] = gfile['lon_dat']
    # Make numpy array 
    data = numpy.vstack(data)
    # Transpose for looping over single variable
    data = data.transpose()
    # Remove last row (flag)
    data = data[0:-1,:]
    # Try to dump data and if it fails return the and variables files
    # which are  wrongly dimensioned
    try:
        for key, line in \
            zip(reversed(fdataset.variables), reversed(data)):
    # Find fill value for the variable
            fv = fdataset.variables[key]._FillValue
    # Create masked array
            mma = numpy.ma.masked_where(line == fv, line)
    # Change fill_value of masked array to nc fill_value
            mma.set_fill_value(fv)
            fdataset.variables[key][:] = mma
    except IndexError:
        print 'Exception while loading', key, 'into', fname
        print 'Original data has length', len(line),\
              'new FISUA netCDF variable has shape', \
              list(fdataset.variables[key][:].shape)
    # Write history
    do = datetime.now()
    date = do.strftime('%Y%m%d%H%M%S')
    hist = '\t* ' + date + ' - Converted to FISUA v0.0 format\n'
    fdataset.history = fdataset.history + hist
    fdataset.modification_date = date
    # Close netcdf file
    fdataset.close()

def loadRBR(dump, fname, gfile):
    """Loads fname netcdf file with the dump from a single cast ascii file"""    
    # Read timestamp
    timestamp = float(dump[0].split()[0])
    do = _parts.dNum2DTime(timestamp)
    date = do.strftime('%Y%m%d%H%M%S')
    # Extract pre-industrial from gregorian
    gdate = _parts.dTime2Loch(do)
    # Initialise data array
    data = []
    # Convert list of strings to number
    for line in dump:
        numbers = map(float, line.split())
    # append to data array
        data.append(numbers)
    # open ntcdf file
    fdataset = Dataset(fname, 'a')
    # time (convert to seconds from pre-industrial datum)
    fdataset.variables['time'][:] = gdate * 3600 * 24
    # Lat, Lon
    fdataset.variables['lat'][:] = gfile['lat_dat']
    fdataset.variables['lon'][:] = gfile['lon_dat']
    # Make numpy array
    data = numpy.vstack(data)
    # Transpose for looping over single variable
    data = data.transpose()
    # Remove 1st row (flag)
    data = data[1:,:]
    # Try to dump data and if it fails return the and variables files
    # which are  wrongly dimensioned
    try:
        for key, line in \
            zip(reversed(fdataset.variables), reversed(data)):
    # Find fill value for the variable
            fv = fdataset.variables[key]._FillValue
    # Create masked array
            mma = numpy.ma.masked_where(line == fv, line)
    # Change fill_value of masked array to nc fill_value
            mma.set_fill_value(fv)
            fdataset.variables[key][:] = mma
    except IndexError:
        print fname, key
    # Write history
    do = datetime.now()
    date = do.strftime('%Y%m%d%H%M%S')
    hist = '\t*' + date + ' - Converted to FISUA v0.0 format\n'
    fdataset.history = fdataset.history + hist
    fdataset.modification_date = date
    # Close netcdf file
    fdataset.close()

def loadDST(dump, fname, gfile):
    """Loads fname netcdf file with the dump from an ascii file.

    dump  -- list of lines from the DST ascii file ('#' lines are header)
    fname -- path of the FISUA netCDF file to load (opened in append mode)
    gfile -- guide mapping with 'lat_dat', 'lon_dat' and, optionally, 'MAB'
    """
    fdataset = Dataset(fname, 'a')
    data = []
    # Count header lines: the header is the leading run of '#'-prefixed
    # lines.  startswith() is used so an empty line simply ends the header
    # instead of raising IndexError as line[0] would.
    hdr = 0
    for line in dump:
        if line.startswith('#'):
            hdr += 1
        else:
            break
    dump = dump[hdr:]
    # Global fill value used for missing/unparsable samples
    fv = fdataset._FillValue
    for line in dump:
        # Split once per record (the original split each line repeatedly)
        cols = line.split()
    # Build the timestamp from the date and time columns and convert it to
    # seconds from the pre-industrial datum (project convention)
        do = datetime.strptime(cols[1] + cols[2], '%d.%m.%y%H:%M:%S')
        gdate = _parts.dTime2Loch(do) * 24 * 3600
    # Data columns: a missing or non-numeric value becomes the fill value
        try:
            c1 = float(cols[3])
        except (ValueError, IndexError):
            c1 = fv
        try:
            c2 = float(cols[4])
        except (ValueError, IndexError):
            c2 = fv
        data.append([gdate, c1, c2])
    # Position comes from the guide file
    fdataset.variables['lat'][:] = gfile['lat_dat']
    fdataset.variables['lon'][:] = gfile['lon_dat']
    # Make numpy array
    data = numpy.vstack(data)
    # Mask fill values column by column, using each target variable's own
    # fill value
    fv = fdataset.variables['time']._FillValue
    d0 = numpy.ma.masked_where(data[:, 0] == fv, data[:, 0])
    d0.set_fill_value(fv)
    fv = fdataset.variables['wtemp']._FillValue
    d1 = numpy.ma.masked_where(data[:, 1] == fv, data[:, 1])
    d1.set_fill_value(fv)
    fv = fdataset.variables['water_press']._FillValue
    d2 = numpy.ma.masked_where(data[:, 2] == fv, data[:, 2])
    d2.set_fill_value(fv)
    fdataset.variables['time'][:] = d0
    fdataset.variables['wtemp'][:] = d1
    fdataset.variables['water_press'][:] = d2
    # Height above bed is optional in the guide file
    try:
        fdataset.variables['z'][:] = gfile['MAB']
    except KeyError:
        pass
    # Append a conversion record to the file history
    do = datetime.now()
    date = do.strftime('%Y%m%d%H%M%S')
    hist = '\t*' + date + ' - Converted to FISUA v0.0 format\n'
    fdataset.history = fdataset.history + hist
    fdataset.modification_date = date
    fdataset.close()

def loadAWS(fname, gfile):
    """Loads fname netcdf file with the contents of the csv file
    specified in the gfile.

    fname -- path of the FISUA netCDF file to load (opened in append mode)
    gfile -- guide mapping; 'source_file' is the csv file to read

    NOTE: the conversion of the csv records into the netCDF variables is
    still unimplemented (see the trailing comments); only the csv read and
    the netCDF open are done.
    """
    # Initialise weather station variable
    aws = []
    # Read the csv data; BUGFIX: the original called the undefined name
    # 'fopen', which raised NameError on every call.  The 'with' block
    # also guarantees the file is closed even if the read fails.
    with open(gfile['source_file']) as f1:
        reader = csv.DictReader(f1)
        for line in reader:
            aws.append(line)
    # Open netcdf file
    fdataset = Dataset(fname, 'a')
    # Convert time to pre-industrial datum
    # Convert direction to wind_to (Oceanographic)
    # Convert Rainfall from mm/h to mm/min
    # Close netcdf file


def loadADCPEPIC(fname, gfile):
    """Loads fname netcdf file with the contents of the file
    referenced in gfile, converting from EPIC format to FISUA-v0.0

    fname -- path of the FISUA netCDF file to load (opened in append mode)
    gfile -- guide mapping; 'source_file' is the EPIC netCDF file to read
    """
    print 'Started loading variables'
    # define epic target variables
    evars = ['time',
                  'bindist',
                  'lon',
                  'lat',
                  'u_1205',
                  'v_1206',
                  'w_1204',
                  'Werr_1201',
                  'PGd_1203',
                  'Tx_1211',
                  'Hdg_1215',
                  'Ptch_1216',
                  'Roll_1217',
                  'P_1294']
    # Define fisua target variables by order of epic (index-aligned with
    # evars above: evars[i] is dumped into fvars[i])
    fvars =\
                ['time',
                 'z',
                 'lon',
                 'lat',
                 'water_u',
                 'water_v',
                 'water_w',
                 'water_werror',
                 'pgd',
                 'wtemp',
                 'hdg',
                 'pitch',
                 'roll',
                 'water_press']

    # Velocity variables that get the unit-conversion factor below
    fvel =      ['water_u',
                 'water_v',
                 'water_w',]

    # Open FISUA file
    fisnc = Dataset(fname, 'a')
    # Open EPIC file
    epicnc = Dataset(gfile['source_file'], 'r')
    # Convert attributes of variables from EPIC to FISUA
    _epic2fisua(epicnc, fisnc, evars, fvars)
    # Dump data
    for evar, fvar in zip(evars, fvars):
        print 'Dumping', evar, 'into', fvar
        if fvar in fvel:
    # convert velocities from cm/s to mm/s (the *10 factor; the original
    # comment here said the reverse — the commented-out block at the end
    # of this function confirms cm/s -> mm/s)
            fisnc.variables[fvar][:] = epicnc.variables[evar][:]*10.
        elif fvar == 'water_press':
            try:
    # GET A BETTER CONVERSION FROM decaPascal to dBar
    # NOTE(review): 1 dBar = 10000 Pa = 1000 daPa, so /1000 looks exact —
    # confirm the source really is in decaPascal
                print 'Converting pressure from decaPascal to dBar'
    # Convert pressure
                fisnc.variables[fvar][:] = epicnc.variables[evar][:] / 1000
            except KeyError:
                print 'Pressure not found in source file: skipped'
                continue
        else:
            try:
                fisnc.variables[fvar][:] = \
                    numpy.array(epicnc.variables[evar][:])
            except KeyError:
                print evar, 'not found: skipped.'
                continue

    print 'Converting time from Julian calendar into pre-industrial datum'
    # Get time from EPIC: 'time' holds Julian days, 'time2' milliseconds
    # within the day (standard EPIC two-variable time)
    d = epicnc.variables['time'][:]
    ms = epicnc.variables['time2'][:]
    # Convert to datetime
    do_arr = map(netcdftime.DateFromJulianDay, d)
    # Set date to 00:00 GMT (Julian days start at noon, hence the -12 h)
    do_arr = map((lambda do: operator.add(do, timedelta(hours = -12))), do_arr)
    # Define the timedelta lambda function
    td = lambda x: timedelta(milliseconds = x)
    # Convert numpy.int32 to int (timedelta rejects numpy integer types here)
    td_arr = map(td, map(int,ms))
    # Add the timedelta to the datetime object
    do_arr = map(operator.add, do_arr,  td_arr)
    # Convert to Loch time (days since the pre-industrial datum)
    ltime = map(_parts.dTime2Loch, do_arr)
    # Dump time
    fisnc.variables['time'][:] = ltime
    # Convert to seconds
    fisnc.variables['time'][:] = fisnc.variables['time'][:] * 24. * 3600.
# GET A BETTER CONVERSION FROM decaPascal to dBar
#    print 'Converting pressure from decaPascal to dBar'
#    # Convert pressure
#    fisnc.variables['water_press'][:] = fisnc.variables['water_press'][:]\
#                                        / 1000
    # Convert velocity cm/s -> mm/s
#    print 'Converting velocity units from cm/s to mm/s'
#    fisnc.variables['water_u'][:] = fisnc.variables['water_u'][:] * 10
#    fisnc.variables['water_v'][:] = fisnc.variables['water_v'][:] * 10
#    fisnc.variables['water_w'][:] = fisnc.variables['water_w'][:] * 10
    # Write history (the EPIC file's own history is carried over too)
    print 'Writting history'
    do = datetime.now()
    sdate = do.strftime('%Y%m%d%H%M%S')
    hist = '*\t' + sdate + ' - Transformed from EPIC to FISUA v0.0 standard.\n'
    fisnc.history = fisnc.history + '*\t - ' +  epicnc.history + hist
    fisnc.modification_date = sdate
    # Close FISUA file
    fisnc.close()
    # Close EPIC file
    epicnc.close()
    

def _epic2fisua(epicnc, fisnc, evars, fvars):
    """Reads attributes of variables in the EPIC format and converts them
    to FISUA format.

    epicnc -- open EPIC source Dataset (read)
    fisnc  -- open FISUA target Dataset (written in place)
    evars  -- EPIC variable names, index-aligned with fvars
    fvars  -- FISUA variable names receiving the attributes
    """
    print 'Adjusting attributes of variables'
    # set global attribute for start and end ensembles
    fisnc.start_ensemble = epicnc.variables['Rec'][0]
    fisnc.end_ensemble = epicnc.variables['Rec'][-1]
    # Attributes common to all variables
    # NOTE(review): each 'except ...: continue' below aborts ALL remaining
    # attribute imports for the current variable, not just the failed one —
    # if the later attributes should still be copied these were probably
    # meant to be 'pass'; confirm intent before changing.
    for evar, fvar in zip(evars, fvars):
    #    try:
    # import fill values 
    #        fv = epicnc.variables[evar]._FillValue
    #        print 'evar:', evar, epicnc.variables[evar]._FillValue
    #        fisnc.variables[fvar]._FillValue = fv
    #        print 'fvar:', fvar, fisnc.variables[fvar]._FillValue 
    #    except AttributeError:
    #        print '_FillValue was not found for:', evar, 'or', fvar
    #        continue
        try:
    # import sample interval
    # NOTE(review): this reads time_between_ping_groups from the FISUA
    # target file, not the EPIC source — confirm that is intended
            si = fisnc.time_between_ping_groups
            fisnc.variables[fvar].sample_interval = si
        except AttributeError:
            continue
        try:
    # import averaging interval
    # NOTE(review): this overwrites the sample_interval set just above
    # with the EPIC DELTA_T value whenever DELTA_T exists
            fisnc.variables[fvar].sample_interval = epicnc.DELTA_T
        except AttributeError:
            continue
        try:
    # import valid range 
            fisnc.variables[fvar].valid_range = \
            epicnc.variables[evar].valid_range
        except AttributeError:
            continue
        except KeyError:
            print evar, 'not found: skipped.'
            continue
        try:
    # import min and max 
            fisnc.variables[fvar].minimum = \
            epicnc.variables[evar].minimum
            fisnc.variables[fvar].maximum = \
            epicnc.variables[evar].maximum
        except AttributeError:
            continue
        except KeyError:
            print evar, 'not found: skipped.'
            continue


def loadMB(dump, fname, gfile):
    """Loads fname netcdf file with the dump from a dat minibat output file"""
    # Split dump by white space
    d = map(str.split, dump)
    # Initialise data array
    data = []
    # Loop records
    for l in d:
    # Convert string to float
        a = map(float,l)
    # Append to data
        data.append(a)
    # Make numpy array
    data = numpy.vstack(data)
    # Transpose for looping over single variable
    data = data.transpose()
    # Build lambda function to return a datetime object for the 1st day of
    # the year
    def dater(y) : return datetime(y, 1, 1)
    # Build lambda function to return a timedelta object with days as input
    # subtracting 1 day to convert between julian and yearday (1.0001 vs
    # 0.0001 on shortly past midnight January the 1st).
    def deltaer(y) : return timedelta(days = y - 1)
    # Create datetime objects for the first day of the year of the timestamp
    dol = map(dater, map(int, data[0]))
    # Create timedelta objects for each time stamp
    dtl = map(deltaer, data[1])
    # Add timedelta to datetime objects
    dol = map(operator.add, dol,dtl)
    # Extract pre-industrial from gregorian
    gdate = map(_parts.dTime2Loch, dol)
    # Remove 1st two rows (year and YD) from data
    data = data[2:,:]
    # Remove last row (lay-back) from data
    data = data[0:-1,:]
    # open ntcdf file
    fdataset = Dataset(fname, 'a')
    # time (convert to seconds from pre-industrial datum)
    fdataset.variables['time'][:] = numpy.array(gdate) * 3600. * 24.
    # Try to dump data and if it fails return the and variables files
    # which are  wrongly dimensioned
    try:
        for key, line in \
            zip(reversed(fdataset.variables), reversed(data)):
    # Find fill value for the variable
            fv = fdataset.variables[key]._FillValue
    # Create masked array
            if key == 'z':
    # Convert the depths into positive values
                l = -1. *  numpy.array(line)
                mma = numpy.ma.masked_where(line == fv, l)
            else:
                mma = numpy.ma.masked_where(line == fv, line)
    # Change fill_value of masked array to nc fill_value
            mma.set_fill_value(fv)
            fdataset.variables[key][:] = mma
    except IndexError:
        print 'Exception while loading', key, 'into', fname
        print 'Original data has length', len(line),\
              'new FISUA netCDF variable has shape', \
              list(fdataset.variables[key][:].shape)
    # Write history
    do = datetime.now()
    date = do.strftime('%Y%m%d%H%M%S')
    hist = '\t* ' + date + ' - Converted to FISUA v0.0 format\n'
    fdataset.history = fdataset.history + hist
    fdataset.modification_date = date
    # Close netcdf file
    fdataset.close()
