#************************************************************************
# io modules 
#************************************************************************
# stdlib
import os
import csv
import datetime
import subprocess
import time

# third-party
import numpy as np
import Nio

# project-local
import powercurve
import tools
import wrftools
from csvtools import CommentedFile
from custom_exceptions import DomainError
from odict import OrdDict

# One-hour timedelta: multiplied by a forecast-hour offset to turn an
# init_time into a valid_time (see write_time_series and power)
HOUR = datetime.timedelta(0, 60*60)


#*****************************************************************
# Read location files
#*****************************************************************

def _read_locations(locations_file):
    """Reads a set of names, locations, heights and variable names from a text file.

    Each CSV row must provide at least the columns used elsewhere in
    this module: name, lat, lon, hgt and vars (the last two being
    ':'-separated lists).  CommentedFile presumably strips comment
    lines before parsing -- verify in csvtools.

    Arguments:
        @locations_file: path to the CSV locations file

    Returns a list of dicts, one per row, keyed by the CSV header."""

    logger = wrftools.get_logger()
    logger.debug("Reading locations from %s" %locations_file)
    # context manager ensures the handle is closed even if parsing fails
    # (the original opened/closed manually)
    with open(locations_file, 'r') as f:
        reader = csv.DictReader(CommentedFile(f))
        contents = [row for row in reader]
    return contents




def _in_domain(lat, lon, lat2d, lon2d):
    """Return True if (lat, lon) lies inside the domain described by
    lat2d and lon2d.

    This is only a bounding-box test against the min/max of each
    coordinate array, not a test against the true domain outline."""

    logger = wrftools.get_logger()

    lat_lo, lat_hi = np.min(lat2d), np.max(lat2d)
    lon_lo, lon_hi = np.min(lon2d), np.max(lon2d)

    inside = (lat_lo <= lat <= lat_hi) and (lon_lo <= lon <= lon_hi)
    if not inside:
        logger.debug("point (%0.3f, %0.3f) is not within domain (%0.2f, %0.3f, %0.3f, %0.3f)" %(lat, lon, lat_lo, lat_hi, lon_lo, lon_hi))
    return inside
    

def _get_index(lat, lon, lat2d, lon2d):
    """ Finds the nearest mass point grid index to the point (lon, lat).
        Works but is slow as just naively searches through the arrays point
        by point. Would be much better to implement the ncl function
        wrf_user_get_ij to use projection information to actually calculate
        the point in projected space.

        Arguments:
            @lat: the latitude of the target point
            @lon: longitude of the target point
            @lat2d: 2d array of latitudes of grid points
            @lon2d: 2d array of longitudes of grid points

       Returns (x, y, dist): index of the nearest grid cell and its
       distance (same units as tools.haversine, presumably metres).
       Raises DomainError if the point is outside the domain, or if the
       nearest cell lies on the domain boundary (the truly nearest
       location may then be off-grid)."""

    logger = wrftools.get_logger()
    logger.debug("finding index of (%0.3f, %0.3f) " %(lat,lon))
    west_east   = lat2d.shape[0]
    south_north = lat2d.shape[1]
    logger.debug("dimensions of domain are: %d south_north, %d west_east" %(south_north, west_east))

    if not _in_domain(lat, lon, lat2d, lon2d):
        logger.error('point (%0.3f, %0.3f) not in model domain' % (lat, lon))
        raise DomainError('point (%0.3f, %0.3f) not in model domain' %(lat, lon))

    #
    # Slow, but will work: exhaustively search every grid point for the
    # smallest great-circle distance.  The previous version looped over
    # range(dim-1) and therefore never examined the last row/column;
    # the loops now cover the full grid.
    #
    min_dist = 10000000  # if the point is further than 10M m away, don't bother!
    min_x = 0
    min_y = 0

    for x in range(west_east):
        for y in range(south_north):
            d = tools.haversine(lat, lon, lat2d[x, y], lon2d[x, y])
            if d < min_dist:
                min_dist = d
                min_x = x
                min_y = y

    # Reject results on any boundary of the grid: the genuinely nearest
    # point may lie outside the domain.  (The old upper-bound checks
    # min_x > west_east could never fire; this makes them symmetric
    # with the min_x == 0 case.)
    if min_x == 0 or min_y == 0 or min_x == west_east - 1 or min_y == south_north - 1:
        logger.error("Point is on/off edge of of model domain, this should have been caught earlier!")
        raise DomainError("Point is on/off edge of of model domain")

    logger.debug('nearest grid index is x=%d, y=%d, %0.3f m away' %(min_x, min_y, min_dist))
    logger.debug('latitude, longitude of original is (%0.3f, %0.3f)' %(lat, lon))
    logger.debug('latitude, longitude of index is (%0.3f, %0.3f)' %(lat2d[min_x,min_y], lon2d[min_x,min_y]))

    return (min_x, min_y, min_dist)
    
def _read_grb_table(filename):
    """Reads a table mapping standard variable names to the names found by Nio
    in the grib file.

    Each non-comment line has the form 'STD_NAME:nio_name'.  Uses an
    ordered dictionary so that variables are stored in the same order
    they are defined, so that derived variables come after the
    definition of their components.
    This could (should?) be done using the numeric grib code.

    Returns an OrdDict of {standard_name: nio_name}."""

    # close the underlying file handle deterministically (the original
    # never closed it)
    with open(filename, 'r') as raw:
        lines = CommentedFile(raw).readlines()
    tokens = [l.strip('\n').split(':') for l in lines]
    d = OrdDict()
    for t in tokens:
        d[t[0]] = t[1]
    return d

def _expression(var_name):
    """Works out whether a variable definition is actually 
    an expression """
    return '(' in var_name or '+' in var_name or '-' in var_name

def write_time_series(config):
    """Writes time series of variables out to csv files.
    Relies on PyNio.

    For each location in config['locations_file']: find the nearest
    grid cell in this init_time's post-processed grib file, extract
    each requested variable at each requested height, and write one
    record-format text file per (location, variable, height) into
    <domain_dir>/<model_run>/tseries.

    Arguments:
        @config: dict supplying at least domain, test_case, domain_dir,
                 model_run, dom, init_time and locations_file

    Raises DomainError (via _get_index) if a location falls outside the
    model domain."""

    logger = wrftools.get_logger()
    
    logger.info('*** EXTRACTING TIME SERIES ***')

    domain          = config['domain']
    test_case       = config['test_case']
    domain_dir      = config['domain_dir']
    model_run       = config['model_run']
    dom             = config['dom']
    init_time       = config['init_time']
    locations_file  = config['locations_file']


    locations       = _read_locations(locations_file)
    # grb_table maps standard names (UGRD, VGRD, ...) to the variable
    # names Nio reports for this grib file
    grb_table       = _read_grb_table('%s/%s/grb_table.txt' % (domain_dir, model_run))
    grb_file        = '%s/%s/archive/wrfpost_d%02d_%s.grb' %(domain_dir, model_run,dom, init_time.strftime('%Y-%m-%d_%H'))
    ts_dir          = '%s/%s/tseries' % (domain_dir, model_run)

    if not os.path.exists(ts_dir):
        os.mkdir(ts_dir)
    
    #
    # Actually open the dataset.
    # We should add another layer of redirection
    # to the variable names, so we use a generic 
    # UGRD, VGRD here, in case the name changes 
    # in other grib files
    #
    logger.debug("opening dataset: %s" % grb_file)
    dataset = Nio.open_file(grb_file, 'r')
    
    
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # TODO
    # Implement extracting the variables from the 
    # locations file.  We need to make this
    # more flexible and generic. E.g. suppose we
    # want to extract temperature? 
    #
    # ALSO TODO
    # Nio extended selection supports inerpolation to multiple points 
    # in one step - vectorise this
    #
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    
    # data maps standard name -> Nio variable object, or a plain numpy
    # array for the derived SPEED entry
    data = {}
    for std_name in grb_table.keys():
        long_name = grb_table[std_name]
        logger.debug("Extracting %s as %s" % (long_name, std_name))
        
        if not _expression(long_name):
            var = dataset.variables[long_name]
            data[std_name] = var
        else:
            # derived variables: only SPEED is supported, and its
            # definition is hard-coded here rather than parsed from
            # the table expression
            logger.warn("Derived variables are not generally implemented")
            if std_name == 'SPEED':
                logger.warn("You're lucky derived variable SPEED has been hard-coded")
                # relies on UGRD and VGRD appearing earlier in grb_table
                # (the ordered dict guarantees definition order)
                speed  = (data['UGRD'][:]**2 + data['VGRD'][:]**2)**0.5
                data[std_name] = speed
                #logger.debug(speed)
            else:
                logger.warn("Skipping derived variable %s" %long_name)
                continue
         
    #
    # Check init_time from grb file
    #
    init_str = data['UGRD'].initial_time
    init_grb = datetime.datetime(*time.strptime(init_str, '%m/%d/%Y (%H:%M)')[0:6])
    # sanity check: the grib file must belong to the requested init time
    # (NOTE(review): assert is stripped under python -O)
    assert init_grb==init_time

    #
    # This could be vectorised - in particular the interpolation may 
    # be faster - however currently each time series is written to 
    # a single seperate file, so looping is not so bad after all.
    # Only motivation to change is if we dump all the output to a 
    # single file (or even sqlite database?)
    #
    for park in locations:
        logger.debug('Time series at %s' % park['name'])
        name     = park['name']
        lat      = float(park['lat'])
        lon      = float(park['lon'])
        # heights and variable names are ':'-separated lists in the file
        hgts     = park['hgt'].strip().split(':')
        var_names = park['vars'].strip().split(':')

        (x,y, dist) = _get_index(lat, lon, data['LAT2D'], data['LON2D'])

        # level heights -- presumably metres above ground, TODO confirm
        # against the grb_table definition of Z
        z = data['Z'][:]
        logger.debug("index of nearest grid location: x=%s, y=%s" %(x, y))
        
        for var_name in var_names:

            logger.debug("processing %s" % var_name)

            for hgt_str in hgts:
                # pick the model level closest to the requested height
                hgt   = int(hgt_str)
                level = (np.abs(z-hgt)).argmin()                
                new_hgt = z[level]
                logger.debug('processing level %s: %sm, requested %sm' %(level, new_hgt, hgt))
                
                #
                # DO UGRD, VGRD and speed seperately - this needs cleaned up soon
                #
                var        = data[var_name]
                out_name   = '%s/%s_d%02d_%s_%s_%s.txt' %(ts_dir, name, dom, var_name, hgt, init_time.strftime('%Y-%m-%d_%H'))
                out_file   = open(out_name, 'w')
      
                #
                # Can't use extended selection because of derived variables
                #               
                #interp_str = 'forecast_time0|: lv_HTGL5|%s g3_x_1|i%s g3_y_2|i%s' %(hgt, x, y)
                #logger.debug('interpolating using: [%s] ' %interp_str)
                logger.debug("extracting variable: %s[:, %s, %s, %s]" % (var_name, level, x, y))
                t0 = time.time()
                # dimension order assumed (time, level, x, y) -- matches
                # the indexing used throughout this block
                var_point   = var[:, level, x, y]

                t1 = time.time()
                logger.debug('interpolation done in %0.3f s' % (t1-t0))
    
                logger.debug('shape of point extracted is %s' %var_point.shape)            
                # one output record per forecast hour
                for fhr in data['FCST_TIME'][:]:
                    valid_time = init_time + fhr * HOUR
                    val        = var_point[int(fhr)]
                    delta      = valid_time - init_time  
                    line       = '%s,%s,%s,%02d,%02d,%s,%s,%s,%s,%s,%s,%s,%s\n' %(domain, 
                                                                  model_run, 
                                                                  test_case, 
                                                                  dom, 
                                                                  fhr, 
                                                                  valid_time, 
                                                                  init_time, 
                                                                  var_name,
                                                                  name,
                                                                  lat, 
                                                                  lon, 
                                                                  hgt,
                                                                  val)
                    out_file.write(line)
                out_file.close()
    
    logger.debug("closing dataset: %s" % grb_file)
    dataset.close()



def _read_time_series(fname):
    """Read a formatted time-series file into a numpy record array."""

    logger = wrftools.get_logger()
    logger.debug('reading tseries file %s' % fname)

    #
    # Current format of time series files is:
    # domain_dir  , model_run, test_case, dom, fhr, valid,              init,                var, obs_sid,  lat,   lon,  hgt, val
    # baseline_gfs, test,      A,          01, 00, 2006-11-30 12:00:00, 2006-11-30 12:00:00, UGRD, ormonde,  58.1,  -3.0, 100, 3.416
    #
    field_names = ['domain', 'model_run', 'test_case', 'grid_id', 'fhr',
                   'valid_time', 'init_time', 'var', 'obs_sid', 'lat',
                   'lon', 'hgt', 'val']
    # columns 5 and 6 hold timestamps; parse them into datetimes
    time_parsers = {5: wrftools.strptime, 6: wrftools.strptime}

    return np.genfromtxt(fname, delimiter=',', names=field_names,
                         dtype=None, converters=time_parsers)




def power(config):
    """Reads 'time series' files written by write_time_series. Creates
    arrays of U, V, derives speed and direction, loads the location's
    power curve and converts to power.

    Arguments:
        @config: dict supplying domain, test_case, domain_dir,
                 model_run, dom, init_time, locations_file and
                 pcurve_dir

    Side effects: writes one 'PWR' time-series text file per
    (location, height) into the tseries directory, in the same
    record format as write_time_series."""

    #
    # TODO
    #
    # Really, the writing to and reading from disk of time series could be saved by using a pipe
    # but let's use this method for now as the string parsing will be the same anyway
    #
    logger          = wrftools.get_logger()
    domain          = config['domain']
    test_case       = config['test_case']
    domain_dir      = config['domain_dir']
    model_run       = config['model_run']
    dom             = config['dom']
    init_time       = config['init_time']
    locations_file  = config['locations_file']
    pcurve_dir      = config['pcurve_dir']
    locations       = _read_locations(locations_file)

    ts_dir          = '%s/%s/tseries' % (domain_dir, model_run)

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # Remember - grid winds are not rotated to earth winds yet
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    for park in locations:
        name      = park['name']
        lat       = park['lat']
        lon       = park['lon']
        hgts      = park['hgt'].strip().split(':')
        logger.info('Predicting power output for %s' % name )

        for hgt in hgts:
            u_file      = '%s/%s_d%02d_%s_%s_%s.txt' %(ts_dir, name, dom, 'UGRD', hgt, init_time.strftime('%Y-%m-%d_%H'))
            v_file      = '%s/%s_d%02d_%s_%s_%s.txt' %(ts_dir, name, dom, 'VGRD', hgt, init_time.strftime('%Y-%m-%d_%H'))
            u_series    = _read_time_series(u_file)
            v_series    = _read_time_series(v_file)

            # U and V files are assumed to cover the same valid times;
            # only the U timestamps are used below -- TODO confirm
            times_u     = u_series['valid_time']
            u           = u_series['val']
            v           = v_series['val']

            #
            # Create a 'time series' object. Maybe we don't need to create this
            # but will offer us flexibility later.
            #
            tseries =  {'FCST_TIME': np.array(times_u), 'UGRD': np.array(u), 'VGRD': np.array(v)}

            #
            # Open power curve
            #
            pcurve = powercurve.from_file('%s/%s.csv' %(pcurve_dir, name))

            u = tseries['UGRD']
            v = tseries['VGRD']

            speed       = (u**2+v**2)**0.5
            direction   = tools.bearing(u,v)
            tseries['direction'] = direction

            #
            # TODO: due to the way scipy interpolate works
            # the x and y data must be monotonic
            #
            # To get around this, we just loop over each speed, direction
            # pretty slow, so we should change
            #
            pwr = np.zeros(speed.shape[0])
            for n in range(speed.shape[0]):
                pwr[n] = pcurve.power(speed[n], direction[n])
                #logger.debug('%0.2f  %0.2f  %0.2f' %(speed[n], direction[n], pwr[n]))

            tseries['PWR'] = pwr

            #
            # Write the power to a time series as well
            # this will be a slightly different format,
            # but no matter
            #
            pwr   = tseries['PWR']
            times = tseries['FCST_TIME']

            #
            # We want this to be a record-based format, mimicking the the mpr format output from
            # met so we can use the same visualisation and error tools
            #
            out_name = '%s/%s_d%02d_%s_%s_%s.txt' %(ts_dir, name, dom, 'PWR', hgt, init_time.strftime('%Y-%m-%d_%H'))
            # context manager guarantees the file is flushed and closed;
            # the original opened it and never closed it
            with open(out_name, 'w') as out_file:
                for n in range(times.shape[0]):
                    valid_time = times[n]
                    delta      = valid_time - init_time
                    fhr        =  ((delta.days*24*60*60) + delta.seconds)/3600
                    value      = pwr[n]
                    line       = '%s,%s,%s,%02d,%02d,%s,%s,%s,%s,%s,%s,%s,%s\n' %(domain, 
                                                                          model_run, 
                                                                          test_case, 
                                                                          dom, 
                                                                          fhr, 
                                                                          valid_time, 
                                                                          init_time, 
                                                                          'POWER',
                                                                          name,
                                                                          lat, 
                                                                          lon, 
                                                                          hgt,
                                                                          value)

                    out_file.write(line)

        logger.info('finished %s' %name)





#*************************************************************************
# Deprecated
# functions below here are marked for deletion
#*************************************************************************


def get_ij_old(config):
    """Deprecated: marked for deletion.

    Runs an NCL script and scrapes the grid indices of two hard-coded
    locations (Thanet and Aberdeen) from its stdout.

    Returns [(thanet_i, thanet_j), (aberdeen_i, aberdeen_j)], each
    element a string parsed from the NCL output.  Raises NameError if
    the expected '(0,0)'..'(1,1)' markers never appear in the output."""
    logger = wrftools.get_logger()    
    logger.info('*** RUNNING NCL EXTRACTION ***')
     
    domain_dir    = config['domain_dir']
    model_run     = config['model_run']
    # the following entries are read but currently unused; retained so
    # a missing config key still fails early with KeyError
    ncl_nc        = config['ncl_nc']
    ncl_grb       = config['ncl_grb']
    ncl_code_dir  = config['ncl_code_dir']
    ncl_nc_code   = config['ncl_nc_code']
    ncl_grb_code  = config['ncl_grb_code']

    # archive_format was previously an undefined global (NameError at
    # runtime); this format matches the archive file naming used in
    # write_time_series
    archive_format = '%Y-%m-%d_%H'

    wrfout_dir    = '%s/%s/'%(domain_dir, model_run)
    init_time     = config['init_time']
    dom           = config['dom']
    grb_file      = '%s/%s/archive/wrfpost_d%02d_%s.grb'    %(domain_dir, model_run, dom, init_time.strftime(archive_format))
    nc_file       = '%s/%s/wrfout/wrfout_d%02d_%s:00:00.nc' %(domain_dir, model_run, dom, init_time.strftime(archive_format))
    
    ncl_out_dir   = '%s/%s/plots/%s/d%02d/'              %(domain_dir,model_run, init_time.strftime(archive_format), dom)    
    ncl_output    = config['ncl_output']
    namelist_wps  = '%(domain_dir)s/%(model_run)s/namelist.wps' % config

    if not os.path.exists(ncl_out_dir):
        os.makedirs(ncl_out_dir)

    #
    # Communicate to NCL via environment variables
    #
    os.environ['DOMAIN_DIR']    = domain_dir
    os.environ['MODEL_RUN']     = model_run
    os.environ['NCL_OUT_DIR']   = ncl_out_dir
    os.environ['NCL_OUTPUT']    = ncl_output 
    os.environ['NAMELIST_WPS']  = namelist_wps
    os.environ['FCST_FILE']     = grb_file
    cmd    = "ncl %s/wrf_grib_series.ncl" % (ncl_code_dir)
    p      = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    lines  = p.stdout.readlines()
    # NCL prints index arrays; the '(row,col)' markers identify which
    # value is which
    for line in lines:
        if '(0,0)' in line:
            thanet_i = line.split()[-1]
            logger.debug(thanet_i)
        if '(0,1)' in line:
            aberdeen_i = line.split()[-1]
            logger.debug(aberdeen_i)
        if '(1,0)' in line:
            thanet_j = line.split()[-1]
            logger.debug(thanet_j)
        if '(1,1)' in line:
            aberdeen_j = line.split()[-1]
            logger.debug(aberdeen_j)

    # fixed typo: previously referenced undefined name 'abderdeen_j'
    return [(thanet_i, thanet_j), (aberdeen_i, aberdeen_j)]


def _deprecated_write_time_series_old(config):
    """Writes time series of variables out to csv files.
    Used for later conversion to power. Locations are read from location file  
    Deprecated - relies on grib2 files and wgrib2, use PyNio instead"""

    # was get_logger(): undefined name in this module
    logger = wrftools.get_logger()
    logger.info('*** EXTRACTING TIME SERIES ***')

    domain          = config['domain']
    test_case       = config['test_case']
    domain_dir      = config['domain_dir']
    model_run       = config['model_run']
    dom             = config['dom']
    init_time       = config['init_time']
    locations_file  = config['locations_file']
    logger.debug("Reading locations from %s" %locations_file)
    # was read_locations: undefined name, the module defines _read_locations
    locations       = _read_locations(locations_file)
    grb_file        = '%s/%s/archive/wrfpost_d%02d_%s.grib2' %(domain_dir, model_run,dom, init_time.strftime('%Y-%m-%d_%H'))
    ts_dir          = '%s/%s/tseries' % (domain_dir, model_run)

    if not os.path.exists(ts_dir):
        os.mkdir(ts_dir)

    for park in locations:
        logger.debug('Extracting time series at %s' % park['name'])
        name     = park['name']
        lat      = park['lat']
        lon      = park['lon']
        hgts     = park['hgt'].strip().split(':')
        var_names = park['vars'].strip().split(':')
        logger.debug('%s, %s, %s, %s, %s' % (name, lat, lon, hgts, var_names))

        #
        # Do this in loops: slow.
        #

        for var in var_names:
            for hgt in hgts:
                out_name = '%s/%s_d%02d_%s_%s_%s.txt' %(ts_dir, name, dom, var, hgt, init_time.strftime('%Y-%m-%d_%H'))
                out_file = open(out_name, 'w')
                
                #
                # Problem with the csv output is that it wants to dump the whole 
                # grib file to csv format. Can't restrict it to single point.
                # So we will just direct output to a file and have to deal with the 
                # format later
                #
                #cmd = "wgrib2 %s -s -lon %s %s | egrep '%s:%s m above ground' | wgrib2 -i %s -csv %s" % (grb_file, lon, lat, var, hgt, grb_file, out_name)
                # (a dead store of a redirect-to-file wgrib2 command was
                # removed here; it was always overwritten by the piped
                # command below)
                
                #run_cmd(cmd, config)
                #wgrib2 wrfpost_d01_2006-11-30_12.grib2 -s -lon 1.0 58 | egrep 'UGRD:80 m above ground' | wgrib2 -i wrfpost_d01_2006-11-30_12.grib2 -csv out
    
                #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                # Work in progress
                #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                
                #
                # Instead of writing to a file directly from wgrib,
                # pipe the output and reformat before writing the file
                #
                cmd = 'wgrib2 %s -s -lon %s %s  -text -lon -match "%s:%s m above ground" ' %(grb_file, lon, lat, var, hgt)
                logger.debug(cmd)
                p      = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                lines  = p.stdout.readlines()
                times = []
                vals  = []
                for line in lines:
                    # (a bare no-op 'logger.debug' attribute reference
                    # was removed here)
                    tokens    = line.strip('\n').split(':')
                    init_str  = tokens[2]
                    init_time = datetime.datetime(*time.strptime(init_str, 'd=%Y%m%d%H')[0:5])
                    # 'anl' (analysis) records count as forecast hour 0
                    fhr_token = tokens[5].split()[0].replace('anl', '0')
                    fhr       = int(fhr_token)
                    valid_time = init_time + fhr * HOUR
                    val        = float(tokens[7].split('=')[-1].strip('\n'))
                    delta      = valid_time - init_time  
                    fhr        =  ((delta.days*24*60*60) + delta.seconds)/3600

                    line       = '%s,%s,%s,%02d,%02d,%s,%s,%s,%s,%s,%s,%s,%s\n' %(domain, 
                                                                  model_run, 
                                                                  test_case, 
                                                                  dom, 
                                                                  fhr, 
                                                                  valid_time, 
                                                                  init_time, 
                                                                  var,
                                                                  name,
                                                                  lat, 
                                                                  lon, 
                                                                  hgt,
                                                                  val)
                    logger.debug(line)
                    out_file.write(line)
                
                out_file.close()














