"""
This is the driver for making the rivers.nc file.

Eventually it should be a function that accepts the time span as an argument.

3/20/2014  Parker MacCready
"""

# imports
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
import Rfun
reload(Rfun)  # Python 2 builtin: pick up edits to Rfun without restarting the interpreter
# Ldir is a dict of project paths/settings; keys used in this script:
# 'res', 'gtag', 'run', 'out', 'which_matlab'
Ldir = Rfun.get_ldir()
Ldir['run'] = Ldir['res'] + Ldir['gtag'] +'/'  # directory holding this run's input files

# some screen output
from datetime import datetime
date_string = datetime.now().strftime('%Y.%m.%d')
time_string = datetime.now().strftime('%H:%M:%S')
print '*** make_rivers_main.py *** ' + date_string + ' ' + time_string

# get the list of rivers that we need for this run
rnames_frame = pd.read_csv(Ldir['run'] + 'rname_list.txt', header=None, names=['River Name'])
rnames_full = rnames_frame['River Name'].values.tolist()

# swap some names to the ones the rest of the pipeline expects
try:
    rnames_full[rnames_full.index('duwamish')] = 'green'
    rnames_full[rnames_full.index('hammahamma')] = 'hamma'
except ValueError:
    # list.index raises ValueError when the name is absent; a bare except
    # here would also have hidden unrelated errors
    print('Problem swapping river names')

# DEBUGGING switch: work on every river, or just a short test subset
use_all_rivers = True
if use_all_rivers:
    rnames = rnames_full
else:
    rnames = ['skagit', 'columbia', 'fraser']

# initialize the per-river ID and scale-factor lists
usgs_id, nws_id, can_id, scale_factor = [], [], [], []

print_stuff = False
if print_stuff: # helpful for debugging, first print a header line
    print('%15s %10s %10s %10s %10s %10s %10s' % ('name', 'USGS_ecy', 'ECY_sf',
    'NWS_code', 'Has_NWS', 'USGS_nws', 'code_usgs'))

# step through the river names    
# step through the river names, deciding on a USGS, NWS, and Canadian code
# plus a scale factor for each
for rn in rnames_full:

    # scale factors linked to USGS numbers (from the Ecology file, right?)
    code_ecology, scale_factor_ecology = Rfun.get_river_code_ecology(rn)

    # NWS (forecast) codes for some USGS gauged rivers
    code_nws, has_nws_forecast, code_nws_usgs = Rfun.get_river_code_nws(rn)

    # choose the USGS code: agreement wins; otherwise take whichever
    # source has a usable value; 'GACK' marks an unresolvable conflict
    ecology_unusable = code_ecology in ('MISSING', 'Hybrid')
    nws_usgs_missing = (code_nws_usgs == 'MISSING')
    if code_ecology == code_nws_usgs:
        code_usgs = code_nws_usgs
    elif ecology_unusable and not nws_usgs_missing:
        code_usgs = code_nws_usgs
    elif nws_usgs_missing and not ecology_unusable:
        code_usgs = code_ecology
    else:
        code_usgs = 'GACK'

    # intervene by hand for skagit_south
    if rn == 'skagit_south':
        code_usgs = '12200500'
        scale_factor_ecology = 0

    # the fraser is Canadian: its code goes in code_can, not code_usgs
    if rn == 'fraser':
        code_can = code_ecology
        code_usgs = 'MISSING'
    else:
        code_can = 'MISSING'

    # keep code_nws only when a forecast actually exists
    if has_nws_forecast == 'NO':
        code_nws = 'MISSING'
    elif has_nws_forecast != 'YES':
        code_nws = 'GACK'

    # accumulate into the parallel lists
    usgs_id.append(code_usgs)
    nws_id.append(code_nws)
    can_id.append(code_can)
    scale_factor.append(scale_factor_ecology)

    if print_stuff: # helpful for debugging
        print('%15s %10s %10.2f %10s %10s %10s %10s' % (rn, code_ecology, scale_factor_ecology,
        code_nws, has_nws_forecast, code_nws_usgs, code_usgs))

# collect the results in a DataFrame indexed by river name
rdf = pd.DataFrame(
    {'NWS_ID': nws_id, 'USGS_ID': usgs_id,
     'CAN_ID': can_id, 'Scale_Factor': scale_factor},
    index=rnames_full)
rdf.index.name = 'River_Name'
# the dict constructor orders columns alphabetically; restore the
# original explicit column order
rdf = rdf[['NWS_ID', 'USGS_ID', 'CAN_ID', 'Scale_Factor']]
# tidy!

# next we go through the rivers in rdf and get the data for each
riv_dict = {}
good_riv_list = []
bad_riv_list = []

for rn in rnames: #rdf.index:
    # set up some things
    got_nws_data = False
    got_usgs_data = False
    
    # get the ID's and scale factor for this river
    aa = rdf.ix[rn]
    
    # get the NWS Forecast
    if aa['NWS_ID'] != 'MISSING':
        id = aa['NWS_ID']
        print '** working on ' + rn + ' NWS: ' + id        
        qt, got_nws_data, memo = Rfun.get_nws_data(id)
        print '  -----' + memo + '-------'
        # and if that fails try the USGS  recent observations
        if got_nws_data == False:
            id = aa['USGS_ID']
            print '  >> trying USGS: ' + id
            qt, got_usgs_data, memo = Rfun.get_usgs_data(id)
            print '  -----' + memo + '-------'
            
    # then do some processing and pack them in a dict
    if got_nws_data | got_usgs_data:  
        # fix time zone (e.g. USGS reports in local time)    
        # qt = qt.tz_convert('UTC')
        # 3/21/2014 Why doens't this work anymore - as of move to Canopy
        # block average to daily values
        qt = qt.resample('D', how='mean', label='right', loffset='-12h')
        # fix the scaling
        qt = qt*aa['Scale_Factor']
        
        # pack into an item in a dict
        riv_dict[rn] = qt
        good_riv_list.append(rn)
    else:
        bad_riv_list.append(rn)
        
# convert riv_dict to a DataFrame (plot using riv_df.plot() - very cool!)
riv_df = DataFrame(riv_dict)

# make sure all values are filled (assumes persistence is OK)
riv_df = riv_df.fillna(method='bfill').fillna(method='ffill')

# add columns for the rivers with no observed data (filled with
# climatology later); set membership is O(1) per lookup
got_data = set(good_riv_list)
missing_riv_list = [val for val in rnames_full if val not in got_data]
for rn in missing_riv_list:
    riv_df[rn] = np.nan
# riv_df should now have columns for ALL the rivers, many with no data
# riv_df should now have columns for ALL the rivers, many with no data

# load the climatology tables, both indexed by yearday
# flow
clim_df = pd.read_csv('./river_climatology/Qclim.csv', index_col='yearday')
# temperature
tclim_df = pd.read_csv('./river_climatology/Tclim.csv', index_col='yearday')
# note clim_df.plot() makes a nice plot of this!
# note clim_df.plot() makes a nice plot of this!

# use climatology for missing values
riv_df_new = riv_df.copy()
for rn in riv_df:
    this_riv = riv_df[rn]
    # True where the observed record has NO value; the original name
    # "isgood" was inverted - these are the missing entries
    is_missing = this_riv.isnull()
    for t in this_riv.index:
        if is_missing[t]:
            yd = t.dayofyear
            # assumes Qclim rows line up so that yearday yd maps to
            # .ix[yd-1] - TODO confirm against the Qclim.csv index
            cval = clim_df.ix[yd-1, rn] * rdf.ix[rn, 'Scale_Factor']
            riv_df_new.ix[t, rn] = cval
            
# make temperature purely from climatology
triv_df = DataFrame(np.nan, index=riv_df_new.index, columns=riv_df_new.columns)
triv_df_new = triv_df.copy()
for rn in triv_df:
    this_triv = triv_df[rn]
    # triv_df starts all-NaN, so every entry is "missing" here and gets
    # climatology (renamed from the inverted "isgood")
    is_missing = this_triv.isnull()
    for t in this_triv.index:
        if is_missing[t]:
            yd = t.dayofyear
            # same yearday -> .ix[yd-1] convention as the flow fill above
            cval = tclim_df.ix[yd-1, rn]
            triv_df_new.ix[t, rn] = cval

# make sure that the columns are in the correct order
riv_df_new = riv_df_new.reindex(columns=rnames_full)
triv_df_new = triv_df_new.reindex(columns=rnames_full)

# it would be nice to check here that the result is good (no NaN's)

# Timestamp.value is nanoseconds since 1970-01-01, so /1e9 gives
# seconds since the epoch for Matlab to use as the index
Tsec = np.array([ttt.value for ttt in riv_df_new.index]) / 1e9

def _write_for_matlab(df, fn):
    # re-index df on epoch seconds and write it out for Matlab to use
    out_df = DataFrame(df.values, index=Tsec, columns=df.columns)
    out_df.index.name = 'Tsec'
    out_df.to_csv(fn)

_write_for_matlab(riv_df_new, 'current_river_table.csv')
_write_for_matlab(triv_df_new, 'current_triver_table.csv')

# finally call the Matlab code to write to rivers.nc, and write a LOG ENTRY
from datetime import datetime
date_string = datetime.now().strftime('%Y.%m.%d')
time_string = datetime.now().strftime('%H:%M:%S')
logfile_name = Ldir['out'] + Ldir['gtag'] + '/RIVER_LOG.csv'
try:
    import subprocess
    subprocess.call([Ldir['which_matlab'],"-nojvm -nodisplay < make_river_netcdf_for_ROMS.m >& matlab_screen_output"])
    
    # write a line to the log
    f_string = 'f' + date_string # presumably what MATLAB named it (risky!)
    log_string = Ldir['gtag'] + ',' + f_string + ',' + 'rivers.nc' + ',' + date_string + ',' + time_string + '\n'    
    with open(logfile_name,'a') as file:
        file.write(log_string)
except:
    # write a line to the log
    f_string = 'f' + date_string # presumably what MATLAB named it (risky!)
    log_string = Ldir['gtag'] + ',' + f_string + ',' + 'FAILED' + ',' + date_string + ',' + time_string + '\n'
    with open(logfile_name,'a') as file:
        file.write(log_string)
        
# last screen output
print '*** DONE ***'






