#!/home/disk/pvort/lmadaus/nobackup/pylibs/epd/bin/python

# Script to read in profiles from bufkit in Profile format
# and plot the forecast times
import os, re, sys
from datetime import datetime,timedelta
import pickle
from optparse import OptionParser
import numpy as np

from INFILE import siteid, webdir, UTC_offset, maindir
sys.path.append('%s/data_parsers' % maindir)

# Command-line interface: station id to plot and whether to export PNGs.
parser = OptionParser()
parser.add_option('-s', '--site', dest='siteid', action='store',
                  type='string', default=siteid,
                  help='4-letter id of desired site')
parser.add_option('-e', '--export', dest='export', action='store_true',
                  default=False,
                  help='Write images to png or not.')
opts, args = parser.parse_args()
# Station ids are lowercase everywhere in this script.
siteid = opts.siteid.lower()
export_flag = opts.export


print "Importing surface_parse_bufkit"
from surface_parse_bufkit import bufkit_parser, obs_parser, mos_parser, split_sref
print "Done"

print "Importing matplotlib"
import matplotlib
# The backend must be selected before pylab is imported for it to take
# effect; use the non-interactive Agg backend when only writing PNGs.
if export_flag:
    matplotlib.use('agg')

print "Importing pylab"
import pylab

#siteid = 'keri'
# outdir is the directory the finished plots are moved to when exporting
#outdir = '/home/disk/pvort/lmadaus/public_html/bufkit/plots'
outdir = webdir



# Refresh the SREF data: clear stale bufkit/bufr files, then re-run the
# bufr_gruven downloader for the requested station.
os.system('rm -f ../bufrgruven/metdat/bufkit/*.buf')
os.system('rm -f ../bufrgruven/metdat/bufr/*')
os.system('../bufrgruven/bufr_gruven.pl --dset sref --stations %s --noascii --nozipit --forced --forcep' % siteid)

# Only interested in the 24-hour period we're forecasting for
# with a lead-in time.  ftime is the actual times we're
# forecasting between, ptime is the times we're plotting

# Anchor the forecast window on hour 06 of the *next* day.
# NOTE(review): datetime.now() is machine-local time -- presumably the
# host clock runs in UTC so hour=6 means 06Z; confirm.
nowtime = datetime.now()
stime = nowtime.replace(hour=6,minute=0,second=0,microsecond=0)
stime = stime + timedelta(hours=24)
print "HERE", stime.strftime('%Y%m%d%H')

start_ftime = stime
# this must change depending on UTC offset -- the wind verification
# window is shifted by (UTC_offset - 6) hours relative to the main one.
start_ftime_wind = stime + timedelta(hours=(UTC_offset-6))
# Plot 18 h of lead-in before the 24 h forecast window and 6 h after it.
lead_time = timedelta(hours=18)
fcst_window = timedelta(hours=24)
end_ftime = start_ftime + fcst_window
end_ftime_wind = start_ftime_wind + timedelta(hours=24)
start_ptime = start_ftime - lead_time
end_ptime = end_ftime + timedelta(hours=6)

# Hourly list of every time that will appear on the plots
# (start_ptime .. end_ptime inclusive).
timelist = []
t = start_ptime
fcst_delta = timedelta(hours=1)
while t <= end_ptime:
    timelist.append(t)
    t = t + fcst_delta

# Grab the most recent files
# Use Penn State repository for profiles
# Clear all old directories
# Remove any bufkit files left over in the working directory from a
# previous run.
os.system('rm -f *.buf* *.buz*')

filelist = []

# Copy in the sref files
os.system('cp ../bufrgruven/metdat/bufkit/sref_%s.buf .' % siteid) 
# and split the combined file into one .buf file per member
split_sref('sref_%s.buf' % siteid)
# Then remove the original file so only the split files match '.buf'
os.system('rm -f sref_%s.buf' % siteid)

# Collect the per-member bufkit files that split_sref just wrote to the
# current directory.
cwd = os.getcwd()
allfiles = os.listdir(cwd)

filelist = [f for f in allfiles if '.buf' in f]

# We assume we've grabbed the most recent files
# from each model and there is some overlap.  There
# should only be one profile for each model.
# Filenames should start with the model they are 
# representing. Get a dictionary of model names.
# Parse each member file into models[member_id] = {valid_time: profile}.
models = {}
for file in filelist:    # NOTE(review): 'file' shadows the py2 builtin
    # Filenames are written by split_sref as sref_<member>.buf, so the
    # member id is everything between the 5-char prefix and the 4-char
    # '.buf' suffix, e.g. 'em_ctl' -- TODO confirm against split_sref.
    modelid = file[5:-4]
    print "MODEL:", modelid
    # Call the model parser.  Its output will be
    # a dictionary of model forecast times.
    if modelid != 'srefmean':
        profile = bufkit_parser(file)
        models[modelid] = profile
    else:
        # For the srefmean, actually import plot_bufkit_timeheight
        # And make the cloud fraction (CFRL) time-height plot instead
        from plot_bufkit_timeheight import plot_timeheight
        plot_timeheight(file, 'CFRL', 'SREF', siteid.upper(),webdir)
    # Each file is deleted as soon as it has been consumed.
    os.system('rm -f %s' % file)

# Slim the dictionaries down to the times inside the plotting window.
# Deleting while iterating is safe here because Python 2's .keys()
# returns a list copy, not a view.
for model in models.keys():
    for t in models[model].keys():
        if t not in timelist:
            del models[model][t]

#print keylist

def C_to_F(TC):
    """Convert a temperature from degrees Celsius to Fahrenheit."""
    return TC * 9. / 5. + 32
def knt_to_mph(SK):
    """Convert a speed from knots to miles per hour."""
    return SK * 1.15077945
def mm_to_in(RM):
    """Convert a precipitation depth from millimeters to inches."""
    return RM * 0.03937


# Make a dictionary for plotting parameters
# Line colors for each SREF member, grouped by model core: blues for the
# EM members, greens for NMM, reds/oranges for NMB.  Used only for
# lookup, so entry order is irrelevant.
color_dict = {
    # EM core
    'em_ctl'  : 'blue',
    'em_n1'   : 'cyan',
    'em_n2'   : 'cornflowerblue',
    'em_n3'   : 'darkcyan',
    'em_p1'   : 'dodgerblue',
    'em_p2'   : 'mediumblue',
    'em_p3'   : 'midnightblue',
    # NMM core
    'nmm_ctl' : 'green',
    'nmm_n1'  : 'olivedrab',
    'nmm_n2'  : 'seagreen',
    'nmm_n3'  : 'darkgreen',
    'nmm_p1'  : 'palegreen',
    'nmm_p2'  : 'SpringGreen',
    'nmm_p3'  : 'limegreen',
    # NMB core
    'nmb_ctl' : 'red',
    'nmb_n1'  : 'IndianRed',
    'nmb_n2'  : 'Salmon',
    'nmb_n3'  : 'Coral',
    'nmb_p1'  : 'crimson',
    'nmb_p2'  : 'brown',
    'nmb_p3'  : 'DarkOrange',
}


def compute_mean(modlist,var,datelist):
    """Return the per-date ensemble mean of `var` over the members in
    modlist, reading profiles from the module-global `models` dict.

    datelist is sorted in place.  'temp' is converted to Fahrenheit,
    'wspd' is taken as-is (knots) and 'precip' always contributes 0.0.
    """
    import numpy as np
    datelist.sort()
    # Dispatch table: how to pull each variable out of one profile.
    pickers = {'temp':   lambda prof: C_to_F(prof.tmpc),
               'wspd':   lambda prof: prof.sknt,
               'precip': lambda prof: 0.0}
    pick = pickers.get(var)
    means = []
    for date in datelist:
        # Unknown variables yield an empty member list (np.mean -> nan),
        # matching the original if/elif fall-through behavior.
        members = [pick(models[m][date]) for m in modlist] if pick else []
        means.append(np.mean(members))
    return means

def compute_median(modlist,var,datelist):
    """Return the per-date ensemble median of `var` over the members in
    modlist, reading profiles from the module-global `models` dict.

    datelist is sorted in place.  'temp' is converted to Fahrenheit,
    'wspd' is taken as-is (knots) and 'precip' always contributes 0.0.
    """
    import numpy as np
    datelist.sort()
    # Dispatch table: how to pull each variable out of one profile.
    pickers = {'temp':   lambda prof: C_to_F(prof.tmpc),
               'wspd':   lambda prof: prof.sknt,
               'precip': lambda prof: 0.0}
    pick = pickers.get(var)
    medians = []
    for date in datelist:
        # Unknown variables yield an empty member list (np.median -> nan),
        # matching the original if/elif fall-through behavior.
        members = [pick(models[m][date]) for m in modlist] if pick else []
        medians.append(np.median(members))
    return medians

def plot_models(var,modlist,oblist,moslist):
    """Plot one variable for every SREF member plus the ensemble median
    and the observations, then save a PNG (export mode) or show it.

    var     -- 'temp' (deg F), 'wspd' (knots) or 'precip' (accumulated in.)
    modlist -- member ids; each must key both the global `models` dict
               and `color_dict`
    oblist  -- {valid_time: observation} dict from obs_parser
    moslist -- MOS guidance ids, drawn dashed (currently always empty)

    Relies on module globals: models, color_dict, the plot/forecast
    window times, siteid, outdir and export_flag.
    """
    pylab.figure(figsize=(12,11))
    matplotlib.rcParams['xtick.minor.pad']=15
    # Accumulate sorted lists
    for model in modlist+moslist:
        var_list = []
        # get a sorted list of keys
        keysorted = models[model].keys()

        keysorted.sort()
        # Now get the variable for each key
        for date in keysorted:
            if var == 'temp':
                # NOTE(review): member ids here are SREF names (em_*/
                # nmm_*/nmb_*), so this 'eta' NaN-first-point branch
                # looks like dead legacy code -- confirm before removing.
                if model.startswith('eta') and keysorted.index(date)==0:
                    var_list.append(np.nan)
                else:
                    var_list.append(C_to_F(models[model][date].tmpc))
            elif var == 'wspd':
                var_list.append(models[model][date].sknt)
            elif var == 'precip':
                # Running accumulation in inches: zero before the
                # forecast window and for MOS traces; negative p01m
                # (missing data) adds nothing.
                if date <= start_ftime:
                    var_list.append(0.0)
                else:
                    if model.endswith('mos'):
                        var_list.append(0.0)
                    else:
                        if models[model][date].p01m >= 0.0:
                            var_list.append(var_list[-1]+mm_to_in(models[model][date].p01m))
                        else:
                            var_list.append(var_list[-1]+0.0)

        # Now plot: MOS guidance dashed, ensemble members thin solid
        if model.endswith('mos'):
            pylab.plot(keysorted,var_list,color=color_dict[model],linestyle='dashed',linewidth=2)
        else:
            pylab.plot(keysorted,var_list,color=color_dict[model],linewidth=1)

    # Changed 5/22/2012 -- now plotting the median instead of the mean.
    # NOTE(review): `keysorted` here is left over from the LAST member in
    # the loop above; this assumes all members share the same times.
    median_list = compute_median(modlist,var,keysorted)
    pylab.plot(keysorted,median_list,color='k',linewidth=3,linestyle='dashed')

    # Now plot the observations as a dotted black line with markers
    keysorted = oblist.keys()
    keysorted.sort()
    var_list = []
    for date in keysorted:
        if var == 'temp':
            var_list.append(C_to_F(oblist[date].tmpc))
        elif var == 'wspd':
            var_list.append(oblist[date].sknt)
        elif var == 'precip':
            if date <= start_ftime:
                var_list.append(0.0)
            else:
                var_list.append(var_list[-1]+mm_to_in(oblist[date].p01m))
    pylab.plot(keysorted,var_list,'ko:',linewidth=2,markersize=5) 
    ax = pylab.gca()

    # Axis-label units for each plotted variable
    unit_dict = {'temp':'F',
                 'wspd':'kts',
                 'precip':'in.'}

    # Some things to make the figure look nice
    pylab.title('SREF Forecast %s at %s' % (var,siteid.upper()))
    pylab.xlabel('Valid Time')
    pylab.ylabel('%s [%s]' % (var.upper(),unit_dict[var]))
    if var == 'precip':
        # Put the legend at the top (loc=9 is upper center)
        leg = pylab.legend(modlist+moslist,loc=9,ncol=7,mode='expand')
    elif var == 'wspd':
        # Put the legend at the top as well
        leg = pylab.legend(modlist+moslist,loc=9,ncol=7,mode='expand')
    else:
        # Put the legend at the bottom (loc=8 is lower center)
        leg = pylab.legend(modlist+moslist,loc=8,ncol=7,mode='expand')


    # Make the legend transparent
    leg.get_frame().set_alpha(0.5)
    ltexts = leg.get_texts()
    pylab.setp(ltexts,fontsize='small')

    # Grey out the non-forecast period; wind uses the UTC-shifted window
    # computed at the top of the script.
    if var == 'wspd':
        pylab.axvspan(matplotlib.dates.date2num(start_ptime),matplotlib.dates.date2num(start_ftime_wind),facecolor='0.8',alpha=0.40)
        pylab.axvspan(matplotlib.dates.date2num(end_ftime_wind),matplotlib.dates.date2num(end_ptime),facecolor='0.8',alpha=0.40)
    else:
        pylab.axvspan(matplotlib.dates.date2num(start_ptime),matplotlib.dates.date2num(start_ftime),facecolor='0.8',alpha=0.40)
        pylab.axvspan(matplotlib.dates.date2num(end_ftime),matplotlib.dates.date2num(end_ptime),facecolor='0.8',alpha=0.40)
    # Reformat the date axis
    ax.set_xlim([matplotlib.dates.date2num(start_ptime),matplotlib.dates.date2num(end_ptime)])
    # NOTE(review): shadows the min/max builtins (local to this function)
    min,max = ax.get_ylim()
    if var == 'temp':
        ax.set_yticks(range(int(min),int(max)))
    elif var == 'wspd':
        ax.set_yticks(range(0,int(max)))

    # Major ticks every 3 hours labelled by hour; minor ticks label days
    ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%HZ'))
    ax.xaxis.set_minor_locator(matplotlib.dates.DayLocator())
    ax.xaxis.set_minor_formatter(matplotlib.dates.DateFormatter('%h %d'))
    pylab.grid()
    if export_flag:
        # Save locally, then move the image into the web directory
        pylab.savefig('%s_%s_sref.png' % (siteid.upper(),var),bbox_inches='tight')
        os.system('mv %s_%s_sref.png %s/%s_%s_sref.png' % (siteid.upper(),var,outdir,siteid.upper(),var))
    else:
        pylab.show()
        


        
# Member ids actually parsed above (sorted just before plotting).
modlist = models.keys()
model_init = {}
# Get model init times
#for model in modlist:
#    times = models[model].keys()
#    times.sort()
#    model_init[model] = times[0]
# Sort these by init time
hilo_model = {}

# MOS guidance is currently disabled; plot_models still accepts the
# (empty) list so dashed-MOS support can be re-enabled later.
#moslist = ['gfs_mos','nam_mos']
#for modtyp in moslist:
#    models[modtyp],hilo_model[modtyp] = mos_parser(siteid,modtyp[0:3])
moslist = []

# Get observations for this site (xnlist is not used below)
oblist,xnlist = obs_parser(siteid)

moslist.sort()
modlist.sort()

# Draw the three summary figures.
plot_models('temp',modlist,oblist,moslist)
plot_models('wspd',modlist,oblist,moslist)
plot_models('precip',modlist,oblist,moslist)

