#!/usr/bin/env python

from optparse import OptionParser
# Get the station ID we want from the command line
parser = OptionParser()
parser.add_option('-s', '--site', dest='siteid', action='store', type='string', default='KSEA', help='4-letter id of desired site')
parser.add_option('-e', '--export', dest='export', action='store_true',default=False, help='Write images to png or not.')
(opts,args) = parser.parse_args()
# Site codes are handled upper-case throughout the rest of the script
siteid = opts.siteid.upper()
export_flag = opts.export


# When exporting to png, the non-interactive 'agg' backend must be
# selected *before* pyplot is first imported so figures can be rendered
# without a display; otherwise use the default interactive backend.
if export_flag:
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
else:
    import matplotlib
    import matplotlib.pyplot as plt
import pickle
import os
import numpy as np
from datetime import datetime, timedelta



def main():
    """Run the full verification workflow for the configured site.

    Builds the observed truth record, scores the archived forecasts
    against it, then draws the performance and rank-histogram plots.
    """
    verification = get_verification(siteid)
    errors = compute_errors(verification, siteid)
    plot_performance(errors)
    rank_histogram(siteid)




def make_bma_ensemble(archive):
    """Build a BMA-resampled ensemble for every date in the archive.

    For each archived date: bias-correct every member's high/low forecast,
    fit BMA probability curves on a 0-110 degree grid, then redraw a
    len(members)-sized ensemble from each curve via a single multinomial
    draw over the grid values.

    Parameters:
        archive : dict, member name -> date -> {'high': val, 'low': val}

    Returns:
        dict, date -> {'highs_bmamems': [...], 'lows_bmamems': [...]}
    """
    # Project-local helpers (deferred import, matching the module's style)
    from plot_current_gfse import BMA_compute, bias_correct

    mems = list(archive.keys())
    arch_dates = list(archive[mems[0]].keys())

    outens = {}
    for d in arch_dates:
        # Bias-correct each raw member forecast for this date
        bc_ens = {'High BC': {},
                  'Low BC': {}}
        for mem in mems:
            hi, lo = bias_correct(str(mem), archive[mem][d]['high'], archive[mem][d]['low'], siteid.upper())
            bc_ens['High BC'][str(mem)] = hi
            bc_ens['Low BC'][str(mem)] = lo

        # Fit the BMA probability curves on a 1-degree grid
        curvex = np.linspace(0, 110, 111)
        bma_high, bma_mean_high = BMA_compute(bc_ens, curvex, 'HIGH', siteid.upper())
        bma_low, bma_mean_low = BMA_compute(bc_ens, curvex, 'LOW', siteid.upper())

        # Redraw an n-member ensemble from each distribution: draw a count
        # for every grid value, then expand the counts back into explicit
        # member lists.  np.repeat replaces the old nested count-expansion
        # loops (which also carried a redundant `numtime > 0` guard).
        occurs_high = np.random.multinomial(len(mems), bma_high)
        occurs_low = np.random.multinomial(len(mems), bma_low)
        high_ens = list(np.repeat(curvex, occurs_high))
        low_ens = list(np.repeat(curvex, occurs_low))

        outens[d] = {'highs_bmamems': high_ens,
                     'lows_bmamems': low_ens}
    return outens



def _rank_bins(member_lists, truth_vals):
    """Return a normalized rank histogram for a set of ensemble cases.

    The rank of each case is the number of ensemble members strictly
    greater than the verifying observation.  Bin counts are divided by
    the number of cases, so the result is a list of fractions with
    len(members)+1 bins.
    """
    bins = np.zeros(len(member_lists[0]) + 1)
    for members, obs in zip(member_lists, truth_vals):
        rank = len([m for m in members if obs < m])
        try:
            bins[rank] = bins[rank] + 1.
        except IndexError:
            # A ragged archive (more members on one date than the first)
            # can push the rank past the bin count; report and continue,
            # matching the original best-effort behavior.
            print('ERROR on bincount')
            print('BINNUM: %s' % rank)
            print('BIN LEN: %s' % len(bins))
    return [b / len(member_lists) for b in bins]


def rank_histogram(siteid):
    """Plot side-by-side rank histograms for highs and lows at *siteid*.

    Left panel: raw GFSE ensemble reconstructed from the archive pickle.
    Right panel: BMA-resampled ensemble (see make_bma_ensemble).
    Reads %s_gfse_performance.pickle and %s_gfse_archive.pickle from the
    current directory; saves or shows one figure per variable depending
    on the module-level export_flag.
    """
    # Narrowed from a bare except: only a missing/unreadable file should
    # trigger the friendly exit; the handle is now closed promptly too.
    try:
        with open('%s_gfse_performance.pickle' % siteid.upper(), 'rb') as pfile:
            modelout = pickle.load(pfile)
    except IOError:
        print("Unable to find model archive: %s_gfse_performance.pickle" % siteid.upper())
        exit(1)

    # Also load the archive of raw member forecasts
    with open('%s_gfse_archive.pickle' % siteid.upper(), 'rb') as afile:
        archive = pickle.load(afile)

    # Reconstruct per-date member lists from the archive.  NOTE: this
    # intentionally overwrites any existing modelout[d] entry, as the
    # original code did.
    mems = list(archive.keys())
    arch_dates = sorted(archive[mems[0]].keys())
    for d in arch_dates:
        modelout[d] = {}
        modelout[d]['highs_gfsemems'] = [archive[m][d]['high'] for m in mems]
        modelout[d]['lows_gfsemems'] = [archive[m][d]['low'] for m in mems]

    # Generate a BMA-resampled ensemble from the same archive
    bmaens = make_bma_ensemble(archive)

    truth = get_verification(siteid)
    # Sorted date list with the last entry (today) removed, since it
    # cannot have verified yet
    dates = sorted(modelout.keys())
    dates = dates[:-1]

    for var in ('highs', 'lows'):
        # Reduce to the dates where raw member data is actually available
        newdates = sorted(d for d in dates if '%s_gfsemems' % var in modelout[d])

        varlist = [modelout[d]['%s_gfsemems' % var] for d in newdates]
        truthlist = [truth[d][var[:-1]] for d in newdates]
        bmadates = sorted(bmaens.keys())
        bmalist = [bmaens[d]['%s_bmamems' % var] for d in bmadates]
        bma_truthlist = [truth[d][var[:-1]] for d in bmadates]

        # Left panel: raw GFSE ensemble
        bins = _rank_bins(varlist, truthlist)
        plt.figure(figsize=(12, 8))
        plt.subplot(121)
        plt.bar(range(len(bins)), bins, align='center', width=1.0)
        plt.title('%s rank histogram of GFSE at %s' % (var.upper(), siteid.upper()))
        plt.xlabel('Rank')
        plt.ylabel('Fraction')
        # A perfectly calibrated ensemble is flat at 1/nbins
        ideal_val = 1. / len(bins)
        plt.axhline(y=ideal_val, linestyle='dashed', color='k')
        ax = plt.gca()
        ax.set_xlim((-0.5, 21.5))
        yset = ax.get_ylim()  # reuse so both panels share the y scale

        # Right panel: BMA-resampled ensemble
        bins = _rank_bins(bmalist, bma_truthlist)
        plt.subplot(122)
        plt.bar(range(len(bins)), bins, align='center', width=1.0, color='g')
        plt.title('BMA values')
        plt.xlabel('Rank')
        plt.ylabel('Fraction')
        ideal_val = 1. / len(bins)
        plt.axhline(y=ideal_val, linestyle='dashed', color='k')
        ax = plt.gca()
        ax.set_xlim((-0.5, 21.5))
        ax.set_ylim(yset)

        if export_flag:
            plt.savefig('%s_%s_rankhist.png' % (siteid.upper(), var.upper()), bbox_inches='tight')
            plt.close()
        else:
            plt.show()



def plot_performance(ind):
    """Plot verification summaries from the per-date error dictionary.

    Produces two figures from *ind* (as built by compute_errors):
      1. accumulated absolute error over time for every '*err' series
      2. observed coverage of the 50%/90% percentile bounds for highs
         and lows, against reference lines at 0.5 and 0.9

    Deprecated plt.hold() calls were removed (holding is the default
    behavior), and the stray debug print of the error list was dropped.
    """
    # Line colors keyed by the error-variable suffix
    lineco = {'opererr'   : 'black',
              'medianerr' : 'blue',
              'bmaerr'    : 'magenta'}

    # Flatten the per-date dictionary into one time series per variable
    datelist = sorted(ind.keys())
    errorlists = {}
    for var in ind[datelist[0]].keys():
        errorlists[var] = [ind[dt][var] for dt in datelist]

    # --- Figure 1: accumulated absolute error --------------------------
    plt.figure()
    for var in errorlists.keys():
        if not var.endswith('err'):
            continue
        abslist = [abs(r) for r in errorlists[var]]
        # BUGFIX: the old form [np.sum(abslist[0:r]) for r in
        # range(len(abslist))] was off by one -- it always excluded the
        # current date's error (plotting 0 for the first date and never
        # counting the final one).  cumsum gives the running total
        # through each date.
        accum_list = np.cumsum(abslist)
        # Pick the line color from the suffix; fall back to black so
        # linecol can never be left unbound.
        linecol = 'black'
        for cs in lineco.keys():
            if var.endswith(cs):
                linecol = lineco[cs]
        # Highs are solid, lows dashed
        if var.startswith('hi'):
            plt.plot(datelist, accum_list, color=linecol, linewidth=2, label=var)
        else:
            plt.plot(datelist, accum_list, color=linecol, linewidth=2, linestyle='dashed', label=var)
    plt.title('Accumulated error scores')
    plt.xlabel('Date')
    plt.ylabel('Total error points')
    ax = plt.gca()
    ax.xaxis.set_major_locator(matplotlib.dates.DayLocator())
    ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%h %d'))
    plt.legend(loc=2)
    if export_flag:
        plt.savefig('%s_accum_scores.png' % siteid.upper(), bbox_inches='tight')
    else:
        plt.show()

    # --- Figure 2: percentile-bound coverage ---------------------------
    # Combine the upper/lower bound True/False flags for each interval;
    # each entry is the count (0-2) of bound checks met for that date.
    # NOTE(review): the exact interval semantics depend on how the high/
    # low bound flags were set in compute_errors -- verify against the
    # pickle producer before reinterpreting these numbers.
    highs_in50 = [r + s for r, s in zip(errorlists['highs_high50'], errorlists['highs_low50'])]
    highs_in90 = [r + s for r, s in zip(errorlists['highs_high90'], errorlists['highs_low90'])]
    lows_in50 = [r + s for r, s in zip(errorlists['lows_high50'], errorlists['lows_low50'])]
    lows_in90 = [r + s for r, s in zip(errorlists['lows_high90'], errorlists['lows_low90'])]

    high_50 = np.sum(highs_in50) / float(len(highs_in50))
    high_90 = np.sum(highs_in90) / float(len(highs_in90))
    low_50 = np.sum(lows_in50) / float(len(lows_in50))
    low_90 = np.sum(lows_in90) / float(len(lows_in90))

    plt.figure()
    rects = [None] * 2
    # Paired bars: group at x~1 holds the *_50 values, group at x~2 the
    # *_90 values; red = highs, blue = lows.
    rects[0] = plt.bar([1.0, 2.0], [high_50, high_90], 0.2, color=['r'])
    rects[1] = plt.bar([1.2, 2.2], [low_50, low_90], 0.2, color=['b'])
    plt.legend([block[0] for block in rects], ('High', 'Low'), loc=0)
    # BUGFIX: labels were swapped relative to the plotted data -- the
    # first bar group plots the *_50 values, the second the *_90 values.
    plt.xticks([1.2, 2.2], ('50th Percentile', '90th Percentile'))
    plt.ylabel('Actual percentage verified')
    plt.axhline(y=0.5, color='k', linestyle='--')
    plt.axhline(y=0.9, color='k', linestyle='--')
    if export_flag:
        plt.savefig('%s_pctile_verify.png' % siteid.upper(), bbox_inches='tight')
    else:
        plt.show()


def compute_errors(verification, siteid):
    """Score archived forecasts against the verification record.

    Loads %s_gfse_performance.pickle for *siteid* and, for every date
    present in both the archive and *verification*, builds:
      * '<var>err' entries: forecast minus observed value, for plain
        forecast variables (those not ending in '90', '50' or 'emems')
      * boolean entries for percentile bounds: True when a 'high90'/
        'high50' bound is <= the observation, or a 'low90'/'low50'
        bound is >= the observation.

    Raw member-list variables ('*emems') are skipped entirely.  (The
    original code let them fall through to a list-vs-float comparison,
    which produced meaningless booleans on Python 2 and would raise a
    TypeError on Python 3.)

    Parameters:
        verification : dict, date -> {'high': obs, 'low': obs}
        siteid       : station identifier; upper-cased for the filename

    Returns:
        dict, date -> {variable: error or bool}
    """
    # Narrowed from a bare except; opened 'rb' so the pickle reads the
    # same on any platform, and the handle is closed promptly.
    try:
        with open('%s_gfse_performance.pickle' % siteid.upper(), 'rb') as pfile:
            modelout = pickle.load(pfile)
    except IOError:
        print("Unable to find model archive: %s_gfse_performance.pickle" % siteid.upper())
        exit(1)

    errord = {}
    for dt in modelout:
        if dt not in verification:
            continue
        if dt not in errord:
            errord[dt] = {}
        for var in modelout[dt]:
            if var.endswith('emems'):
                # Raw member lists carry no single forecast value to score
                continue
            # Highs verify against the observed high, everything else
            # against the observed low
            if var.startswith('highs'):
                realval = verification[dt]['high']
            else:
                realval = verification[dt]['low']

            if not var.endswith('90') and not var.endswith('50'):
                errord[dt][var + 'err'] = modelout[dt][var] - realval
            elif var.endswith('high90') or var.endswith('high50'):
                errord[dt][var] = modelout[dt][var] <= realval
            else:
                errord[dt][var] = modelout[dt][var] >= realval

    return errord


def get_verification(siteid):
    """Return the observed verification record for *siteid*.

    Thin wrapper around the project-local encode_truth module.
    """
    # Deferred import keeps module load cheap, matching the file's style
    from encode_truth import encode_truth

    return encode_truth(siteid)

# Run the full workflow only when invoked as a script (not on import)
if __name__ == '__main__':
    main()

