#############################################################################################
#############################################################################################
# 
#   Open Source License/Disclaimer, Forecast Systems Laboratory NOAA/OAR/GSD, 
#   325 Broadway Boulder, CO 80305
#
#   This software is distributed under the Open Source Definition, which may be 
#   found at http://www.opensource.org/.
#   In particular, redistribution and use in source and binary forms, with or 
#   without modification, are permitted provided that the following conditions are met:
#
#   - Redistributions of source code must retain this notice, this list of 
#     conditions and the following disclaimer.
#
#   - Redistributions in binary form must provide access to this notice, this 
#     list of conditions and the following disclaimer, and the underlying source code.
#
#   - All modifications to this software must be clearly documented, and are 
#     solely the responsibility of the agent making the modifications.
#
#   - If significant modifications or enhancements are made to this software, 
#     the GSD Software Policy Manager (softwaremgr.fsl@noaa.gov) should be notified.
#
#   THIS SOFTWARE AND ITS DOCUMENTATION ARE IN THE PUBLIC DOMAIN AND ARE 
#   FURNISHED "AS IS." THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES,
#   OFFICERS, EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO
#   THE USEFULNESS OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME 
#   NO RESPONSIBILITY (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE
#   TECHNICAL SUPPORT TO USERS. 
#############################################################################################
#############################################################################################

import sys
import math

# Each third-party/package import is wrapped so a missing dependency produces
# a clear message.  The original used bare "except:" clauses, which also
# swallowed unrelated failures (e.g. a broken install raising something other
# than ImportError) and re-labeled them as "cannot be found"; the handlers
# are narrowed so genuine errors propagate with their real traceback.
try:
    import numpy as np
except ImportError:
    raise ImportError('The numpy library cannot be found!')

try:
    from scipy.optimize import minimize
except ImportError:
    raise ImportError('The scipy.optimize library cannot be found!')

try:
    from ..analysis_generator_class import AnalysisGeneratorClass
except (ImportError, ValueError):
    # ValueError: Python 2 raises it for a relative import attempted outside
    # a package (e.g. when this file is run as a script).
    raise ImportError('The AnalysisGeneratorClass library cannot be found!')


class VAR(AnalysisGeneratorClass):
    # This is an implementation of a variational data assimilation method:
    # a single control vector (the model initial condition) is adjusted by
    # minimizing a background-plus-observation cost function, with gradients
    # supplied by the adjoint model.  (NOTE: despite the original comment,
    # no ensemble covariance appears here; the analysis acts on one state.)
    def __init__(self, optimization_algrm_choice='l-bfgs-b'):
        # Set up the variational analysis generator.
        #
        # optimization_algrm_choice : name of the scipy.optimize.minimize
        #     scheme used by create_analysis (compared after .upper();
        #     'l-bfgs-b', 'cg' and 'bfgs' get tuned option dictionaries).
        #
        # BUG FIX: the previous default, 'lbfgs', is not a method name
        # scipy.optimize.minimize recognizes ('LBFGS' raises "Unknown
        # solver"), so the no-argument constructor could never minimize.
        # The default is now the intended 'l-bfgs-b'.
        self.optimization_algrm_choice = optimization_algrm_choice
        self.Name = 'Variational Data Assimilation'
        # Count of non-NaN observations used by the most recent cost
        # function evaluation; reported by create_analysis.
        self.total_obs_number = 0
    
    # Self description
    def __repr__(self):
        """Banner-style summary naming the minimization scheme in use."""
        banner = '*' * 50
        parts = [banner,
                 self.Name,
                 'Using ' + self.optimization_algrm_choice + ' scheme to minimize',
                 banner]
        return '\n'.join(parts)
    
    def costFunc_and_grad(self,x,*args):
        """Evaluate the variational cost function and its gradient together.

        x    : 1-D control vector (the initial condition with boundaries
               stripped).
        args : (domain, obs_list, innov_time, Rinv, fg, Binv) where domain
               is the model wrapper (forward/adjoint propagators, boundary
               helpers), obs_list holds one observation array per entry of
               innov_time, Rinv the matching inverse observation-error
               covariances, fg the first-guess (background) state, and Binv
               the inverse background-error covariance.

        Returns (J, G) with J = 0.5*(Jb + Jo) and G = Gb + Go as a 1-D
        array.  Side effect: self.total_obs_number is reset to the number
        of non-NaN observations used.
        """

        domain = args[0]
        obs_list = args[1]
        innov_time = args[2]
        Rinv = args[3]
        fg = args[4]
        Binv = args[5]

        # Calculate the Jb
        # (X-Xb)^T x B^(-1) x (X-Xb)
        delta_x = x - fg.reshape(x.size)
        Jb = np.dot( np.dot( delta_x.transpose(), Binv ), delta_x )

        # Calculate the Gb
        # B^(-1) x (X-Xb)
        Gb = np.dot( Binv, delta_x )

        # Calculate innovations

        # Patch the boundaries on initial condition
        x = domain.patch_boundaries(x)

        # x is the vector shape, tranform it to matrix IC shape
        domain.ic = x.reshape(domain.ic.shape)

        # Call model propagator
        traj = domain.model_integration()

        # Strip off the boundaries again on x for DA
        x = domain.strip_off_boundaries(x)

        # Initial the Jo and Go
        Jo = 0.
        Go = np.zeros_like( x )  # placeholder; overwritten by the adjoint gradient below

        # Allocate and set adjoint forcing to ZERO
        # NOTE(review): here the forcing is sized with domain.ic.shape and
        # model_init_ad() is called only after the forcing loop, whereas
        # grad() uses domain.ic_ad.shape and initializes before the loop --
        # confirm both orderings are equivalent for the domain implementation.
        domain.forcing_ad = np.zeros( (len(domain.history),)+ domain.ic.shape )

        self.total_obs_number = 0

        # Calculate the innovations 
        for time in domain.history:

            if time in innov_time:

                # Strip off the boundaries of traj for DA
                hx = domain.strip_off_boundaries(traj[np.where(domain.history == time),:].reshape(domain.ic.shape))

                # H(X) - Y; NaN (missing) observations are excluded from
                # the count and then zeroed so they contribute nothing.
                innovation = hx.reshape(x.size) - obs_list[innov_time.index(time)].reshape(x.size)
                obs_number = innovation[~np.isnan(innovation)].size
                self.total_obs_number += obs_number
                innovation[np.isnan(innovation)] = 0.0

                # Calculate Jo = Sum_(t=0,T) ( (H(X) - Y)^T x R^(-1) x (H(X) -Y) )
                Jo_partial = np.dot( np.dot( innovation.transpose(), Rinv[innov_time.index(time)] ), innovation )
                Jo = Jo + Jo_partial

                # Calculate Adjoint forcing and reshape to vector shape
                # Go = H x R^(-1) x ( H(X) - Y )
                forcing_ad = np.dot( Rinv[innov_time.index(time)], innovation ).reshape(fg.shape)
            else :
                # Calculate Adjoint forcing
                forcing_ad = np.zeros_like(fg)

            # Patch the boundaries and save the forcing into the list on corresponding time
            domain.forcing_ad[np.where(domain.history == time),:] = domain.patch_boundaries(forcing_ad)

        # Initializing model gradient at the end of the window (to ZERO)
        domain.model_init_ad()

        # Call adjoint propagate
        domain.model_integration_ad()

        # Strip off the boundaries of the gradient at the initial time
        gradient = domain.strip_off_boundaries(domain.ic_ad)

        # Reshape gradient to vector shape
        Go = gradient.reshape(fg.size)

        # Return the cost function and gradient
        return 0.5 * (Jb + Jo), (Gb + Go).reshape(len(Gb + Go))
    
    def costFunc(self, x, *args):
        # Variational cost function J(x) = 0.5 * (Jb + Jo).
        #
        # x    : 1-D control vector (initial condition, boundaries stripped)
        # args : (domain, obs_list, innov_time, Rinv, fg, Binv):
        #        domain     - model wrapper (propagator, boundary helpers)
        #        obs_list   - observations, one array per entry of innov_time
        #        innov_time - times at which observations are available
        #        Rinv       - inverse observation-error covariances (per time)
        #        fg         - first guess / background state xb
        #        Binv       - inverse background-error covariance B^(-1)
        #
        # Side effect: resets self.total_obs_number to the number of
        # non-NaN observations used.  FIX: seed it with the int 0 instead
        # of the float 0.0, for consistency with costFunc_and_grad and
        # with the '%d' report in create_analysis.

        domain = args[0]
        obs_list = args[1]
        innov_time = args[2]
        Rinv = args[3]
        fg = args[4]
        Binv = args[5]

        # Background term Jb = (X-Xb)^T B^(-1) (X-Xb)
        delta_x = x - fg.reshape(x.size)
        Jb = np.dot( np.dot( delta_x.transpose(), Binv ), delta_x )

        # Forward run: patch the boundaries, install x as the initial
        # condition, and integrate the model to get the trajectory.
        x = domain.patch_boundaries(x)
        domain.ic = x.reshape(domain.ic.shape)
        traj = domain.model_integration()

        # Strip off the boundaries again on x for DA
        x = domain.strip_off_boundaries(x)

        # Observation term Jo, accumulated over observation times
        Jo = 0.

        self.total_obs_number = 0

        for time in domain.history:

            if time in innov_time:

                # Strip off the boundaries of traj for DA
                hx = domain.strip_off_boundaries(traj[np.where(domain.history == time),:].reshape(domain.ic.shape))

                # Innovation H(X) - Y; NaN (missing) observations are
                # excluded from the count and then zeroed so they add nothing.
                innovation = hx.reshape(x.size) - obs_list[innov_time.index(time)].reshape(x.size)

                self.total_obs_number += innovation[~np.isnan(innovation)].size
                innovation[np.isnan(innovation)] = 0.0

                # Jo += (H(X) - Y)^T R^(-1) (H(X) - Y)
                Jo = Jo + np.dot( np.dot( innovation.transpose(), Rinv[innov_time.index(time)] ), innovation )

        return 0.5 * (Jb + Jo)
    
    def grad(self, x, *args):
        # Gradient of the variational cost function at x.
        #
        # x    : 1-D control vector (initial condition, boundaries stripped)
        # args : (domain, obs_list, innov_time, Rinv, fg, Binv), the same
        #        tuple costFunc receives.
        #
        # Returns Gb + Go as a 1-D numpy array, where
        #   Gb = B^(-1) (X - Xb)                         (background term)
        #   Go = adjoint-model gradient of the observation term.
        #
        # FIX: the two forcing_ad assignments were tab-indented while the
        # rest of the file uses spaces (rejected by "python -tt" and by
        # Python 3); they are re-indented with spaces.  No behavior change.

        domain = args[0]
        obs_list = args[1]
        innov_time = args[2]
        Rinv = args[3]
        fg = args[4]
        Binv = args[5]

        # Background-term gradient Gb = B^(-1) (X - Xb)
        delta_x = x - fg.reshape(x.size)
        Gb = np.dot( Binv, delta_x )

        # Forward run: patch the boundaries, install x as the initial
        # condition, and integrate the model to get the trajectory.
        x = domain.patch_boundaries(x)
        domain.ic = x.reshape(domain.ic.shape)
        traj = domain.model_integration()

        # Strip off the boundaries again on x for DA
        x = domain.strip_off_boundaries(x)

        # Initializing model gradient (required before the adjoint run)
        domain.model_init_ad()

        # Allocate and set adjoint forcing to ZERO
        # NOTE(review): sized with domain.ic_ad.shape here, but with
        # domain.ic.shape in costFunc_and_grad -- confirm these agree.
        domain.forcing_ad = np.zeros( (len(domain.history),)+ domain.ic_ad.shape )

        # Build the adjoint forcing R^(-1) (H(X) - Y) at observation
        # times; zero forcing elsewhere.  NaN observations are zeroed so
        # they contribute nothing.
        for time in domain.history:

            if time in innov_time:

                # Strip off the boundaries of traj for DA
                hx = domain.strip_off_boundaries(traj[np.where(domain.history == time),:].reshape(domain.ic.shape))

                innovation = hx.reshape(x.size) - obs_list[innov_time.index(time)].reshape(x.size)
                innovation[np.isnan(innovation)] = 0.0

                forcing_ad = np.dot( Rinv[innov_time.index(time)], innovation ).reshape(fg.shape)
            else:
                forcing_ad = np.zeros_like(fg)

            # Patch the boundaries and store at the matching history slot
            domain.forcing_ad[np.where(domain.history == time),:] = domain.patch_boundaries(forcing_ad)

        # Backward (adjoint) run
        domain.model_integration_ad()

        # Observation-term gradient at the initial time, as a vector
        gradient = domain.strip_off_boundaries(domain.ic_ad)
        Go = gradient.reshape(x.size)

        return (Gb + Go).reshape(len(Gb + Go))

    def gradient_check(self,*args):
        """Taylor-series ("grtest") verification of self.grad vs self.costFunc.

        args : (domain, obs_list, innov_time, Rinv, fg, Binv), identical to
        the tuple passed to costFunc/grad.  Cost and gradient are evaluated
        at a test point, then re-evaluated along the negative-gradient
        direction at step sizes alpha = pdx * 10**iter, and continuity,
        gradient-accuracy and quadraticity diagnostics are printed to
        stdout.  Nothing is returned.
        """

        domain = args[0]
        obs_list = args[1]
        innov_time = args[2]
        Rinv = args[3]
        fg = args[4]
        Binv = args[5]

        # Buffers: step sizes, f(alpha) values, finite-difference d2f
        zabuf=[]
        zfabuf=[]
        ztf2buf=[]

        # Number of test steps and smallest perturbation step size
        itertest = 10
        pdx = 1.0e-15

        #----------------------------------------------------------------------------
        # [1] Initial point
        #----------------------------------------------------------------------------
        #xhat = np.random.sample(fg.size) - 0.5
        # NOTE(review): the test point is 0.1 * first guess, not fg itself.
        xhat = 0.1*fg.reshape(fg.size).copy()
        yhat = xhat.copy()

        zfy = self.costFunc(yhat, domain,obs_list,innov_time,Rinv,fg,Binv)
        grad = self.grad(yhat, domain,obs_list,innov_time,Rinv,fg,Binv)

        #----------------------------------------------------------------------------
        # [1.1] Define perturbation direction ZH
        #----------------------------------------------------------------------------       
        print 'The test direction is the opposite of the gradient '
        xdir  = -1.0  * grad

        #----------------------------------------------------------------------------
        # [1.2] Set function f value and derivative at origin
        #----------------------------------------------------------------------------
        zf0 = zfy
        # Directional derivative DF(0) = grad . xdir (negative if grad is correct)
        zdf0 = np.dot( grad.transpose(), xdir )
        print 'grtest: F(0)=',zf0,' DF(0)=',zdf0

        if zdf0 > 0.0 :
            print 'GRTEST Warning, DF should be negative'
        # sqrt(single-precision epsilon) threshold for a usable derivative
        if abs(zdf0) < math.sqrt(1.19209290E-07):
            print'GRTEST WARNING, DERIVATIVE IS TOO SMALL'

        #----------------------------------------------------------------------------
        # [2] Loop on test point
        #----------------------------------------------------------------------------  

        ztf2buf.append(0.0)

        for iter in range(itertest):

            # Step size grows by a decade each iteration
            za=pdx*(10.0**iter)

            print 'grtest iter=',iter,' alpha=',za

            #----------------------------------------------------------------------------
            # [2.1] Compute f and df at new point y=x+a.h
            #----------------------------------------------------------------------------

            yhat = xhat + za * xdir

            zfy = self.costFunc(yhat, domain,obs_list,innov_time,Rinv,fg,Binv)
            grad = self.grad(yhat, domain,obs_list,innov_time,Rinv,fg,Binv)

            zfa=zfy
            zdfa= np.dot( grad.transpose(), xdir )
            print 'grtest: ======alpha=',za,' F(a)=',zfa,' DF(a)=',zdfa,'========='

            zabuf.append(za)
            zfabuf.append(zfa)

            #----------------------------------------------------------------------------
            #[2.2] Quantity TC0=f(a)/f(0)-1
            #----------------------------------------------------------------------------

            # if f is continuous then TC0->1 at origin,
            # at least linearly with a.

            if abs(zf0) <= 1.17549435E-38: 
            #   do not compute T1 in this unlikely case
                print 'grtest: Warning: zf0 is suspiciously small.'
                print 'grtest: F(a)-F(0)=',zfa-zf0
            else:
                ztco=zfa/zf0-1.0
                print 'grtest: continuity TC0=%18.15f' % ztco

            #----------------------------------------------------------------------------
            #                     f(a)-f(0)
            #[2.3] Quantity T1=-----------
            #                      a.df(0)
            #----------------------------------------------------------------------------

            # if df is the gradient then T1->1 at origin,
            #         linearly with a. T1 is undefined if df(0)=0.

            if abs(za*zdf0) <= math.sqrt(1.17549435E-38):
                print 'grtest: Warning: could not compute ', \
                    'gradient test T1, a.df(0)=',za*zdf0
            else:
                zt1=(zfa-zf0)/(za*zdf0)
                print 'grtest: gradient T1=%18.15f' % zt1

            #----------------------------------------------------------------------------
            # [2.4] Quantity TC1=( f(a)-f(0)-a.df(0) )/a
            #----------------------------------------------------------------------------

            #     if df is the gradient and df is continuous,
            #         then TC1->0 linearly with a.
            ztc1=(zfa-zf0-za*zdf0)/za
            print 'grtest: grad continuity TC1=%18.15f' % ztc1

            #----------------------------------------------------------------------------
            # [2.5] Quantity T2=( f(a)-f(0)-a.df(0) )*2/a**2
            #----------------------------------------------------------------------------

            #     if d2f exists then T2 -> d2f(0) linearly with a.
            zt2=(zfa-zf0-za*zdf0)*2.0/(za**2)
            print 'grtest: second derivative T2=%18.15f' % zt2

            #----------------------------------------------------------------------------
            # [2.6] Quantity TC1A=df(a)-df(0)
            #----------------------------------------------------------------------------

            #     if df is the gradient in a and df is continuous,
            #     then TC1A->0 linearly with a.
            ztc1a=zdfa-zdf0
            print 'grtest: a-grad continuity TC1A=%18.15f' % ztc1a

            #----------------------------------------------------------------------------
            # [2.7] Quantity TC2=( 2(f(0)-f(a))+ a(df(0)+df(a))/a**2
            #----------------------------------------------------------------------------

            #     if f is exactly quadratic, then TC2=0, always: numerically
            #     it has to -> 0 when a is BIG. Otherwise TC2->0 linearly for
            #     small a is trivially implied by TC1A and T2.
            ztc2=(2.0*(zf0-zfa)+za*(zdf0+zdfa))/(za**2)
            print 'grtest: quadraticity TC2=%18.15f' % ztc2

            #----------------------------------------------------------------------------
            #                     2   f(0)-f(b)   f(a)-f(b)
            # [2.8] Quantity TF2=---( --------- + --------- )
            #                     a       b          a-b
            #----------------------------------------------------------------------------

            #     if 0, a and b are distinct and f is quadratic then
            #     TF2=d2f, always. The estimate is most stable when a,b are big.
            #     This test works only after two loops, but it is immune against
            #     gradient bugs.

            if iter >= 1:
                zb =zabuf [iter-1]
                zfb=zfabuf[iter-1]
                ztf2=2.0/za*((zf0-zfb)/zb+(zfa-zfb)/(za-zb))
                print 'grtest: convexity ZTF2=%18.15f' % ztf2
                ztf2buf.append(ztf2)

        #----------------------------------------------------------------------------
        # [3] Comment on the results
        #----------------------------------------------------------------------------

        #   TC0(0)/TC0(2)<.011 -> df looks continuous
        #   item with (T1<1 and 1-T1 is min) = best grad test item
        #   reldif(TF2(last),TF2(last-1)) = precision on quadraticity

        #----------------------------------------------------------------------------
        #       3.1 Fundamental checks
        #----------------------------------------------------------------------------

        print 'GRTEST: TENTATIVE CONCLUSIONS :'

        ZTC00=abs(zfabuf[0]-zf0)
        ZTC02=abs(zfabuf[2]-zf0)
        if ZTC00/zabuf[0]  <=  1.5*(ZTC02/zabuf[2]) :
            print 'GRTEST: function f looks continous.'
        else:
           print 'GRTEST: WARNING f does not look continuous', \
                ' (perhaps truncation problem)'

        #----------------------------------------------------------------------------
        #       3.2 Gradient quality
        #----------------------------------------------------------------------------

        if abs(zdf0) <= math.sqrt(1.17549435E-38) :
            print 'GRTEST: The gradient is 0, which is unusual !'
            ZTC10=abs(zfabuf[0]-zf0)
            ZTC12=abs(zfabuf[2]-zf0)
            if ZTC10/zabuf[0]**2  <=  1.1*ZTC12/zabuf[2]**2 : 
                print 'GRTEST: The gradient looks good anyway.'
        else:
            #    Find best gradient test index
            ZERMIN=3.40282347e+38
            ibest=-1
            for iter in range(itertest):
                ZT1TST=(zfabuf[iter]-zf0)/(zabuf[iter]*zdf0)
                ZT1TST=abs(ZT1TST-1.0)
                if ZT1TST < ZERMIN:
                    ibest=iter
                    ZERMIN=ZT1TST

            if ibest == -1:
                print 'GRTEST: gradient test problem : bad ',\
                    'gradient, non-convex cost, or truncation errors ?'
            else:
                # Number of satisfactory digits in the best T1 test
                idig=int(-math.log(ZERMIN+1.17549435E-38)/math.log(10.0))
                print 'GRTEST: the best gradient test found has ',\
                        idig,' satisfactory digits.'
                if idig <= 1:
                    print 'GRTEST: SAYS: THE GRADIENT IS VERY BAD.'
                elif idig <= 3:
                    print 'GRTEST: SAYS: THE GRADIENT IS SUSPICIOUS.'
                elif idig <= 5:
                    print 'GRTEST: SAYS: THE GRADIENT IS ACCEPTABLE.'
                else:
                    print 'GRTEST: SAYS: THE GRADIENT IS EXCELLENT.'

                if ibest <= itertest-3:
                    ZTC10=abs(zfabuf[ibest  ]-zf0-zabuf[ibest  ]*zdf0)/zabuf[ibest  ]
                    ZTC12=abs(zfabuf[ibest+2]-zf0-zabuf[ibest+2]*zdf0)/zabuf[ibest+2]
                    if ZTC10/zabuf[ibest] <=  1.1*ZTC12/zabuf[ibest+2] :
                        print 'GRTEST: Gradient convergence looks good.'
                    else:
                        print 'GRTEST: Gradient convergence is suspicious.'
                else:
                    print 'GRTEST: could not check grad convergence.'

        #----------------------------------------------------------------------------
        #         3.3 Quadraticity
        #      finite difference quadraticity test (gradient-free)
        #----------------------------------------------------------------------------

        ZTF2=ztf2buf[itertest-1]
        ZTF2L=ztf2buf[itertest-2]
        print 'GRTEST: finite diff. d2f estimate no1:',ZTF2
        print 'GRTEST: finite diff. d2f estimate no2:',ZTF2L
        ZREF=(abs(ZTF2L)+abs(ZTF2))/2.0
        if ZREF <= 1.17549435E-38: 
            print 'GRTEST: they are too small to decide whether ',\
                    'they agree or not.'
        else:
            idig=int(-math.log(abs(ZTF2L-ZTF2)/ZREF + 1.17549435E-38)/math.log(10.0))
            print 'GRTEST: the fin.dif. estimates of d2f ',\
                    'have ',idig,' satisfactory digits.'
            if idig <= 1:
                print 'GRTEST: THE FD-QUADRATICITY IS BAD.'
            elif idig <= 3:
                print 'GRTEST:: THE FD-QUADRATICITY IS SUSPICIOUS.'
            elif idig <= 5:
                print 'GRTEST: THE FD-QUADRATICITY IS ACCEPTABLE.'
            else:
                print 'GRTEST: THE FD-QUADRATICITY IS EXCELLENT.'

        print 'grtest: Goodbye.' 
        #sys.exit()        
                    
                                        
    # Returns the analysis ensemble array and the analysis parameter array.
    # Analysis = (Ntimestep*SimulationDimension)x(EnSize) numpy array
    # AnalysisParam = (Parameter Size + Initialization Size)x(EnSize) numpy array
    def create_analysis(self,domain,fg,Binv,obs_list,Rinv,innov_time):

        
        # Redefine the history output interval
        # We need to save the basic states at every timestep
        domain.history_interval = domain.timestep
        domain.model_output_history()
                
        self.gradient_check(domain,obs_list,innov_time,Rinv,fg,Binv)
      
        opts = {}
        
        if self.optimization_algrm_choice.upper() == 'L-BFGS-B' :
            opts = {'gtol': 1e-6, 'maxcor' : 5, 'maxiter' : 3000, 'disp' : True}
        elif self.optimization_algrm_choice.upper() == 'CG' :
            opts = {'gtol': 1e-6, 'maxiter' : 3000, 'disp' : True}
        elif self.optimization_algrm_choice.upper() == 'BFGS' :
            opts = {'maxiter' : 3000, 'disp' : True}
            
        res = minimize(self.costFunc, fg, method=self.optimization_algrm_choice.upper(), jac=self.grad, \
                    args=(domain,obs_list,innov_time,Rinv,fg,Binv), options=opts)

        Analysis = res.x
            
        print 'Final cost function = %f' % res.fun
        print 'Convergency flag: ', res.success
        print 'Cause of the termination: ',  res.message
        print 'The gradient at the minimum = %f', res.jac
        print 'Number of func calls: %d' % res.nfev
        if hasattr(res, 'nit'):
            print 'Number of iterations : %d' % res.nit
        print 'Number of observations : %d' % self.total_obs_number
        print 'The ratio of the cost function to the number of obs.= %f' % (res.fun/self.total_obs_number)
            
                        
        return Analysis

