"""
/*=========================================================================

  Program:   Insight Segmentation & Registration Toolkit
  Module:    $RCSfile: itkRegularStepGradientDescentBaseOptimizer.cxx,v $
  Language:  C++
  Date:      $Date: 2007-09-10 16:22:23 $
  Version:   $Revision: 1.24 $

  Copyright (c) Insight Software Consortium. All rights reserved.
  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.

     This software is distributed WITHOUT ANY WARRANTY; without even 
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
     PURPOSE.  See the above copyright notices for more information.

=========================================================================*/
Translated to python on Jul 19, 2009

@Translator: bryan
"""

#===============================================================================
# Imports
#===============================================================================
import numpy as np
import pylab
import scipy.optimize
import time
from datetime import datetime

#===============================================================================
# Main Class
#===============================================================================
class RegularStepGradientDescentOptimizer():
    """
    Class translated from ITK C++ version 
    """


    def __init__(self):
        """
        Constructor
        """
        self._MaximumStepLength = 4.0
        self._MinimumStepLength = 0.125
        self._MinimumGradientStepLength = 0.125
        self._MaximumGradientStepLength = 1.25
        self._GradientMagnitudeTolerance = 1e-4
        self._RelaxationFactor = 0.6180339887
        self._stepLengthChanges = 0
        self._NumberOfIterations = 100
        self._CurrentIteration   =   0
        self._Value = 0
        self._BestValue = 1e300
        self._Maximize = False
        self._CostFunction = 0
        self._CurrentStepLength   =   0
        self._StopCondition = "Unknown"
        self._Gradient = np.zeros( 6 )
        self._PreviousGradient = np.zeros( 6 )
        self._InitialPosition = np.zeros( 6 )
        self._CurrentPosition = np.zeros( 6 )
        self._scales = np.ones( 6 )
        self._ScalesInitialized = False
        self._debug = False
        self.args = {}


    def GetMaximumStepLength(self):
        return self._MaximumStepLength

    def GetMinimumStepLength(self):
        return self._MinimumStepLength
    
    def GetMaximumGradientStepLength(self):
        return self._MaximumGradientStepLength

    def GetMinimumGradientStepLength(self):
        return self._MinimumGradientStepLength
    
    def SetMaximumGradientStepLength(self, value):
        self._MaximumGradientStepLength = value

    def SetMinimumGradientStepLength(self, value):
        self._MinimumGradientStepLength = value

    def GetGradientMagnitudeTolerance(self):
        return self._GradientMagnitudeTolerance

    def GetNumberOfIterations(self):
        return self._NumberOfIterations

    def GetRelaxationFactor(self):
        return self._RelaxationFactor

    def SetMaximumStepLength(self, value):
        self._MaximumStepLength = value

    def SetMinimumStepLength(self, value):
        self._MinimumStepLength = value

    def SetGradientMagnitudeTolerance(self, value):
        self._GradientMagnitudeTolerance = value

    def SetNumberOfIterations(self, value):
        self._NumberOfIterations = value

    def SetRelaxationFactor(self, value):
        self._RelaxationFactor = value

    def SetCurrentPosition(self, position):
        self._CurrentPosition = position
        
    def GetCurrentPosition(self):
        return self._CurrentPosition
        
    def GetInitialPosition(self):
        return self._InitialPosition
    
    def SetInitialPosition(self,position):
        self._InitialPosition = position
        
    def GetScales(self):
        return self._scales
    
    def SetScales(self,scales):
        self._scales = scales
        self._ScalesInitialized = True
    
    def SetCostFunction(self, costFunction):
        """
        """
        if( self._CostFunction == costFunction ):
            return

        if self._debug:
            print "setting CostFunction  to ",  costFunction

        self._CostFunction = costFunction;

        if( not self._ScalesInitialized):
            numberOfParameters = self._CostFunction.GetNumberOfParameters()
            scales = np.ones( numberOfParameters )
            self.SetScales( scales )
            self._ScalesInitialized = True

    def StartOptimization(self):
        """
        """
        self._CurrentStepLength = self._MaximumStepLength
        self._CurrentIteration  = 0

        self._StopCondition = "Unknown"
        self._startTime = time.time()

        # validity check for the value of GradientMagnitudeTolerance
        if ( self._GradientMagnitudeTolerance < 0.0 ):
            print "Gradient magnitude tolerance must be"\
                  "greater or equal 0.0. Current value is ", \
                  self._GradientMagnitudeTolerance
            return
                              
        # Need to change this function call
        spaceDimension = self._CostFunction.GetNumberOfParameters()

        self.SetCurrentPosition(self.GetInitialPosition())
        self.ResumeOptimization()


    def ResumeOptimization(self):
        """
        """
        self._Stop = False

        while( not self._Stop ): 
            if( self._CurrentIteration >= self._NumberOfIterations ):
                self._StopCondition = "MaximumNumberOfIterations"
                self.StopOptimization()
                break
            
            self._PreviousGradient = self._Gradient
            
            # Use smaller range for finite difference derivatives as the 
            # step size decreases but limit it to prevent noise
#            grad_step = np.max((self._CurrentStepLength,
#                               self._MinimumGradientStepLength))
#            grad_step = np.min((grad_step,
#                                self._MaximumGradientStepLength))
            grad_step = self._MaximumGradientStepLength
            
            if self._debug:
                print "gradient step: ", grad_step
    
            try:
                currentPosition = self.GetCurrentPosition()
                self._Value = self._CostFunction.GetValue(currentPosition, 
                                                           **self.args )
                self._Gradient = self._CostFunction.GetGradient(currentPosition,
                                                                  grad_step) 
            except:
                self._StopCondition = "CostFunctionError"
                self.StopOptimization()
                raise
          
            if( self._Stop ):
                break
            
            self.AdvanceOneStep()
            self._CurrentIteration = self._CurrentIteration + 1
            if self.IsBest(self._Value):
                self._BestValue = self._Value
    
    def IsBest(self, value):
        """ Value is a float that is returned from the cost function.  Returns 
        a bool to represent whether the value is best or not.
        This function will check if the best value that has been found, if it 
        is the best value, nothing is done, it is up to the calling function 
        to set the current value as the best value.
        It will take into account if the optimizer is set to minimize or 
        maximize.  
        """
        if self._Maximize:
            if value > self._BestValue:
                return True
        else:
            if value < self._BestValue:
                return True
        
        return False

    def StopOptimization(self):
        """
        """
        self._Stop = True


    def AdvanceOneStep(self):
        """
        """
        spaceDimension = self._CostFunction.GetNumberOfParameters()

        scales = np.asarray(self.GetScales())

        if( self._RelaxationFactor < 0.0 ):
            print "Relaxation factor must be positive. Current value is ",\
                  self._RelaxationFactor
            return
        
        if( self._RelaxationFactor >= 1.0 ):
            print "Relaxation factor must less than 1.0. Current value is ",\
                  self._RelaxationFactor
            return
            
        # Make sure the scales have been set properly
        if (len(scales) != spaceDimension):
            print ( "The size of Scales is ",
                      len(scales),
                      ", but the NumberOfParameters for the CostFunction is ",
                      spaceDimension, ".")
           
        transformedGradient  = np.asarray(self._Gradient) / scales
        previousTransformedGradient = np.asarray(self._PreviousGradient) / scales
            
        self._gradientMagnitude = np.sqrt( np.sum(transformedGradient**2) )

        if( self._gradientMagnitude < self._GradientMagnitudeTolerance ): 
            self._StopCondition = "GradientMagnitudeTolerance"
            self.StopOptimization()
            return
    
        scalarProduct = np.sum( transformedGradient * 
                                    previousTransformedGradient )
    
        # If there is a direction change
        # large positive changes could mask small small negative changes 
        #if( scalarProduct < 0 ):
        if (transformedGradient * previousTransformedGradient < 0).any():
            if self._debug:
                print "Step Length Contraction"
            self._CurrentStepLength *= self._RelaxationFactor
            self._stepLengthChanges += 1
          
        if( self._CurrentStepLength < self._MinimumStepLength ):   
            self._StopCondition = "StepTooSmall"
            self.StopOptimization()
            return

        if( self._Maximize ):
            direction = 1.0
        else:
            direction = -1.0
    
        factor = direction * self._CurrentStepLength / self._gradientMagnitude

        # This method StepAlongGradient() will 
        # be overloaded in non-vector spaces
        self.StepAlongGradient( factor, transformedGradient )


    def StepAlongGradient( self, factor, transformedGradient ):
        """
        """
        if self._debug:
            print "Current Iteration: ", self._CurrentIteration
            print "factor = ", factor
            print "Current Step Length = ", self._CurrentStepLength
            print "Gradient Magnitude = ", self._gradientMagnitude
            print "TransformedGradient= ", transformedGradient

        currentPosition = np.asarray( self.GetCurrentPosition() )

        newPosition = currentPosition + transformedGradient * factor

        if self._debug:
            print "new position = ", newPosition

        self.SetCurrentPosition( newPosition )
        self._runningTime = time.time() - self._startTime


    def PrintSelf(self):
        """
        """
        print "MaximumStepLength: ", self._MaximumStepLength
        print "MinimumStepLength: ", self._MinimumStepLength 
        print "RelaxationFactor: ",  self._RelaxationFactor 
        print "GradientMagnitudeTolerance: ", self._GradientMagnitudeTolerance 
        print "NumberOfIterations: ", self._NumberOfIterations 
        print "CurrentIteration: ", self._CurrentIteration   
        print "Value: ", self._Value 
        print "Maximize: ", self._Maximize 
        
        if (self._CostFunction):
            print "CostFunction: ", self._CostFunction 
        else:
            print "CostFunction: (None)" 
            
        print "CurrentStepLength: ", self._CurrentStepLength 
        print "StepLengthChanges: ", self._stepLengthChanges
        print "StopCondition: ", self._StopCondition 
        print "Gradient: ", self._Gradient 
        print "Initial Position: ", self._InitialPosition
        print "Current Position: ", self._CurrentPosition
        print "Running Time: ", self._runningTime
        
        if (self._ScalesInitialized):
            print "Scales: ", self._scales
        else:
            print "Scales: not defined (default 1)" 
            
    def GetStringOutput(self):
        """
        """
        dt = datetime.fromtimestamp(time.time())
        time_stamp = dt.strftime('%m/%d/%Y_%H:%M:%S')
            
        output = (time_stamp,
                  str(self._MaximumStepLength),
                  str(self._MinimumStepLength),
                  str(self._RelaxationFactor),
                  str(self._GradientMagnitudeTolerance),
                  str(self._NumberOfIterations),
                  str(self._CurrentIteration),
                  str(self._Value),
                  str(self._Maximize),
                  str(self._CurrentStepLength),
                  str(self._StopCondition),
                  str(self._Gradient),
                  str(self._InitialPosition),
                  str(self._CurrentPosition),
                  str(self._scales))
        
        return output
    
    def GetDictOutput(self,full=False):
        """Full output returns everything, otherwise just start/finish pose  """
        dt = datetime.fromtimestamp(time.time())
        time_stamp = dt.strftime('%m/%d/%Y_%H:%M:%S')
        if full:
            output = dict(timestamp = time_stamp,
                          MaximumStepLength = str(self._MaximumStepLength),
                          MinimumStepLength = str(self._MinimumStepLength),
                          RelaxationFactor = str(self._RelaxationFactor),
                          GradientMagnitudeTolerance = str(self._GradientMagnitudeTolerance),
                          NumberOfIterations = str(self._NumberOfIterations),
                          CurrentIteration = str(self._CurrentIteration),
                          Value = str(self._Value),
                          Maximize = str(self._Maximize),
                          CurrentStepLength = str(self._CurrentStepLength),
                          StopCondition = str(self._StopCondition),
                          Gradient = str(self._Gradient),
                          InitialPosition = str(self._InitialPosition),
                          CurrentPosition = str(self._CurrentPosition),
                          scales = str(self._scales))
        else:
            output = dict(InitialPosition = str(self._InitialPosition),
                          CurrentPosition = str(self._CurrentPosition))
            
        
        return output
    
    def PlotParams(self):
        """
        For Debugging.  Plot the step size for each iteration using the current
        parameters.
        """
        step = []
        step.append(self._MaximumStepLength)
        while True:
            if step[-1] > self._MinimumStepLength:
                step.append(step[-1] * self._RelaxationFactor)
            else:
                break
        
        pylab.plot(step)
            
    
        
    
    MaximumStepLength = property(GetMaximumStepLength, SetMaximumStepLength, None, "MaximumStepLength's Docstring")

    MinimumStepLength = property(GetMinimumStepLength, SetMinimumStepLength, None, "MinimumStepLength's Docstring")

    GradientMagnitudeTolerance = property(GetGradientMagnitudeTolerance, SetGradientMagnitudeTolerance, None, "GradientMagnitudeTolerance's Docstring")

    NumberOfIterations = property(GetNumberOfIterations, SetNumberOfIterations, None, "NumberOfIterations's Docstring")

    RelaxationFactor = property(GetRelaxationFactor, SetRelaxationFactor, None, "RelaxationFactor's Docstring")


class OptimumStepGradientDescentOptimizer(RegularStepGradientDescentOptimizer):
    """
    A variation of the RegularStepGradientDescentOptimizer where the step
    length is chosen at each iteration by a bounded 1-D minimization
    (Brent's method via scipy.optimize.fminbound) along the gradient
    direction.
    """

    def __init__(self):
        """Constructor: widen the step-length bracket used by the
        line search."""
        RegularStepGradientDescentOptimizer.__init__(self)
        self._MaximumStepLength = 10
        self._MinimumStepLength = 0.01

        # Last step length taken, kept for diagnostics / a possible
        # warm-start bracket for the line search.
        self._last_step = (self._MaximumStepLength
                           + self._MinimumStepLength) / 2.0

    def AdvanceOneStep(self):
        """Line-search for the best step length between the configured
        minimum and maximum, then step along the (scaled) gradient."""
        spaceDimension = self._CostFunction.GetNumberOfParameters()

        scales = np.asarray(self.GetScales())

        if self._RelaxationFactor < 0.0:
            print("Relaxation factor must be positive. Current value is %s"
                  % self._RelaxationFactor)
            return

        if self._RelaxationFactor >= 1.0:
            print("Relaxation factor must less than 1.0. Current value is %s"
                  % self._RelaxationFactor)
            return

        # The scales must match the parameter count, otherwise the
        # element-wise division below is meaningless.
        if len(scales) != spaceDimension:
            raise ValueError(
                "The size of Scales is %d, but the NumberOfParameters "
                "for the CostFunction is %d." % (len(scales), spaceDimension))

        transformedGradient = np.asarray(self._Gradient) / scales

        self._gradientMagnitude = np.sqrt(np.sum(transformedGradient ** 2))

        if self._gradientMagnitude < self._GradientMagnitudeTolerance:
            self._StopCondition = "GradientMagnitudeTolerance"
            self.StopOptimization()
            return

        direction = 1.0 if self._Maximize else -1.0

        def line_cost(step):
            # Cost at the trial position reached with this step length.
            factor = direction * step / self._gradientMagnitude
            trial = self._CurrentPosition + transformedGradient * factor
            value = self._CostFunction.GetValue(trial, **self.args)
            # fminbound minimizes, so negate the value when maximizing.
            return -value if self._Maximize else value

        # Optimum step length via Brent's method, bracketed by the
        # configured minimum/maximum step lengths.  (The previous code
        # computed these bounds and then called fminbound with hard-coded
        # ones of 1e-6 and 1e-2 instead.)
        best_step, _cost, n_iter, _fcalls = scipy.optimize.fminbound(
            line_cost, self._MinimumStepLength, self._MaximumStepLength,
            xtol=1e-4, full_output=1, maxfun=10, disp=0)

        if self._debug:
            print("Step Length Optimization")
            print("    Best step: %s" % best_step)
            print("    # iterations to find: %s" % n_iter)

        self._last_step = self._CurrentStepLength
        self._CurrentStepLength = float(best_step)

        factor = direction * self._CurrentStepLength / self._gradientMagnitude

        # This method StepAlongGradient() will
        # be overloaded in non-vector spaces.
        self.StepAlongGradient(factor, transformedGradient)



class StudholmeOptimizer:
    """
    Implementation of Studholme neighborhood search (used by Russakoff)

    Each iteration evaluates the metric at +/- one step in every parameter
    (2*nParams + 1 evaluations including the current point) and moves to
    the best neighbor.  The search runs over several levels; each level
    shrinks the step size (and is intended to reduce image blur).

    NOTE(review): this class references names that are not defined or
    imported in this module (Registration, kbhit, getch, cPickle) -- it
    appears to rely on globals supplied elsewhere; confirm before use.
    """

    def __init__(self):
        # Size of the step taken for each parameter at each iteration
        self._stepSize = .25
        # Number of parameters to optimize
        self._nParams = 6
        # Weights to scale the step size for each parameter (higher value means 
        # bigger step)
        self._weights = np.ones(6)
        #self._weights[3:6] = np.pi/180
        self._weights[3:6] = 1/35.0 # determined from start position data
        # The best parameters identified so far
        self._bestParameters = []
        # The best metric value identified so far
        self._bestValue = None
        # The parameters from the last iteration
        self._lastParameters = np.zeros(self._nParams)
        # The function to be optimized should have a GetMetric method
        self._function = None
        # Stop Conditions: 0=number of iterations, 1=tolerance (self._tol)
        # NOTE(review): Optimize() updates these flags but its inner loop is
        # `while 1`, so they are never actually checked -- verify intent.
        self._stopConditions = np.ones(2,'bool')
        # Maximum number of iterations (also sizes the history arrays below)
        self._maxIterations = 200
        # Set tolerance stop condition
        self._tol = 0.00001
        # Choose whether function should be minimized or maximized
        self._minimize = True
        # The number of levels to use in optimizer (each level will progressively
        # increase the resolution of the images (less blur) and decrease the step size.
        self._nLevels = 4
        # Set Registration
        # NOTE(review): `Registration` is not defined in this module; this
        # line raises NameError unless a global is provided elsewhere.
        self._registration = Registration
        
        # For debugging the images generated by the DRR function can be saved
        self._saveOutputImages = False
        
        # A flag to allow plotting during optimization
        self._plot = False
        
    
    def SetFunction(self,function):
        """
        Supply the function that is to be minimized.  It will be called with 
        the parameters to be optimized.
        """
        self._function = function
        
    def SetInitialParameters(self,parameters):
        """
        Initiate the optimizer with a starting guess.  The function will be 
        evaluated to determine the initial value.
        """
        self._nParams = len(parameters)
        self._initialGuess = parameters
        
        # If the weights have not been set, initialize them to 1
        # NOTE(review): comparing a numpy array to [] with == yields an
        # array (or False), not a simple truth value -- this test likely
        # does not behave as intended; confirm.
        if self._weights == []:
            self._weights = np.ones(self._nParams)
        
        # Get Initial value of function
        self._bestValue = self._function(self._initialGuess)
        print "Initial value for optimzer found"
        
    def SetRegistration(self, registration):
        """
        This is a temporary method until the registration class is updated.
        """
        self._registration = registration   
        
    def SaveImageAtEachIteration(self,outputDirectory):
        """
        For debugging it might be useful to have the image generated by the 
        DRR at each iteration.
        """ 
        if outputDirectory != None:
            self._outputDir = outputDirectory
            self._saveOutputImages = True
        else:
            self._saveOutputImages = False
            
            
    def Optimize(self):
        """
        Generate a metric value for +/- step for each parameter (total of 12 
        evaluations, plus the original).
        Returns best parameters and best value.
        """
        if self._plot:
            pylab.ion()
            fig = pylab.figure()
            fig.canvas.set_window_title('Optimizer Results')
            pylab.hold(False)
        
        nParams = self._nParams
        start = time.time()
        
        # Initial parameters must be set first (SetInitialParameters)
        bestParameters = self._initialGuess
        lastParameters = np.zeros(nParams)
        
        # The neighborhood of parameters (+/- stepSize and original),
        # lastParameters will be item 0
        currentParams = np.zeros((2*nParams+1,nParams))
        currentParams[0] = bestParameters
#        currentParams[:][:] = self._bestParameters
        # The neighborhood of metric values (+/- stepSize and original) 
        # lastValue will be item 0
        currentValues = np.zeros(2*nParams+1)
        currentValues[0] = self._function(bestParameters)
#        currentValues[:] = self._bestValue
        # Full history of accepted parameters/values, preallocated.
        # NOTE(review): indexed by `iteration`, which is only bounds-checked
        # *after* the write below -- can overflow by one; verify.
        allParams = np.zeros((self._maxIterations,nParams))
        allValues = np.zeros(self._maxIterations)
        
        allParams[0,:] = currentParams[0]
        allValues[0] = currentValues[0]

        iteration = 1

        # Save some diagnostic information for tuning the optimizer
        debugInfo = np.zeros((self._maxIterations,10))
        
        # Level will determine amount of image blur and step size 
        # Reverse level increment, ending at 1
        for level in range(self._nLevels,0,-1):
            # Set the image blur based on the level
            # NOTE(review): integer division under Python 2 (level/2 == 0
            # for level 1) -- confirm this is the intended blur schedule.
            blur = level/2
            #self._registration._blur = blur
            # Set the step size based on the level
            stepSize = level*self._stepSize
            print "Level: ", level, ", Tx Step Size: ",stepSize , ", Rx Step Size: ", stepSize * self._weights[5]
            print "Current parameters: ", bestParameters
            print "Stop Conditions: ", self._stopConditions
            #while self._stopConditions.all(): #loop until a solution is found
            #while bestParameters.all != self._lastParameters.all:
            while 1: 
                for p in range(1,nParams+1):
                    # initialize pose and value neighborhood to best estimates
                    currentParams[p] = bestParameters
                    # Add step to each parameter in turn
                    currentParams[p][p-1] += stepSize * self._weights[p-1]
                    currentValues[p] = self._function(currentParams[p])
                    #self._registration._ImageGenerator.Render()
                    print "+: ", p, ", V:", currentValues[p], \
                        ", P:", currentParams[p]
                    # Subtract step to each parameter in turn
                    currentParams[p+nParams] = bestParameters
                    currentParams[p+nParams][p-1] -= stepSize * self._weights[p-1]
                    currentValues[p+nParams] = self._function(currentParams[p+nParams])
                    #self._registration._ImageGenerator.Render()
                    print "-: ", p+nParams, ", V:", currentValues[p+nParams], \
                        ", P:", currentParams[p+nParams]
                    
                print "Neighborhood generated, finding best solution"
                # Search for minimum optimum value
                if self._minimize:
                    bestNeighbor = currentValues.argmin()
                else:
                    bestNeighbor = currentValues.argmax()
                    
                if bestNeighbor == 0:
                    # The current point beats every neighbor: done at this level
                    print "Converged at level ", level
                    break
                else:
                    # Setup for next iteration
                    # the best parameters will become item 0 in the currentParams
                    # array and will be modified for each of the other parameters
                    # First save the last parameters, before reassigning the bestParameters
                    # to the new values
                    lastParameters = bestParameters
                    # Now assign new values
                    # NOTE(review): `currentParams[bestNeighbor]` is a row
                    # *view* into currentParams, so bestParameters aliases the
                    # array and may be mutated by the += / -= updates above on
                    # the next sweep -- verify a .copy() is not needed here.
                    bestValue = currentValues[bestNeighbor]
                    bestParameters = currentParams[bestNeighbor]
                    print "Best neighbor: ", bestNeighbor, ", Value: ", bestValue
                    print "Best Parameters: ", bestParameters
                    print "Last Parameters: ", lastParameters
                    # Now assign the values so that they are ready for next iteration
                    currentParams[0] = bestParameters
                    currentValues[0] = bestValue
                    allParams[iteration,:] = bestParameters
                    allValues[iteration] = bestValue
                    iteration += 1
                    if self._plot:
                        pylab.subplot(311)
                        pylab.plot(allParams[0:iteration,0:3])
                        pylab.ylabel('Translations (mm)')
                        pylab.subplot(312)
                        pylab.plot(allParams[0:iteration,3:6])
                        pylab.ylabel('Rotations (rad)')
                        pylab.subplot(313)
                        pylab.plot(allValues[0:iteration])
                        pylab.ylabel('Function Value')
                
                p = bestParameters
                # NOTE(review): iteration/12 is Python 2 integer division and
                # assumes exactly 6 parameters (p[0]..p[5]) -- confirm.
                debugInfo[iteration/12,0:9]=[level,stepSize,p[0],p[1],p[2],p[3],p[4],p[5],bestValue]
#                debugInfo=1
                
                
                if iteration > self._maxIterations:
                    # Max number of iterations reached updated stop conditions
                    self._stopConditions[0] = False
                
                # NOTE(review): .all() reduces each array to a single bool, so
                # this is bool-minus-bool, not a parameter-wise tolerance test;
                # likely intended: np.abs(bestParameters - lastParameters).max()
                if np.abs(bestParameters.all()-lastParameters.all()) < self._tol:
                    self._stopConditions[1] = False
                
                # Keyboard escape hatch: 'q' aborts, any other key skips to
                # the next level.  NOTE(review): kbhit/getch are not imported
                # here (msvcrt on Windows?) -- confirm.
                if kbhit():
                    k = getch()
                    if k == 'q':
                        print "Optimization Cancelled"
                        return bestParameters, bestValue
                    else:
                        print "Ending current level and continuing"
                        break
        
        end = time.time()
        print "Optimizer run time: ", end-start, ' s'
        # NOTE(review): hard-coded absolute debug path and cPickle is not
        # imported in this module -- confirm before relying on this dump.
        debugFileName = 'C:/Users/bryan/bryan-code/trunk/Images/debug/regResults.dat'
        debugFile = open(debugFileName,'w')
        cPickle.dump(debugInfo,debugFile)
        print "Debug info pickled here: ", debugFileName
        pylab.show()
        return bestParameters, bestValue


class OptimizerGD:
    """
    Gradient descent optimizer for the registration class.

    Walks down along the gradient, adapting the step size as needed:
    a successful step multiplies the step by 'stepf', a failed step
    divides it by 'stepf' (bounded below by 'minstep').

    Usage:

      o = OptimizerGD(evaluableProblemInstance)
      while not o.hasConverged():
          o.makeStep()
      print(o.getX())

    The problem instance must provide GetValue(x) and
    GetGradient(x, grad_range).  Optional constructor parameters include
    the starting point 'startx', tolerances 'abstol'/'reltol'/'xtol',
    verbosity flag 'verbose', and a callback function 'cbf' that is
    called with the current cost after each evaluation.

    Adapted from Jan Kybic, October 2000
    $Id: bigoptimize.py,v 1.2 2002/01/21 16:33:19 jkybic Exp $
    """

    def __init__(self, problem, startx=None, abstol=1e-10, reltol=1e-6,
                 verbose=0, xtol=1e-6, startstep=2.0, minstep=1e-5,
                 stepf=2, cbf=None):
        self.p = problem  # the problem (cost function) instance
        self.minstep = minstep
        self.cbf = cbf
        self.abstol = abstol
        self.reltol = reltol
        self.verbose = verbose
        self.step = startstep
        self.stepf = stepf
        self.xtol = xtol
        self.dx = None  # last step taken in parameter space
        self.scale = [100, 100, 100, 1, 1, 1]
        # Finite-difference ranges passed to GetGradient for the 6 pose
        # parameters (translations, then rotations in radians).
        self.grad_range = np.array([2, 2, 2, np.pi / 90, np.pi / 90,
                                    np.pi / 90])
        # TODO: rewrite registration class to match problem class
        self.setX(startx)  # evaluate cost/gradient at the initial point
        # self.cost, self.gradient now contain the most current values
        self.i = 1  # the iteration number (number of evaluations)
        if self.verbose:
            print("Initial i=%d step=%s Cost=%s"
                  % (self.i, self.step, self.cost))
        if self.cbf:
            self.cbf(self.cost)

    def hasConvergedCost(self):
        """Convergence test on the change of the cost value."""
        if self.step <= self.minstep:
            return 1
        if not self.success:
            return 0  # finish only after going down (oldCost is set then)
        dif = abs(self.cost - self.oldCost)
        return dif <= self.abstol or dif <= self.reltol * self.cost

    def hasConverged(self):
        """Convergence test on the change of x: the maximum absolute
        component of the last step must be no larger than xtol."""
        if self.dx is None:
            return 0  # we have not yet tried a step
        if self.verbose > 1:
            print("linf norm of dx=%s" % np.max(np.abs(self.dx)))
        return np.max(abs(self.dx)) <= self.xtol

    def setX(self, x):
        """Set the current point and evaluate cost and gradient there."""
        self.x = x
        self.cost = self.p.GetValue(self.x)
        self.gradient = self.p.GetGradient(self.x, self.grad_range)
        self.success = 0  # was the last step successful

    def getX(self):
        return self.x

    def GetCost(self):
        return self.cost

    def getStep(self):
        return self.step

    def setStep(self, step):
        self.step = step

    def makeStep(self):
        """Try one gradient-descent step; grow the step size on success,
        shrink it (never below minstep) on failure."""
        # calculate next trial point and evaluate there
        self.dx = -self.gradient * self.step
        tx = self.x + self.dx
        tCost = self.p.GetValue(tx)
        tGradient = self.p.GetGradient(tx, self.grad_range)
        self.i = self.i + 1
        if tCost <= self.cost:  # successful step
            self.oldCost = self.cost
            self.step = self.step * self.stepf
            self.x, self.cost, self.gradient = tx, tCost, tGradient
            self.success = 1
            if self.verbose:
                print("x:        %s" % (tx,))
                print("Success: i=%d step=%s Cost=%s"
                      % (self.i, self.step, self.cost))
        else:
            # Failed step: shrink the step size, bounded below by minstep.
            self.step = max(self.step / self.stepf, self.minstep)
            self.success = 0  # we do not want to stop in the next step
            if self.verbose:
                print("Failure: i=%d step=%s Cost=%s"
                      % (self.i, self.step, tCost))
        if self.cbf:
            self.cbf(self.cost)

    def makeSuccessfulStep(self):
        """Repeat makeStep() until one succeeds or the step size bottoms
        out at minstep."""
        while 1:
            self.makeStep()
            if self.success or self.step <= self.minstep:
                return

    def showEnvironment(self, title=None, grad=None, step=None):
        """Debug helper: plot the cost profile along the (normalized)
        gradient direction around the current point.

        Fixed from the original, which used unqualified arange/zeros/sqrt,
        the nonexistent attributes self.gr / self.p.GetCost, normalized
        the caller's gradient array in place, and passed an invalid
        title= keyword to pylab.plot.
        """
        stept = 0.1
        t = np.arange(-2, 2 + stept, stept)
        y = np.zeros(t.shape, 'f')
        if grad is None:
            grad = self.gradient
        # Normalize a copy so self.gradient is not mutated in place.
        grad = grad / np.sqrt(np.sum(grad ** 2))
        if step is None:
            step = self.step
        for i in range(t.shape[0]):
            y[i] = self.p.GetValue(self.x + grad * t[i] * step)
        g = pylab.plot(t, y)
        pylab.title(title or 'showEnvironment')
        return g

class rosen_test_func():
    """Rosenbrock test cost function wrapping scipy.optimize.rosen, with
    the interface the optimizers in this module expect: GetValue,
    GetGradient and GetNumberOfParameters."""

    def __init__(self):
        self.func = scipy.optimize.rosen
        self.grad = scipy.optimize.rosen_der
        self.count = 0  # number of cost evaluations so far

    def GetValue(self, x):
        """Evaluate the cost once, log it, and return it.

        The original evaluated self.func(x) twice per call and the log
        line assumed exactly six parameters.
        """
        cost = self.func(x)
        self.count += 1
        print("Iteration: %d, Cost: %f,  X: %s"
              % (self.count, cost, ", ".join("%f" % xi for xi in x)))
        return cost

    def GetGradient(self, x, step):
        """Analytic Rosenbrock gradient; the finite-difference range
        *step* is accepted for interface compatibility but unused."""
        return self.grad(x)

    def GetNumberOfParameters(self):
        return 6
        

def help():
    """Direct the user to the external test script."""
    print("See Test_Optimize.py")
    
def test():
    """Smoke test: drive the regular-step optimizer on the Rosenbrock
    function and print the final optimizer state."""
    cost_function = rosen_test_func()
    optimizer = RegularStepGradientDescentOptimizer()
    optimizer.SetCostFunction(cost_function)
    optimizer.SetInitialPosition([2.4, -2.6, 4.6, 2.6, 1.9, -4.3])
    optimizer.SetNumberOfIterations(10000)
    optimizer.SetMinimumStepLength(1e-6)
    optimizer.StartOptimization()
    optimizer.PrintSelf()
    
    
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    test()
    
    
        