--[[ A modified implementation of Adam where different parameter ranges use different learning rates

    ARGS:
    opfunc                   - a function that takes a single input (X), the point of evaluation, and returns f(X) and df/dX
    x                        - the initial point
    config                   - a table with configuration parameters for the optimizer
    config.learningRate      - a 1D tensor of learning rates, one per parameter group
    config.learningRateSplit - a 1D tensor of split indices into x that delimits the parameter groups
    config.learningRateDecay - learning rate decay
    config.beta1             - first moment coefficient
    config.beta2             - second moment coefficient
    config.epsilon           - for numerical stability
    config.weightDecay       - weight decay
    state                    - a table describing the state of the optimizer; after each
                                  call the state is modified
    Returns:
    x                        - the new x vector
    f(x)                     - the function value, evaluated before the update
]]--

require 'optim'

function optim.adammy(opfunc, x, config, state)
   -- (0) get/update state
   local config = config or {}
   local state = state or config
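   -- defaults: a single parameter group covering all of x, with learning rate 1e-3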
   local lr = config.learningRate or torch.Tensor({0.001})
   local lrs = config.learningRateSplit or torch.Tensor({0,x:size(1)})
   local lrd = config.learningRateDecay or 0

   local beta1 = config.beta1 or 0.9
   local beta2 = config.beta2 or 0.999
   local epsilon = config.epsilon or 1e-8
   local wd = config.weightDecay or 0

   -- (1) evaluate f(x) and df/dx
   local fx, dfdx = opfunc(x)

   -- (2) weight decay
   if wd ~= 0 then
      dfdx:add(wd, x)
   end

   -- Initialization
   state.t = state.t or 0
   -- Exponential moving average of gradient values
   state.m = state.m or x.new(dfdx:size()):zero()
   -- Exponential moving average of squared gradient values
   state.v = state.v or x.new(dfdx:size()):zero()
   -- A temporary tensor to hold sqrt(v) + epsilon
   state.denom = state.denom or x.new(dfdx:size()):zero()

   -- (3) learning rate decay (annealing)
   local clr = torch.div(lr, 1 + state.t*lrd)

   state.t = state.t + 1

   -- Update the biased moment estimates: m = beta1*m + (1-beta1)*g,  v = beta2*v + (1-beta2)*g^2
   state.m:mul(beta1):add(1-beta1, dfdx)
   state.v:mul(beta2):addcmul(1-beta2, dfdx, dfdx)

   state.denom:copy(state.v):sqrt():add(epsilon)

   local biasCorrection1 = 1 - beta1^state.t
   local biasCorrection2 = 1 - beta2^state.t
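   -- Fold both bias corrections into the step size, as in the stock optim.adam:
   -- stepSize = clr * sqrt(1 - beta2^t) / (1 - beta1^t), one entry per parameter group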
   local stepSize = clr:mul(math.sqrt(biasCorrection2)/biasCorrection1)
   -- (4) update x
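   -- Each contiguous slice x[lrs[i]+1 .. lrs[i+1]] is one parameter group and is updated
   -- with its own step size stepSize[i]; the slices are views into x, so addcdiv
   -- modifies x in place.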
   for i = 1, lrs:size(1)-1 do
      local first, last = lrs[i]+1, lrs[i+1]
      x[{{first, last}}]:addcdiv(-stepSize[i], state.m[{{first, last}}], state.denom[{{first, last}}])
   end

   -- return x*, f(x) before optimization
   return x, {fx}
end
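
-- A minimal usage sketch (illustration only, not part of the optimizer): minimise
-- f(x) = 0.5*||x||^2 over a 10-dimensional x, giving the first four entries a larger
-- learning rate than the rest. The objective, split point, and rates are made-up
-- example values; uncomment and paste into a script after loading this file to run it.
--[[
local x = torch.randn(10)
local config = {
   learningRate      = torch.Tensor({0.01, 0.001}),  -- one rate per parameter group
   learningRateSplit = torch.Tensor({0, 4, 10})      -- groups: x[1..4] and x[5..10]
}
local state = {}

-- f(x) = 0.5*||x||^2, so df/dx = x
local function opfunc(x)
   return 0.5 * x:dot(x), x:clone()
end

for iter = 1, 100 do
   optim.adammy(opfunc, x, config, state)
end
]]--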