# LLR, "learning with linear rewards". 
# Based on https://arxiv.org/abs/1011.4748. 

# Marker type selecting the LLR policy via dispatch in `choose_action`.
# It carries no fields: all per-arm statistics live in the `State` argument.
# NOTE(review): `mutable` looks unnecessary for a field-less marker type —
# confirm nothing relies on per-instance object identity before changing it.
mutable struct LLR <: Policy end

# Trace record returned alongside the solution when `choose_action` is called
# with `with_trace=true`.
struct LLRDetails <: PolicyDetails
  # Time spent inside `solve_linear`, taken as `(t1 - t0).value` on the
  # difference of two `now()` timestamps — presumably milliseconds (the
  # `Millisecond` count of a `DateTime` difference); confirm against callers.
  solverTime::Float64
end

"""
    choose_action(instance::CombinatorialInstance, policy::LLR, state::State{T};
                  with_trace::Bool=false) where T

Choose the next action with the LLR ("learning with linear rewards") policy
(https://arxiv.org/abs/1011.4748).

While at least one arm has never been played, an initialisation round is
performed: never-played arms get weight `1.0` and all others `0.0`, so the
combinatorial solver favours untested arms. Otherwise, each arm's weight is its
empirical average reward plus the LLR exploration bonus
`sqrt((nArms + 1) * log(round + 1) / count)`.

Returns the solution of `solve_linear(instance, weights)`. When `with_trace` is
`true`, returns a tuple `(solution, details::LLRDetails)` where `details`
records the solver time (the `.value` of the `DateTime` difference, i.e. a
millisecond count).
"""
function choose_action(instance::CombinatorialInstance, policy::LLR, state::State{T}; with_trace::Bool=false) where T
  nArms = length(state.arm_counts)
  if any(iszero, values(state.arm_counts))
    # Initialisation step: for each random variable (i.e. reward source), try
    # arms, preferring those that have never been tested.
    weights = Dict(arm => iszero(count) ? 1.0 : 0.0 for (arm, count) in pairs(state.arm_counts))
  else
    # Determine the UCB-style weight for each arm and use them to solve the
    # combinatorial problem.
    weights = Dict(
      arm => state.arm_average_reward[arm] +
             sqrt(((nArms + 1) * log(state.round + 1)) / count)
      for (arm, count) in pairs(state.arm_counts)
    )
  end

  # Time only the combinatorial-optimisation step.
  t0 = now()
  sol = solve_linear(instance, weights)
  t1 = now()

  if with_trace
    # `(t1 - t0)` is a `Millisecond` period; `.value` extracts its integer count.
    return sol, LLRDetails((t1 - t0).value)
  else
    return sol
  end
end