module CombinatorialBandits
  using JuMP
  using Distributions
  using DataStructures
  import Convex # Not "using" to avoid clashes with JuMP. 
  import Munkres # Avoid clashing with Hungarian. 
  import Hungarian
  using LightGraphs

  import Base: push!, copy, hash, isequal

  export Policy, CombinatorialInstance, State, 
         initial_state, initial_trace, simulate, choose_action, pull, update!, solve_linear, is_feasible, optimal_reward, 
         ThompsonSampling, LLR, CUCB, ECSB2, 
         ThompsonSamplingDetails, LLRDetails, CUCBDetails, ECSB2Details, ECSB2Exact, ECSB2FPTAS, ECSB2Greedy, 
         PerfectBipartiteMatching, PerfectBipartiteMatchingSolver, PerfectBipartiteMatchingLPSolver, PerfectBipartiteMatchingMunkresSolver, PerfectBipartiteMatchingHungarianSolver, 
         CompleteGraphShortestPath, CompleteGraphShortestPathSolver, CompleteGraphShortestPathLightGraphsDijkstraSolver

  # General algorithm. 
  # Define a new policy: subtype Policy, implement choose_action. Also subtype PolicyDetails to provide more details about the execution within the trace (can be empty, though). 
  # Define a new problem type: subtype CombinatorialInstance (it must provide a n_arms property), implement initial_state, initial_trace (optional, only required to start simulate with with_trace=true), pull, solve_linear, is_feasible, optimal_reward (defaults to returning the optimal_reward field). 
  # A bandit policy; concrete subtypes must implement `choose_action`.
  abstract type Policy end
  # Per-round execution details a policy reports when tracing (may carry no data).
  abstract type PolicyDetails end
  # A combinatorial bandit problem; concrete subtypes must provide an `n_arms`
  # property and implement the instance interface declared further down.
  abstract type CombinatorialInstance end

  # Define the state of a bandit (evolves at each round). 
  # Mutable per-run bandit state, updated in place by `update!` after every round.
  # `T` is the arm type of the instance (e.g. a tuple of vertices for matchings).
  mutable struct State{T} 
    round::Int                          # number of rounds played so far
    regret::Float64                     # cumulative regret w.r.t. `optimal_reward(instance)`
    reward::Float64                     # cumulative reward over all rounds and arms
    arm_counts::Dict{T, Int}            # number of times each arm has been played
    arm_reward::Dict{T, Float64}        # cumulative reward observed for each arm
    arm_average_reward::Dict{T, Float64} # arm_reward[a] / arm_counts[a], kept in sync by `update!`
  end

  copy(s::State{T}) where T = State{T}(s.round, s.regret, s.reward, s.arm_counts, s.arm_reward, s.arm_average_reward)
  # hash(s::State{T}, h::UInt) where T = hash(s.round, hash(s.regret, hash(s.reward, hash(s.arm_counts, 
  #                                           hash(s.arm_reward, hash(s.arm_average_reward, hash(:State, h)))))))
  # isequal(s1::State{T}, s2::State{T}) where T = s1.round == s2.round && s1.regret == s2.regret && s1.reward == s2.reward && 
  #                                               s1.arm_counts == s2.arm_counts && s1.arm_reward == s2.arm_reward && s1.arm_average_reward == s2.arm_average_reward

  # Define the trace of the execution throughout the rounds. 
  # Full history of a bandit run, one entry per round in each parallel vector.
  # Filled by `push!` below; returned by `simulate` when `with_trace=true`.
  struct Trace{T}
    states::Vector{State{T}}            # snapshot of the state after each round
    arms::Vector{Vector{T}}             # arms chosen at each round
    reward::Vector{Vector{Float64}}     # per-arm rewards observed at each round
    policyDetails::Vector{PolicyDetails} # policy-specific details for each round
  end

  """
      push!(trace::Trace{T}, state::State{T}, arms::Vector{T}, reward::Vector{Float64}, policyDetails::PolicyDetails) where T

  Appends the arguments to the execution trace of the bandit algorithm. More specifically, `trace`'s data structures are 
  updated to also include `state`, `arms`, `reward`, and `policyDetails`. All of these arguments are copied, *except* `policyDetails`. 
  (Indeed, the usual scenario is to keep updating the state, the arms and the rewards, but to build the details at each round from the ground up.)
  """
  function push!(trace::Trace{T}, state::State{T}, arms::Vector{T}, reward::Vector{Float64}, policyDetails::PolicyDetails) where T
    push!(trace.states, copy(state))
    push!(trace.arms, copy(arms))
    push!(trace.reward, copy(reward))
    push!(trace.policyDetails, policyDetails)
  end

  # Interface for instances. 
  # Each method below is a fallback that returns `nothing`; concrete instance types
  # (included at the bottom of the module) provide the real implementations.
  # NOTE(review): a subtype that forgets an implementation silently gets `nothing`
  # rather than a MethodError — `function initial_state end`-style stubs would fail
  # loudly instead; confirm no caller relies on the `nothing` fallback before changing.
  function initial_state(instance::CombinatorialInstance) end
  function initial_trace(instance::CombinatorialInstance) end
  function pull(instance::CombinatorialInstance, arms::Vector{T}) where T end
  function solve_linear(instance::CombinatorialInstance, weights::Dict{T, Float64}) where T end
  function is_feasible(instance::CombinatorialInstance, arms::Vector{T}) where T end
  function optimal_reward(instance::CombinatorialInstance) return instance.optimal_reward end

  # Interface for policies. 
  # Fallback returning `nothing`; every policy type implements its own method.
  # Implementations also accept a `with_trace` keyword (see `simulate`): when true,
  # they return `(arms, details::PolicyDetails)` instead of just `arms`.
  function choose_action(instance::CombinatorialInstance, policy::Policy, state::State{T}) where T end

  # Update the state before the new round. 
  """
      update!(state::State{T}, instance::CombinatorialInstance, arms::Vector{T}, reward::Vector{Float64}) where T

  Update `state` in place after one round in which `arms` were played and the
  per-arm rewards `reward` were observed (semi-bandit feedback: one reward per
  arm, not a single reward for the whole action). Increments the round counter,
  the per-arm statistics, the cumulative reward, and the cumulative regret
  (relative to `optimal_reward(instance)`).

  Throws `DimensionMismatch` if `arms` and `reward` have different lengths.
  """
  function update!(state::State{T}, instance::CombinatorialInstance, arms::Vector{T}, reward::Vector{Float64}) where T
    length(arms) == length(reward) ||
      throw(DimensionMismatch("got $(length(arms)) arms but $(length(reward)) rewards"))

    state.round += 1

    for (arm, r) in zip(arms, reward)
      state.arm_counts[arm] += 1
      state.arm_reward[arm] += r
      state.arm_average_reward[arm] = state.arm_reward[arm] / state.arm_counts[arm]
    end

    round_reward = sum(reward)
    state.reward += round_reward
    state.regret += optimal_reward(instance) - round_reward
  end

  # Use the bandit for the given number of steps. 
  """
      simulate(instance::CombinatorialInstance, policy::Policy, steps::Int; with_trace::Bool=false)

  Run `policy` on `instance` for `steps` rounds, starting from
  `initial_state(instance)`. Returns the final `State`; when `with_trace=true`,
  returns the tuple `(state, trace)` where `trace` records every round.

  Throws an error if the policy ever returns an empty set of arms.
  """
  function simulate(instance::CombinatorialInstance, policy::Policy, steps::Int; with_trace::Bool=false)
    state = initial_state(instance)
    # Always bind `trace` so it is never conditionally undefined below.
    trace = with_trace ? initial_trace(instance) : nothing

    for i in 1:steps
      # With tracing, policies return (arms, details); without, just the arms.
      if with_trace
        arms, run_details = choose_action(instance, policy, state, with_trace=true)
      else
        arms = choose_action(instance, policy, state)
      end

      if isempty(arms)
        error("No arms have been chosen at round $(i)!")
      end

      reward = pull(instance, arms)
      update!(state, instance, arms, reward)

      if with_trace
        push!(trace, state, arms, reward, run_details)
      end
    end

    return with_trace ? (state, trace) : state
  end

  ## Bandits policies. 
  include("policies/thompson.jl")
  include("policies/llr.jl")
  include("policies/cucb.jl")
  include("policies/ecsb2.jl")

  ## Potential problems to solve. 
  include("instances/perfectbipartitematching.jl")
  include("instances/perfectbipartitematching_lp.jl")
  include("instances/perfectbipartitematching_munkres.jl")
  include("instances/perfectbipartitematching_hungarian.jl")
  include("instances/completegraphshortestpath.jl")
  include("instances/completegraphshortestpath_lightgraphsdijkstra.jl")

  ## Things that must be included afterwards. 
  include("policies/ecsb2_exact.jl") # Uses both policies/ and instances/ definitions. 
  include("policies/ecsb2_fptas.jl")
  include("policies/ecsb2_greedy.jl")
end