@testset "Policies" begin
  # All policy tests share the same toy problem: a 3×3 perfect bipartite
  # matching whose edges pay Bernoulli rewards with mean .5, except the
  # diagonal edges which are slightly better (.5 + epsilon). Returns the
  # problem size and a fresh instance backed by a Cbc-based LP solver.
  function makeinstance()
    n = 3
    epsilon = .1
    reward = Distribution[Bernoulli(.5 + ((i == j) ? epsilon : 0.)) for i in 1:n, j in 1:n]
    return n, PerfectBipartiteMatching(reward, PerfectBipartiteMatchingLPSolver(CbcSolver(logLevel=0)))
  end

  # Compare a simulation end state against the expected totals and per-arm
  # statistics. `counts` and `rewards` are n×n matrices indexed by arm (i, j).
  # The expected values below were obtained from a fixed-seed run, so any
  # behavioral change in a policy shows up as a mismatch here.
  function checkstate(s, n, n_rounds, regret, reward, counts, rewards)
    @test s.round == n_rounds
    @test sum(values(s.arm_counts)) == n_rounds * n
    @test s.regret ≈ regret
    @test s.reward ≈ reward
    @test s.regret + s.reward ≈ regret + reward

    for i in 1:n, j in 1:n
      @test s.arm_counts[(i, j)] == counts[i, j]
    end
    for i in 1:n, j in 1:n
      @test s.arm_reward[(i, j)] ≈ rewards[i, j]
    end
    # Invariant: the stored average is always total reward over pull count.
    for arm in keys(s.arm_counts)
      @test s.arm_average_reward[arm] ≈ s.arm_reward[arm] / s.arm_counts[arm]
    end
  end

  @testset "Thompson sampling" begin
    @testset "Algorithm" begin
      n, instance = makeinstance()

      # TODO: Cbc seems to output garbage for n_rounds - 1 iterations (probably because it does not support hot starts). 
      srand(1)  # Fixed seed: the expected values below depend on it.
      n_rounds = 20
      s = simulate(instance, ThompsonSampling(), n_rounds)
      checkstate(s, n, n_rounds, 5.0, 31.0,
                 [10 7 3; 4 11 5; 6 2 12],
                 [3.0 4.0 0.0; 1.0 9.0 3.0; 3.0 0.0 8.0])
    end

    @testset "Trace" begin
      n, instance = makeinstance()
      n_rounds = 20
      test_policy_trace(instance, ThompsonSampling(), n_rounds, n_rounds * n, 5.0, 31.0, 36.0)
    end
  end

  @testset "LLR" begin
    @testset "Algorithm" begin
      n, instance = makeinstance()

      # TODO: Cbc seems to output garbage for n_rounds - 1 iterations (probably because it does not support hot starts). 
      srand(1)
      n_rounds = 20
      s = simulate(instance, LLR(), n_rounds)
      checkstate(s, n, n_rounds, 1.0, 35.0,
                 [6 7 7; 8 6 6; 6 7 7],
                 [3.0 3.0 6.0; 6.0 3.0 3.0; 2.0 3.0 6.0])
    end

    @testset "Trace" begin
      n, instance = makeinstance()
      n_rounds = 20
      test_policy_trace(instance, LLR(), n_rounds, n_rounds * n, 1.0, 35.0, 36.0)
    end
  end

  @testset "CUCB" begin
    @testset "Algorithm" begin
      n, instance = makeinstance()

      # TODO: Cbc seems to output garbage for n_rounds - 1 iterations (probably because it does not support hot starts). 
      srand(1)
      n_rounds = 20
      s = simulate(instance, CUCB(), n_rounds)
      checkstate(s, n, n_rounds, 2.0, 34.0,
                 [3 10 7; 12 3 5; 5 7 8],
                 [0.0 6.0 4.0; 9.0 0.0 3.0; 2.0 3.0 7.0])
    end

    @testset "Trace" begin
      n, instance = makeinstance()
      n_rounds = 20
      test_policy_trace(instance, CUCB(), n_rounds, n_rounds * n, 2.0, 34.0, 36.0)
    end
  end

  @testset "ECSB2" begin
    # The three "Algorithm" testsets below share this instance; each "Trace"
    # testset builds its own, matching the original layout.
    n, instance = makeinstance()

    @testset "FPTAS" begin
      @testset "Algorithm" begin
        # TODO: Cbc seems to output garbage for n_rounds - 1 iterations (probably because it does not support hot starts). 
        srand(1)
        n_rounds = 20
        s = simulate(instance, ECSB2(.5, ECSB2FPTAS()), n_rounds)
        checkstate(s, n, n_rounds, -1.0, 37.0,
                   [6 8 6; 6 8 6; 8 4 8],
                   [3.0 4.0 5.0; 3.0 4.0 4.0; 5.0 1.0 8.0])
      end

      @testset "Trace" begin
        n2, instance2 = makeinstance()
        n_rounds = 20
        test_policy_trace(instance2, ECSB2(.5, ECSB2FPTAS()), n_rounds, n_rounds * n2, -1.0, 37.0, 36.0)
      end
    end

    @testset "Exact" begin
      @testset "Algorithm" begin
        # TODO: Cbc seems to output garbage for n_rounds - 1 iterations (probably because it does not support hot starts). 
        srand(1)
        n_rounds = 20
        s = simulate(instance, ECSB2(.5, ECSB2Exact(PajaritoSolver(mip_solver=CbcSolver(logLevel=0), cont_solver=SCSSolver(verbose=0), log_level=0))), n_rounds)
        checkstate(s, n, n_rounds, -2.0, 38.0,
                   [6 8 6; 6 8 6; 8 4 8],
                   [3.0 4.0 4.0; 3.0 5.0 5.0; 5.0 1.0 8.0])
      end

      @testset "Trace" begin
        n2, instance2 = makeinstance()
        n_rounds = 20
        test_policy_trace(instance2, ECSB2(.5, ECSB2Exact(PajaritoSolver(mip_solver=CbcSolver(logLevel=0), cont_solver=SCSSolver(verbose=0), log_level=0))), n_rounds, n_rounds * n2, -2.0, 38.0, 36.0)
      end
    end

    @testset "Greedy" begin
      @testset "Algorithm" begin
        # TODO: Cbc seems to output garbage for n_rounds - 1 iterations (probably because it does not support hot starts). 
        srand(1)
        n_rounds = 20
        s = simulate(instance, ECSB2(.5, ECSB2Greedy()), n_rounds)
        checkstate(s, n, n_rounds, -2.0, 38.0,
                   [8 6 6; 6 7 7; 6 7 7],
                   [7.0 4.0 3.0; 3.0 6.0 4.0; 2.0 5.0 4.0])
      end

      @testset "Trace" begin
        n2, instance2 = makeinstance()
        n_rounds = 20
        test_policy_trace(instance2, ECSB2(.5, ECSB2Greedy()), n_rounds, n_rounds * n2, -2.0, 38.0, 36.0)
      end
    end
  end
end
