# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Compute the exploitability of a bot / strategy in a 2p sequential game.

This computes the value that a policy achieves against a worst-case opponent.
The policy applies to both player 1 and player 2, and hence we have a 2-player
symmetric zero-sum game, so the game value is zero for both players, and hence
value-vs-best-response is equal to exploitability.

We construct information sets, each consisting of a list of (state, probability)
pairs where probability is a counterfactual reach probability, i.e. the
probability that the state would be reached if the best responder (the current
player) played to reach it. This is the product of the probabilities of the
necessary chance events and opponent action choices required to reach the node.

These probabilities give us the correct weighting for possible states of the
world when considering our best response for a particular information set.

The values we calculate are values of being in the specific state. Unlike in a
CFR algorithm, they are not weighted by reach probabilities. These values
take into account the whole state, so they may depend on information which is
unknown to the best-responding player.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import functools

import numpy as np

import pyspiel


def _memoize_method(method):
  """Memoize a single-arg instance method using an on-object cache."""
  cache_name = "cache_" + method.__name__

  def wrap(self, arg):
    key = str(arg)
    cache = vars(self).setdefault(cache_name, {})
    if key not in cache:
      cache[key] = method(self, arg)
    return cache[key]

  return wrap


def _state_values(state, num_players, policy):
  """Value of a state for every player given a policy."""
  if state.is_terminal():
    return np.array(state.returns())
  else:
    p_action = (
        state.chance_outcomes() if state.is_chance_node() else
        policy.action_probabilities(state).items())
    return sum(prob * _state_values(state.child(action), num_players, policy)
               for action, prob in p_action)


class _BestResponse(object):
  """Computes the best response to a specified strategy."""

  def __init__(self, game, player_id, policy, root_state):
    """Initializes the best-response calculation.

    Args:
      game: The game to analyze.
      player_id: The player id of the best-responder.
      policy: A callable, taking a state and returning a list of (action,
        probability) pairs.
      root_state: The state of the game at which to start analysis.
    """
    self._num_players = game.num_players()
    self._player_id = player_id
    self._policy = policy
    self._root_state = root_state
    self.infosets = self.info_sets(root_state)

  def info_sets(self, state):
    """Returns a dict of infostatekey to list of (state, cf_probability)."""
    grouped = collections.defaultdict(list)
    for node, cf_prob in self.decision_nodes(state):
      grouped[node.information_state(self._player_id)].append((node, cf_prob))
    return dict(grouped)

  def decision_nodes(self, parent_state):
    """Yields a (state, cf_prob) pair for each descendant decision node."""
    if parent_state.is_terminal():
      return
    if parent_state.current_player() == self._player_id:
      yield parent_state, 1.0
    for action, p_action in self.transitions(parent_state):
      child = parent_state.child(action)
      for descendant, p_descendant in self.decision_nodes(child):
        yield descendant, p_descendant * p_action

  def transitions(self, state):
    """Returns a list of (action, cf_prob) pairs from the specified state."""
    if state.current_player() == self._player_id:
      # Counterfactual reach probabilities exclude the best-responder's own
      # choices, so each of their legal actions carries probability 1.0.
      return [(action, 1.0) for action in state.legal_actions()]
    if state.is_chance_node():
      return state.chance_outcomes()
    return list(self._policy.action_probabilities(state).items())

  @_memoize_method
  def value(self, state):
    """Returns the value of the specified state to the best-responder."""
    if state.is_terminal():
      return state.player_return(self._player_id)
    if state.current_player() == self._player_id:
      infostate = state.information_state(self._player_id)
      return self.q_value(state, self.best_response_action(infostate))
    return sum(prob * self.q_value(state, action)
               for action, prob in self.transitions(state))

  def q_value(self, state, action):
    """Returns the value of the (state, action) to the best-responder."""
    return self.value(state.child(action))

  @_memoize_method
  def best_response_action(self, infostate):
    """Returns the best response for this information state."""
    infoset = self.infosets[infostate]

    def weighted_value(action):
      # Value of `action`, weighted by the counterfactual reach probability
      # of each world state consistent with this information state.
      return sum(cf_prob * self.q_value(node, action)
                 for node, cf_prob in infoset)

    # All states in an infoset share the same legal actions; use the first.
    return max(infoset[0][0].legal_actions(), key=weighted_value)


def best_response(game, policy, player_id):
  """Returns information about the specified player's best response.

  Given a game and a policy for every player, computes for a single player
  their best unilateral strategy. Returns the value improvement that player
  would get, the action they should take in each information state, and the
  value of each state when following their unilateral policy.

  Args:
    game: An open_spiel game, e.g. kuhn_poker
    policy: A `policy.Policy` object. This policy should depend only on the
      information state available to the current player, but this is not
      enforced.
    player_id: The integer id of a player in the game for whom the best
      response will be computed.

  Returns:
    A dictionary of values, with keys:
      best_response_action: The best unilateral strategy for `player_id` as a
        map from infostatekey to action_id.
      best_response_state_value: The value obtained for `player_id` when
        unilaterally switching strategy, for each state.
      best_response_value: The value obtained for `player_id` when unilaterally
        switching strategy.
      info_sets: A dict of info sets, mapping info state key to a list of
        `(state, counterfactual_reach_prob)` pairs.
      nash_conv: `best_response_value - on_policy_value`
      on_policy_value: The value for `player_id` when all players follow the
        policy
      on_policy_values: The value for each player when all players follow the
        policy
  """
  root = game.new_initial_state()
  responder = _BestResponse(game, player_id, policy, root)
  on_policy_values = _state_values(root, game.num_players(), policy)
  br_value = responder.value(root)

  # Evaluating the root only visits states reachable under the best response;
  # compute actions for the remaining information states so the returned map
  # covers every infostate.
  unvisited = set(responder.infosets) - set(responder.cache_best_response_action)
  for infostate in unvisited:
    responder.best_response_action(infostate)

  return {
      "best_response_action": responder.cache_best_response_action,
      "best_response_state_value": responder.cache_value,
      "best_response_value": br_value,
      "info_sets": responder.infosets,
      "nash_conv": br_value - on_policy_values[player_id],
      "on_policy_value": on_policy_values[player_id],
      "on_policy_values": on_policy_values,
  }


def exploitability(game, policy):
  """Returns the exploitability of the policy in the game.

  This is implemented only for 2 players constant-sum games, and is equivalent
  to NashConv / num_players in that case. Prefer using `nash_conv`.

  Args:
    game: An open_spiel game, e.g. kuhn_poker
    policy: A `policy.Policy` object. This policy should depend only on the
      information state available to the current player, but this is not
      enforced.

  Returns:
    The value that this policy achieves when playing against the worst-case
    non-cheating opponent, averaged across both starting positions. It has a
    minimum of zero (assuming the supplied policy is non-cheating) and
    this bound is achievable in a 2p game.

  Raises:
    ValueError if the game is not a two-player constant-sum turn-based game.
  """
  if game.num_players() != 2:
    raise ValueError("Game must be a 2-player game")
  game_info = game.get_type()
  if game_info.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
    raise ValueError("The game must be turn-based, not {}".format(
        game_info.dynamics))
  if game_info.utility not in (pyspiel.GameType.Utility.ZERO_SUM,
                               pyspiel.GameType.Utility.CONSTANT_SUM):
    raise ValueError("The game must be constant- or zero-sum, not {}".format(
        game_info.utility))
  root_state = game.new_initial_state()
  # Sum each player's best-response value against the fixed policy, then
  # subtract the game's constant total utility to get NashConv.
  total_br_value = 0.0
  for best_responder in range(game.num_players()):
    responder = _BestResponse(game, best_responder, policy, root_state)
    total_br_value += responder.value(root_state)
  nash_conv_value = total_br_value - game.utility_sum()
  return nash_conv_value / game.num_players()


_NashConvReturn = collections.namedtuple("_NashConvReturn",
                                         ["nash_conv", "player_improvements"])


def nash_conv(game, policy, return_only_nash_conv=True):
  r"""Returns a measure of closeness to Nash for a policy in the game.

  See https://arxiv.org/pdf/1711.00832.pdf for the NashConv definition.

  Args:
    game: An open_spiel game, e.g. kuhn_poker
    policy: A `policy.Policy` object. This policy should depend only on the
      information state available to the current player, but this is not
      enforced.
    return_only_nash_conv: Whether to only return the NashConv value, or a
      namedtuple containing additional statistics. Prefer using `False`, as we
      hope to change the default to that value.

  Returns:
    Returns a object with the following attributes:
    - player_improvements: A `[num_players]` numpy array of the improvement
      for players (i.e. value_player_p_versus_BR - value_player_p).
    - nash_conv: The sum over all players of the improvements in value that each
      player could obtain by unilaterally changing their strategy, i.e.
      sum(player_improvements).
  """
  root_state = game.new_initial_state()
  best_response_values = np.array([
      _BestResponse(game, best_responder, policy, root_state).value(root_state)
      for best_responder in range(game.num_players())
  ])
  on_policy_values = _state_values(root_state, game.num_players(), policy)
  player_improvements = best_response_values - on_policy_values
  nash_conv_ = sum(player_improvements)
  if return_only_nash_conv:
    return nash_conv_
  # Bug fix: previously this returned `on_policy_values` in the
  # `player_improvements` field, mislabeling the on-policy values as the
  # per-player best-response improvements documented above.
  return _NashConvReturn(
      nash_conv=nash_conv_, player_improvements=player_improvements)
