#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor-critic targets,
which are used in the IMPALA algorithm.

The following code is mainly referenced and copied from:
https://github.com/deepmind/scalable_agent/blob/master/vtrace.py

For details and theory see:

"Espeholt L, Soyer H, Munos R, et al. Impala: Scalable distributed 
deep-rl with importance weighted actor-learner 
architectures[J]. arXiv preprint arXiv:1802.01561, 2018."

"""

import collections
import paddle.fluid as fluid
from parl.core.fluid import layers
from parl.utils import MAX_INT32

VTraceReturns = collections.namedtuple('VTraceReturns',
                                       ['vs', 'pg_advantages'])


def from_importance_weights(behaviour_actions_log_probs,
                            target_actions_log_probs,
                            discounts,
                            rewards,
                            values,
                            bootstrap_value,
                            clip_rho_threshold=1.0,
                            clip_pg_rho_threshold=1.0,
                            name='vtrace_from_logits'):
    r"""V-trace from log-probabilities of taken actions.

    Calculates V-trace actor critic targets as described in

    "IMPALA: Scalable Distributed Deep-RL with
    Importance Weighted Actor-Learner Architectures"
    by Espeholt, Soyer, Munos et al.

    Target policy refers to the policy we are interested in improving and
    behaviour policy refers to the policy that generated the given
    rewards and actions.

    In the notation used throughout documentation and comments, T refers to the
    time dimension ranging from 0 to T-1. B refers to the batch size.

    Args:
      behaviour_actions_log_probs: A float32 tensor of shape [T, B] of
        log-probabilities of the taken actions under the behaviour policy.
      target_actions_log_probs: A float32 tensor of shape [T, B] of
        log-probabilities of the same actions under the target policy.
      discounts: A float32 tensor of shape [T, B] with the discount encountered
        when following the behaviour policy.
      rewards: A float32 tensor of shape [T, B] with the rewards generated by
        following the behaviour policy.
      values: A float32 tensor of shape [T, B] with the value function estimates
        wrt. the target policy.
      bootstrap_value: A float32 tensor of shape [B] with the value function
        estimate at time T.
      clip_rho_threshold: A scalar float32 with the clipping threshold for
        importance weights (rho) when calculating the baseline targets (vs),
        i.e. \bar{rho} in the paper. Pass None to disable clipping.
      clip_pg_rho_threshold: A scalar float32 with the clipping threshold
        on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
        Pass None to disable clipping.
      name: Unused; kept only for backward compatibility of the signature.

    Returns:
      A VTraceReturns namedtuple (vs, pg_advantages) where:
        vs: A float32 tensor of shape [T, B]. Can be used as target to
          train a baseline (V(x_t) - vs_t)^2.
        pg_advantages: A float32 tensor of shape [T, B]. Can be used as the
          advantage in the calculation of policy gradients.
    """

    # Sanity-check that all inputs share the same [T, B] layout
    # (bootstrap_value drops the leading time dimension).
    rank = len(behaviour_actions_log_probs.shape)  # Usually 2.
    assert len(target_actions_log_probs.shape) == rank
    assert len(values.shape) == rank
    assert len(bootstrap_value.shape) == (rank - 1)
    assert len(discounts.shape) == rank
    assert len(rewards.shape) == rank

    # log importance sampling weights.
    # V-trace performs operations on rhos in log-space for numerical stability.
    log_rhos = target_actions_log_probs - behaviour_actions_log_probs

    # Lift the Python-float thresholds into graph constants so they can be
    # fed to elementwise_min below.
    if clip_rho_threshold is not None:
        clip_rho_threshold = layers.fill_constant([1], 'float32',
                                                  clip_rho_threshold)
    if clip_pg_rho_threshold is not None:
        clip_pg_rho_threshold = layers.fill_constant([1], 'float32',
                                                     clip_pg_rho_threshold)

    rhos = layers.exp(log_rhos)
    # Truncated importance weights min(\bar{rho}, rho_s) used for the
    # baseline targets vs.
    if clip_rho_threshold is not None:
        clipped_rhos = layers.elementwise_min(rhos, clip_rho_threshold)
    else:
        clipped_rhos = rhos

    # c_s = min(1, rho_s), the trace-cutting coefficients of the paper.
    constant_one = layers.fill_constant([1], 'float32', 1.0)
    cs = layers.elementwise_min(rhos, constant_one)

    # Shift values one step left and append the bootstrapped value to get
    # V(x_{s+1}) for every s: [v_1, ..., v_T, bootstrap_value].
    values_1_t = layers.slice(values, axes=[0], starts=[1], ends=[MAX_INT32])
    values_t_plus_1 = layers.concat(
        [values_1_t, layers.unsqueeze(bootstrap_value, [0])], axis=0)

    # \delta_s V = clipped_rho_s * (r_s + \gamma V(x_{s+1}) - V(x_s))
    deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)

    # Backward scan accumulating v_s - V(x_s) over time.
    vs_minus_v_xs = recursively_scan(discounts, cs, deltas)

    # Add V(x_s) to get v_s.
    vs = layers.elementwise_add(vs_minus_v_xs, values)

    # Advantage for policy gradient: uses v_{s+1} (the target of the *next*
    # step), with the bootstrap value appended for the final step.
    vs_1_t = layers.slice(vs, axes=[0], starts=[1], ends=[MAX_INT32])
    vs_t_plus_1 = layers.concat(
        [vs_1_t, layers.unsqueeze(bootstrap_value, [0])], axis=0)

    if clip_pg_rho_threshold is not None:
        clipped_pg_rhos = layers.elementwise_min(rhos, clip_pg_rho_threshold)
    else:
        clipped_pg_rhos = rhos
    pg_advantages = (
        clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))

    # Make sure no gradients backpropagated through the returned values.
    vs.stop_gradient = True
    pg_advantages.stop_gradient = True
    return VTraceReturns(vs=vs, pg_advantages=pg_advantages)


def recursively_scan(discounts, cs, deltas):
    r"""Accumulate the V-trace corrections with a backward scan over time:

        vs_minus_v_xs(t) = deltas(t) + discounts(t) * cs(t) * vs_minus_v_xs(t + 1)

    Args:
        discounts: A float32 tensor of shape [T, B] with discounts encountered
                   when following the behaviour policy.
        cs: A float32 tensor of shape [T, B], corresponding to $c_s$ in the
            original paper.
        deltas: A float32 tensor of shape [T, B], corresponding to
                $\delta_s * V$ in the original paper.

    Returns:
        vs_minus_v_xs: A float32 tensor of shape [T, B], corresponding to
                       $v_s - V(x_s)$ in the original paper.
    """

    # Flip the time axis so a front-to-back StaticRNN realizes what is
    # logically a back-to-front recursion.
    rev_discounts = layers.reverse(x=discounts, axis=[0])
    rev_cs = layers.reverse(x=cs, axis=[0])
    rev_deltas = layers.reverse(x=deltas, axis=[0])

    # The accumulator starts at zero for every batch element: shape [B].
    zero_init = layers.fill_constant_batch_size_like(
        discounts, shape=[1], dtype='float32', value=0.0, input_dim_idx=1)

    rnn = layers.StaticRNN()
    with rnn.step():
        discount_t = rnn.step_input(rev_discounts)
        c_t = rnn.step_input(rev_cs)
        delta_t = rnn.step_input(rev_deltas)

        # `acc` holds vs_minus_v_xs(t + 1) from the previous step.
        acc = rnn.memory(init=zero_init)
        new_acc = delta_t + discount_t * c_t * acc

        rnn.update_memory(acc, new_acc)
        rnn.step_output(new_acc)

    scanned = rnn()

    # Undo the initial flip to restore chronological order.
    return layers.reverse(scanned, [0])
