from typing import List, Optional, Type, Union

import numpy as np
import torch
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.ppo import PPO, PPOConfig
from ray.rllib.algorithms.ppo.ppo_torch_policy import PPOTorchPolicy
from ray.rllib.evaluation.postprocessing import (
    Postprocessing,
    compute_gae_for_sample_batch,
    discount_cumsum,
)
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.torch_utils import explained_variance, sequence_mask
from ray.rllib.utils.typing import ModelGradients, TensorType
from torch.nn.utils import parameters_to_vector, vector_to_parameters


def get_device():
    """Return the torch device string for this process ("cpu" or "cuda:<idx>")."""
    if not torch.cuda.is_available():
        return "cpu"
    return f"cuda:{torch.cuda.current_device()}"


def patch_none_gradients(grads, params):
    """Replace ``None`` entries in `grads` with zero tensors shaped like the
    corresponding entry of `params` (as produced by ``allow_unused=True``)."""
    patched = list(grads)
    for idx, grad in enumerate(patched):
        if grad is None:
            patched[idx] = torch.zeros_like(params[idx])
    return patched


def flat_grad(grads):
    """Concatenate all non-``None`` gradient tensors into one 1-D tensor."""
    pieces = [g.reshape(-1) for g in grads if g is not None]
    return torch.cat(pieces)


def flat_hessian(hessians):
    """Flatten all non-``None`` Hessian(-vector) tensors into one detached 1-D tensor."""
    pieces = [h.contiguous().view(-1) for h in hessians if h is not None]
    return torch.cat(pieces).data


def flat_params(parameters):
    """Flatten the raw values (``.data``) of all parameters into one 1-D tensor."""
    return torch.cat([param.data.view(-1) for param in parameters])


def _get_last_r(policy: Policy, sample_batch: SampleBatch):
    """Bootstrap value for the trajectory tail.

    Returns 0.0 when the episode terminated; otherwise the value-function
    estimate of the last observation (trajectory was truncated).
    """
    if not sample_batch[SampleBatch.DONES][-1]:
        # Truncated trajectory: bootstrap with V(s_last). Build a
        # single-timestep input dict per the model's view requirements.
        input_dict = sample_batch.get_single_step_input_dict(
            policy.model.view_requirements, index="last"
        )
        return policy._value(**input_dict)
    return 0.0


def _add_deltas(sample_batch: SampleBatch, last_r: float, gamma: float):
    """Attach one-step TD residuals delta_t = r_t + gamma*V(s_{t+1}) - V(s_t)
    to the batch under key "DELTAS" and return the batch."""
    values = np.concatenate(
        [sample_batch[SampleBatch.VF_PREDS], np.array([last_r])]
    )
    deltas = (
        sample_batch[SampleBatch.REWARDS] + gamma * values[1:] - values[:-1]
    )
    sample_batch["DELTAS"] = deltas
    return sample_batch


def _add_returns(sample_batch: SampleBatch, last_r: float, gamma: float):
    """Attach discounted returns (bootstrapped with `last_r`) to the batch
    under key "RETURNS" and return the batch."""
    rewards_with_bootstrap = np.concatenate(
        [sample_batch[SampleBatch.REWARDS], np.array([last_r])]
    )
    returns = discount_cumsum(rewards_with_bootstrap, gamma)[:-1]
    sample_batch["RETURNS"] = returns.astype(np.float32)
    return sample_batch


class TrustRegionUpdator:
    """Applies TRPO trust-region updates to a model's parameters in place.

    The actor step follows the classic TRPO recipe: approximately solve
    F^-1 g with conjugate gradients (F = Fisher information matrix,
    obtained as the Hessian of the mean KL), scale the step to the KL
    trust region, then backtrack with a line search until the KL
    constraint and an improvement-ratio test are satisfied. The critic is
    updated with a single plain gradient-descent step.
    """

    # Trust-region radius: maximum mean KL between old and new policy.
    kl_threshold = 0.01
    # Maximum number of backtracking line-search iterations.
    ls_step = 15
    # Minimum (actual / expected) improvement ratio to accept a step.
    accept_ratio = 0.1
    # Multiplicative backtracking factor per failed line-search step.
    back_ratio = 0.8
    # Expected improvements below this tolerance are skipped entirely.
    atol = 1e-7
    # Learning rate for the critic's gradient-descent update.
    critic_lr = 5e-3

    def __init__(
        self,
        model: ModelV2,
        dist_class: Type[ActionDistribution],
        train_batch: SampleBatch,
        adv_targ,
        initialize_policy_loss,
        initialize_critic_loss=None,
    ):
        """Initialize the updator.

        Args:
            model: Policy/value model whose parameters are updated in place.
            dist_class: Action distribution class used to rebuild
                distributions from model logits.
            train_batch: Batch the losses were computed on; re-used here to
                recompute loss/KL/entropy during the line search.
            adv_targ: Advantage targets for the surrogate objective.
            initialize_policy_loss: Policy surrogate loss tensor to start from.
            initialize_critic_loss: Critic loss tensor; may be None when the
                critic is not updated.
        """
        self.model = model
        self.dist_class = dist_class
        self.train_batch = train_batch
        self.adv_targ = adv_targ
        self.initialize_policy_loss = initialize_policy_loss
        self.initialize_critic_loss = initialize_critic_loss
        # Snapshot used to roll back a failed line search (list of tensors).
        self.stored_actor_parameters = None
        self.device = get_device()

    @property
    def actor_parameters(self):
        # TODO: actor and critic parameters are not separated yet; this
        # currently returns ALL model parameters.
        return list(self.model.parameters())

    @property
    def critic_parameters(self):
        # TODO: see `actor_parameters` — not separated from the actor yet,
        # so this also returns ALL model parameters.
        return list(self.model.parameters())

    @property
    def loss(self):
        """Recompute the surrogate policy objective on the stored batch.

        Mirrors the objective in `TRPOTorchPolicy.loss`: the mean of
        (pi_new / pi_old) * advantage, masked over valid timesteps in the
        RNN case.
        """
        logits, state = self.model(self.train_batch)
        # NOTE(fix): previously a ValueError here was caught and printed,
        # after which the undefined `curr_action_dist` raised a confusing
        # NameError. Let any constructor error propagate instead.
        curr_action_dist = self.dist_class(logits, self.model)

        # pi_new(a|s) / pi_old(a|s), computed in log space for stability.
        logp_ratio = torch.exp(
            curr_action_dist.logp(self.train_batch[SampleBatch.ACTIONS])
            - self.train_batch[SampleBatch.ACTION_LOGP]
        )

        if state:
            # RNN case: average only over valid (non-padded) timesteps.
            B = len(self.train_batch[SampleBatch.SEQ_LENS])
            max_seq_len = logits.shape[0] // B
            mask = sequence_mask(
                self.train_batch[SampleBatch.SEQ_LENS],
                max_seq_len,
                time_major=self.model.is_time_major(),
            )
            mask = torch.reshape(mask, [-1])
            loss = (
                torch.sum(logp_ratio * self.adv_targ, dim=-1, keepdim=True) * mask
            ).sum() / mask.sum()
        else:
            loss = torch.sum(logp_ratio * self.adv_targ, dim=-1, keepdim=True).mean()

        return loss

    @property
    def kl(self):
        """Per-timestep KL(old_dist || current_dist) on the stored batch."""
        _logits, _state = self.model(self.train_batch)
        _curr_action_dist = self.dist_class(_logits, self.model)
        action_dist_inputs = self.train_batch[SampleBatch.ACTION_DIST_INPUTS]
        _prev_action_dist = self.dist_class(action_dist_inputs, self.model)
        return _prev_action_dist.kl(_curr_action_dist)

    @property
    def entropy(self):
        """Per-timestep entropy of the current action distribution."""
        _logits, _state = self.model(self.train_batch)
        _curr_action_dist = self.dist_class(_logits, self.model)
        return _curr_action_dist.entropy()

    def set_actor_params(self, new_flat_params):
        """Write a flat parameter vector back into the actor parameters."""
        vector_to_parameters(new_flat_params, self.actor_parameters)

    def store_current_actor_params(self):
        """Snapshot the current actor weights so the line search can be undone.

        NOTE(fix): the tensors must be cloned — the previous version stored
        the live parameter list, so after `set_actor_params` mutated those
        same tensors in place, the later recovery was a no-op.
        """
        self.stored_actor_parameters = [
            p.detach().clone() for p in self.actor_parameters
        ]

    def recovery_actor_params_to_before_linear_search(self):
        """Restore the actor weights snapshotted before the line search."""
        stored_flat = flat_params(self.stored_actor_parameters)
        self.set_actor_params(stored_flat)

    def fisher_vector_product(self, p):
        """Return (F + 0.1 * I) @ p, where F is the Fisher matrix (KL Hessian).

        Computed as a Hessian-vector product: grad((grad(KL) . p)).
        """
        # NOTE(fix): `detach()` is not in-place; the result must be rebound.
        p = p.detach()
        kl = self.kl.mean()
        kl_grads = torch.autograd.grad(
            kl, self.actor_parameters, create_graph=True, allow_unused=True
        )
        kl_grads = patch_none_gradients(kl_grads, self.actor_parameters)

        kl_grads = flat_grad(kl_grads)
        kl_grad_p = (kl_grads * p).sum()
        kl_hessian_p = torch.autograd.grad(
            kl_grad_p, self.actor_parameters, allow_unused=True
        )
        kl_hessian_p = patch_none_gradients(kl_hessian_p, self.actor_parameters)
        kl_hessian_p = flat_hessian(kl_hessian_p)
        # Damping term (0.1 * I) keeps CG well-conditioned.
        return kl_hessian_p + 0.1 * p

    def conjugate_gradients(self, b, nsteps, residual_tol=1e-10):
        """Approximately solve (F + damping) x = b with conjugate gradients.

        Args:
            b: Right-hand side (flat policy gradient).
            nsteps: Maximum number of CG iterations.
            residual_tol: Early-exit threshold on the squared residual norm.
        """
        x = torch.zeros(b.size()).to(device=self.device)
        r = b.clone()
        p = b.clone()
        rdotr = torch.dot(r, r)
        for i in range(nsteps):
            _Avp = self.fisher_vector_product(p)
            alpha = rdotr / torch.dot(p, _Avp)
            x += alpha * p
            r -= alpha * _Avp
            new_rdotr = torch.dot(r, r)
            betta = new_rdotr / rdotr
            p = r + betta * p
            rdotr = new_rdotr
            if rdotr < residual_tol:
                break
        return x

    def update(self, update_critic=True):
        """Run the actor trust-region step and (optionally) a critic step."""
        self.update_actor(self.initialize_policy_loss)
        if update_critic:
            self.update_critic(self.initialize_critic_loss)

    def update_critic(self, critic_loss):
        """Single gradient-descent step on the critic loss (lr = critic_lr)."""
        critic_loss_grad = torch.autograd.grad(
            critic_loss, self.critic_parameters, allow_unused=True
        )
        critic_loss_grad = patch_none_gradients(
            critic_loss_grad, self.critic_parameters
        )

        new_params = (
            parameters_to_vector(self.critic_parameters)
            - flat_grad(critic_loss_grad) * TrustRegionUpdator.critic_lr
        )
        vector_to_parameters(new_params, self.critic_parameters)
        return None

    def update_actor(self, policy_loss):
        """One TRPO actor step: CG direction, KL scaling, then a line search."""
        loss_grad = torch.autograd.grad(
            policy_loss, self.actor_parameters, allow_unused=True, retain_graph=True
        )
        loss_grad = patch_none_gradients(loss_grad, self.actor_parameters)

        pol_grad = flat_grad(loss_grad)
        # Approximately solve F^-1 g with conjugate gradients.
        step_dir = self.conjugate_gradients(
            b=pol_grad.data,
            nsteps=10,
        )

        # Scale the step so the quadratic KL model reaches kl_threshold.
        fisher_norm = pol_grad.dot(step_dir)
        scala = (
            0
            if fisher_norm < 0
            else torch.sqrt(2 * self.kl_threshold / (fisher_norm + 1e-8))
        )
        full_step = scala * step_dir

        loss = policy_loss.data.cpu().numpy()
        # NOTE(fix): use `flat_params` (not `flat_grad`) for the parameter
        # vector — same values, but the intent is clear and the result is
        # a detached copy of the current weights.
        params = flat_params(self.actor_parameters)
        self.store_current_actor_params()

        expected_improve = pol_grad.dot(full_step).item()
        linear_search_updated = False
        fraction = 1
        if expected_improve >= self.atol:
            # Backtracking line search: shrink the step until both the KL
            # constraint and the improvement-ratio test are satisfied.
            for i in range(self.ls_step):
                new_params = params + fraction * full_step
                self.set_actor_params(new_params)
                new_loss = self.loss.data.cpu().numpy()
                loss_improve = new_loss - loss
                kl = self.kl.mean()
                if (
                    kl < self.kl_threshold
                    and (loss_improve / expected_improve) >= self.accept_ratio
                    and loss_improve.item() > 0
                ):
                    linear_search_updated = True
                    break
                expected_improve *= self.back_ratio
                fraction *= self.back_ratio

            if not linear_search_updated:
                # No acceptable step found: roll back to pre-search weights.
                self.recovery_actor_params_to_before_linear_search()


class TRPO(PPO):
    """Trust Region Policy Optimization (TRPO) algorithm.

    This class implements the TRPO algorithm, which is a policy gradient method
    that uses a trust region to ensure stable updates to the policy.

    See: https://arxiv.org/abs/1502.05477
    """

    @classmethod
    @override(PPO)
    def get_default_config(cls) -> AlgorithmConfig:
        """Return the default TRPO configuration."""
        return TRPOConfig()

    @classmethod
    @override(PPO)
    def get_default_policy_class(
        cls, config: AlgorithmConfig
    ) -> Optional[Type[Policy]]:
        """Return the torch policy class; TRPO supports no other framework."""
        if config["framework"] != "torch":
            raise ValueError(
                "TRPO only supports Torch framework. Please set `framework='torch'`."
            )
        return TRPOTorchPolicy


class TRPOTorchPolicy(PPOTorchPolicy):
    """Torch policy class for TRPO.

    `loss` builds the (unclipped) surrogate objective and stores a
    `TrustRegionUpdator`; `apply_gradients` then ignores the SGD gradients
    RLlib computed and performs the trust-region update instead.
    """

    @override(PPOTorchPolicy)
    def loss(
        self,
        model: ModelV2,
        dist_class: Type[ActionDistribution],
        train_batch: SampleBatch,
    ) -> Union[TensorType, List[TensorType]]:
        """Compute the TRPO surrogate loss for a training batch.

        Side effect: stores a `TrustRegionUpdator` on `self.trpo_updator`,
        later consumed by `apply_gradients`.
        """
        logits, state = model(train_batch)
        curr_action_dist = dist_class(logits, model)

        # Standardize advantages (zero mean, unit variance).
        advantages = train_batch[Postprocessing.ADVANTAGES]
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        # pi_new(a|s) / pi_old(a|s), computed in log space for stability.
        logp_ratio = torch.exp(
            curr_action_dist.logp(train_batch[SampleBatch.ACTIONS])
            - train_batch[SampleBatch.ACTION_LOGP]
        )

        # RNN case: Mask away 0-padded chunks at end of time axis.
        if state:
            B = len(train_batch[SampleBatch.SEQ_LENS])
            max_seq_len = logits.shape[0] // B
            mask = sequence_mask(
                train_batch[SampleBatch.SEQ_LENS],
                max_seq_len,
                time_major=model.is_time_major(),
            )
            mask = torch.reshape(mask, [-1])
            num_valid = torch.sum(mask)

            def reduce_mean_valid(t):
                # Mean over valid (non-padded) timesteps only.
                return torch.sum(t[mask]) / num_valid

            loss = (
                torch.sum(logp_ratio * advantages, dim=-1, keepdim=True) * mask
            ).sum() / mask.sum()
        # non-RNN case: No masking.
        else:
            mask = None
            reduce_mean_valid = torch.mean
            loss = torch.sum(logp_ratio * advantages, dim=-1, keepdim=True).mean()

        curr_entropy = curr_action_dist.entropy()

        # KL(old || new) for the stats / penalty term.
        prev_action_dist = dist_class(
            train_batch[SampleBatch.ACTION_DIST_INPUTS], model
        )
        action_kl = prev_action_dist.kl(curr_action_dist)

        # Compute a (PPO-style clipped) value function loss.
        if self.config["use_critic"]:
            prev_value_fn_out = train_batch[SampleBatch.VF_PREDS]
            value_fn_out = model.value_function()
            vf_loss1 = torch.pow(
                value_fn_out - train_batch[Postprocessing.VALUE_TARGETS], 2.0
            )
            vf_clipped = prev_value_fn_out + torch.clamp(
                value_fn_out - prev_value_fn_out,
                -self.config["vf_clip_param"],
                self.config["vf_clip_param"],
            )
            vf_loss2 = torch.pow(
                vf_clipped - train_batch[Postprocessing.VALUE_TARGETS], 2.0
            )
            vf_loss = torch.max(vf_loss1, vf_loss2)
            mean_vf_loss = reduce_mean_valid(vf_loss)
        # Ignore the value function.
        else:
            vf_loss = mean_vf_loss = 0.0

        # The actual parameter update happens in `apply_gradients` via
        # this updator (not via the SGD gradients of `total_loss`).
        self.trpo_updator = TrustRegionUpdator(
            model=model,
            dist_class=dist_class,
            train_batch=train_batch,
            adv_targ=advantages,
            initialize_policy_loss=loss,
            initialize_critic_loss=mean_vf_loss,
        )

        total_loss = -loss + reduce_mean_valid(
            self.kl_coeff * action_kl
            + self.config["vf_loss_coeff"] * vf_loss
            - self.entropy_coeff * curr_entropy
        )

        # Store values for stats function in model (tower), such that for
        # multi-GPU, we do not override them during the parallel loss phase.
        mean_kl_loss = reduce_mean_valid(action_kl)
        mean_entropy = reduce_mean_valid(curr_entropy)

        model.tower_stats["total_loss"] = total_loss
        model.tower_stats["mean_policy_loss"] = -loss
        model.tower_stats["mean_vf_loss"] = mean_vf_loss
        model.tower_stats["vf_explained_var"] = explained_variance(
            train_batch[Postprocessing.VALUE_TARGETS], model.value_function()
        )
        model.tower_stats["mean_entropy"] = mean_entropy
        model.tower_stats["mean_kl_loss"] = mean_kl_loss

        return total_loss

    @override(PPOTorchPolicy)
    def postprocess_trajectory(
        self, sample_batch, other_agent_batches=None, episode=None
    ):
        """Run GAE postprocessing, then attach "RETURNS" and "DELTAS" keys."""
        sample_batch = compute_gae_for_sample_batch(
            self, sample_batch, other_agent_batches, episode
        )

        last_r = _get_last_r(self, sample_batch)
        gamma = self.config["gamma"]

        sample_batch = _add_returns(
            sample_batch=sample_batch, last_r=last_r, gamma=gamma
        )
        sample_batch = _add_deltas(
            sample_batch=sample_batch, last_r=last_r, gamma=gamma
        )

        return sample_batch

    @override(Policy)
    def apply_gradients(self, gradients: ModelGradients) -> None:
        """Ignore the supplied SGD gradients; run the trust-region update.

        NOTE(fix): the critic update is gated on `use_critic` — with
        `use_critic=False` the stored critic loss is the float 0.0, and
        `torch.autograd.grad` on it would raise.
        """
        self.trpo_updator.update(update_critic=self.config["use_critic"])


class TRPOConfig(PPOConfig):
    """PPO-derived config preset for TRPO (torch-only, old policy API stack)."""

    def __init__(self, algo_class=None):
        """Initializes a TRPOConfig instance."""
        super().__init__(algo_class or TRPO)

        # TRPO requires torch and the pre-RLModule/-Learner API stack;
        # AlgorithmConfig setters return self, so the calls chain.
        self.framework("torch").rl_module(_enable_rl_module_api=False).training(
            _enable_learner_api=False
        )
