from numpy import (
    ndarray,
    array,
    newaxis,
    concatenate,
    log,
)
from numpy.random import (
    Generator,
)
from tensorflow import (
    Tensor as tf_Tensor,
    Variable as tf_Variable,
    GradientTape as tf_GradientTape,
    float32 as tf_float32,
    function as tf_function,
    concat as tf_concat,
    add as tf_add,
    exp as tf_exp,
    reduce_min as tf_reduce_min,
    reduce_mean as tf_reduce_mean,
)

from src.models.helpers.network_models import (
    ValueNetwork,
    PolicyNetworkSoft,
)
from src.models.helpers.experience_buffer import (
    ExperienceBuffer,
)


class SoftActorCritic:
    """Soft Actor-Critic (SAC) agent.

    Maintains twin Q ("value") networks with Polyak-averaged targets, one
    stochastic policy network, an automatically tuned entropy scale alpha
    (stored as log(alpha) so it stays positive), and an experience buffer
    that supplies importance weights for the sampled batches.
    """

    def __init__(
            self,
            rng: Generator,
            future_reward_discount_gamma: float,
            entropy_scale_alpha_initial: float,
            target_entropy: float,
            entropy_scale_optimizer,
            entropy_scale_optimizer_args: dict,
            training_minimum_experiences: int,
            training_batch_size: int,
            training_target_update_momentum_tau: float,
            experience_buffer_args: dict,
            network_args: dict,
    ) -> None:
        """
        Initialize the SoftActorCritic agent.

        Args:
            rng: NumPy random number generator.
            future_reward_discount_gamma: Future-reward discount factor gamma.
            entropy_scale_alpha_initial: Initial value of the entropy scale alpha.
            target_entropy: Target entropy used when tuning alpha.
            entropy_scale_optimizer: Optimizer class for the entropy scale.
            entropy_scale_optimizer_args: Keyword arguments for that optimizer.
            training_minimum_experiences: Minimum number of buffered experiences
                required before training starts.
            training_batch_size: Batch size sampled per training step.
            training_target_update_momentum_tau: Momentum factor tau for target
                network updates (1.0 = hard copy).
            experience_buffer_args: Keyword arguments for the ExperienceBuffer.
            network_args: Keyword arguments forwarded to _initialize_networks.
        """

        self.rng = rng

        self.future_reward_discount_gamma = future_reward_discount_gamma

        # Train log(alpha) instead of alpha itself so that
        # alpha = exp(log_alpha) is guaranteed to remain positive.
        self.log_entropy_scale_alpha = tf_Variable(log(entropy_scale_alpha_initial),
                                                   trainable=True, dtype=tf_float32)
        self.target_entropy = target_entropy
        self.entropy_scale_alpha_optimizer = entropy_scale_optimizer(**entropy_scale_optimizer_args)

        self.training_minimum_experiences = training_minimum_experiences
        self.training_batch_size = training_batch_size
        self.training_target_update_momentum_tau = training_target_update_momentum_tau

        self.experience_buffer = ExperienceBuffer(rng=rng, **experience_buffer_args)

        # networks['value']: two {primary, target} pairs (clipped double-Q);
        # networks['policy']: one {primary, target} pair.
        self.networks: dict = {
            'value': [],
            'policy': [],
        }
        self._initialize_networks(**network_args)

    def _initialize_networks(
            self,
            value_network_args: dict,
            policy_network_args: dict,
            value_network_optimizer,
            value_network_optimizer_args: dict,
            # value_network_loss,
            policy_network_optimizer,
            policy_network_optimizer_args: dict,
            # policy_network_loss,
            size_state,
            num_actions,
    ) -> None:
        """
        Create, build, and compile the value and policy networks.

        Args:
            value_network_args: Keyword arguments for ValueNetwork.
            policy_network_args: Keyword arguments for PolicyNetworkSoft.
            value_network_optimizer: Optimizer class for the value networks.
            value_network_optimizer_args: Keyword arguments for that optimizer.
            policy_network_optimizer: Optimizer class for the policy network.
            policy_network_optimizer_args: Keyword arguments for that optimizer.
            size_state: Dimensionality of the state vector.
            num_actions: Number of action dimensions.
        """

        # Two value-network pairs for the clipped double-Q trick.
        for _ in range(2):
            self.networks['value'].append(
                {
                    'primary': ValueNetwork(**value_network_args),
                    'target': ValueNetwork(**value_network_args),
                }
            )

        # A single policy-network pair.
        for _ in range(1):
            self.networks['policy'].append(
                {
                    'primary': PolicyNetworkSoft(num_actions=num_actions, **policy_network_args),
                    'target': PolicyNetworkSoft(num_actions=num_actions, **policy_network_args),
                }
            )

        # Build (trace input shapes with dummy data) and compile the networks.
        dummy_state = self.rng.random(size_state)
        dummy_action = self.rng.random(num_actions)
        for network_type, network_list in self.networks.items():
            # Choose the dummy input and optimizer per network type. Value
            # networks consume the concatenated [state, action] vector.
            if network_type == 'policy':
                dummy_input = dummy_state[newaxis]
                optimizer = policy_network_optimizer
                optimizer_args = policy_network_optimizer_args
                # loss = policy_network_loss
            elif network_type == 'value':
                dummy_input = concatenate([dummy_state, dummy_action])[newaxis]
                optimizer = value_network_optimizer
                optimizer_args = value_network_optimizer_args
                # loss = value_network_loss
            # Feed the dummy input to both primary and target, then compile
            # only the primary (targets are never trained directly).
            for network_pair in network_list:
                for network_rank, network in network_pair.items():
                    network.initialize_inputs(dummy_input)
                network_pair['primary'].compile(
                    optimizer=optimizer(**optimizer_args),
                    # loss=loss,  # TODO: loss currently breaks save/load; may need a fix
                    jit_compile=True,
                )
        # tau=1.0 performs a hard copy primary -> target at start-up.
        self.update_target_networks(tau_target_update_momentum=1.0)

    @tf_function
    def update_target_networks(
            self,
            tau_target_update_momentum: float,
    ) -> None:
        """
        Polyak-update all target networks toward their primaries.

        target <- tau * primary + (1 - tau) * target

        Args:
            tau_target_update_momentum: Momentum factor tau; 0 is a no-op,
                1 is a hard copy.
        """

        # NOTE: tau arrives as a Python float, so this guard is resolved at
        # tf.function trace time.
        if tau_target_update_momentum == 0:
            return

        for network_list in self.networks.values():
            for network_pair in network_list:
                for v_primary, v_target in zip(network_pair['primary'].trainable_variables,
                                               network_pair['target'].trainable_variables):
                    v_target.assign(tau_target_update_momentum * v_primary
                                    + (1 - tau_target_update_momentum) * v_target)

    def get_action(
            self,
            state,
    ) -> ndarray:
        """
        Sample an action from the primary policy for the given state.

        Args:
            state: Current state (shape expected by the policy network).

        Returns:
            Flat NumPy array of action values.
        """
        actions, _ = self.networks['policy'][0]['primary'].get_action_and_log_prob_density(state=state)

        return actions.numpy().flatten()

    def add_experience(
            self,
            experience: dict,
    ) -> None:
        """
        Append one experience to the experience buffer.

        Args:
            experience: Experience dict with keys 'state', 'action',
                'reward', 'next_state' (as consumed by train()).
        """
        self.experience_buffer.add_experience(experience=experience)

    def train(
            self,
            toggle_train_value_networks: bool = True,
            toggle_train_policy_network: bool = True,
            toggle_train_entropy_scale_alpha: bool = True,
    ) -> tuple:
        """
        Run one training step: sample a batch and update the selected parts.

        Args:
            toggle_train_value_networks: Whether to train the value networks.
            toggle_train_policy_network: Whether to train the policy network.
            toggle_train_entropy_scale_alpha: Whether to tune the entropy scale.

        Returns:
            Tuple (mean log-prob density, value loss). Returns the sentinel
            (0, -1) when the buffer holds fewer than
            training_minimum_experiences samples and no training occurs.
        """

        if self.experience_buffer.get_len() < self.training_minimum_experiences:
            return 0, -1

        (
            sample_experiences,
            experience_ids,
            sample_importance_weights,
        ) = self.experience_buffer.sample(batch_size=self.training_batch_size)

        # Rewards and importance weights are reshaped to column vectors so
        # they broadcast against per-sample Q estimates of shape (batch, 1).
        mean_log_prob_density, value_loss = self.train_graph(
            states=array(
                [experience['state'] for experience in sample_experiences], dtype='float32'),
            actions=array(
                [experience['action'] for experience in sample_experiences], dtype='float32'),
            rewards=array(
                [experience['reward'] for experience in sample_experiences], dtype='float32')[newaxis].transpose(),
            next_states=array(
                [experience['next_state'] for experience in sample_experiences], dtype='float32'),
            sample_importance_weights=array(
                sample_importance_weights, dtype='float32')[newaxis].transpose(),
            toggle_train_value_networks=toggle_train_value_networks,
            toggle_train_policy_network=toggle_train_policy_network,
            toggle_train_entropy_scale_alpha=toggle_train_entropy_scale_alpha,
        )

        return mean_log_prob_density, value_loss

    @tf_function
    def train_graph(
            self,
            states,
            actions,
            rewards,
            next_states,
            sample_importance_weights,
            toggle_train_value_networks,
            toggle_train_policy_network,
            toggle_train_entropy_scale_alpha,
    ) -> tuple[tf_Tensor, tf_Tensor]:
        """
        Perform one SAC update inside a TensorFlow graph.

        The toggle arguments are Python booleans, so each combination traces
        its own concrete function and the `if` branches are resolved at
        trace time.

        Args:
            states: Batch of states.
            actions: Batch of actions taken.
            rewards: Batch of rewards, shape (batch, 1).
            next_states: Batch of successor states.
            sample_importance_weights: Per-sample weights, shape (batch, 1).
            toggle_train_value_networks: Whether to train the value networks.
            toggle_train_policy_network: Whether to train the policy network.
            toggle_train_entropy_scale_alpha: Whether to tune the entropy scale.

        Returns:
            Tuple (mean log-prob density of policy actions, value loss of the
            second value network). Either entry is 0.0 when the corresponding
            training step was toggled off.
        """

        # BUGFIX: default values so the alpha update and the return statement
        # are well-defined even when the corresponding toggles are off; the
        # original raised NameError in those combinations.
        value_loss = 0.0
        policy_action_log_prob_densities = 0.0

        if toggle_train_value_networks:
            # Build the target: r(s, a) + gamma * (Q_hat(s', a') - alpha * log prob(a'|s'))
            # In expectation the entropy bonus raises the value of (s, a) pairs
            # leading to a'; this estimate has higher variance.
            target_q = rewards
            if self.future_reward_discount_gamma > 0.0:
                (
                    next_actions,
                    next_action_log_prob_densities,
                ) = self.networks['policy'][0]['primary'].get_action_and_log_prob_density(state=next_states)
                value_network_input = tf_concat([next_states, next_actions], axis=1)
                next_states_value_estimates_1 = self.networks['value'][0]['target'].call(value_network_input)
                next_states_value_estimates_2 = self.networks['value'][1]['target'].call(value_network_input)
                # Clipped double-Q: take the element-wise minimum of the two
                # target estimates to counteract overestimation bias.
                next_states_conservative_value_estimates = tf_reduce_min(
                    [next_states_value_estimates_1, next_states_value_estimates_2], axis=0)
                target_q = target_q + self.future_reward_discount_gamma * (
                    next_states_conservative_value_estimates
                    - tf_exp(self.log_entropy_scale_alpha) * next_action_log_prob_densities
                )

            value_network_input_batch = tf_concat([states, actions], axis=1)
            network = self.networks['value'][0]['primary']
            with tf_GradientTape() as tape:  # automatic differentiation
                estimated_q = network.call(value_network_input_batch)
                td_error = estimated_q - target_q
                # Importance-weighted squared TD error.
                value_loss = tf_reduce_mean(sample_importance_weights * td_error ** 2)
            gradients = tape.gradient(target=value_loss, sources=network.trainable_variables)
            network.optimizer.apply_gradients(zip(gradients, network.trainable_variables))

            network = self.networks['value'][1]['primary']
            with tf_GradientTape() as tape:  # automatic differentiation
                estimated_q = network.call(value_network_input_batch)
                td_error = estimated_q - target_q
                value_loss = tf_reduce_mean(sample_importance_weights * td_error ** 2)
            gradients = tape.gradient(target=value_loss, sources=network.trainable_variables)
            network.optimizer.apply_gradients(zip(gradients, network.trainable_variables))

            self.update_target_networks(tau_target_update_momentum=self.training_target_update_momentum_tau)

        if toggle_train_policy_network:
            policy_network_input_batch = states
            network = self.networks['policy'][0]['primary']
            with tf_GradientTape() as tape:
                (
                    policy_actions,
                    policy_action_log_prob_densities,
                ) = network.get_action_and_log_prob_density(state=policy_network_input_batch)
                value_network_input_batch = tf_concat([states, policy_actions], axis=1)
                # Target or primary networks? primary -> faster updates,
                # target -> stable but delayed.
                value_estimate_1 = self.networks['value'][0]['primary'].call(value_network_input_batch)
                value_estimate_2 = self.networks['value'][1]['primary'].call(value_network_input_batch)
                # Element-wise minimum of the twin estimates (clipped double-Q).
                value_estimate_min = tf_reduce_min([value_estimate_1, value_estimate_2], axis=0)
                policy_loss = tf_reduce_mean(
                    # pull toward high value:
                    sample_importance_weights * -value_estimate_min
                    # pull toward high variance - we want to minimize the mean
                    # log-prob -> more uncertainty:
                    + tf_exp(self.log_entropy_scale_alpha) * policy_action_log_prob_densities
                )
            gradients = tape.gradient(target=policy_loss, sources=network.trainable_variables)
            network.optimizer.apply_gradients(zip(gradients, network.trainable_variables))

        if toggle_train_entropy_scale_alpha:
            # NOTE(review): alpha tuning uses the log-probs sampled in the
            # policy step above; with toggle_train_policy_network=False it
            # falls back to the 0.0 default and the update is degenerate.
            with tf_GradientTape() as tape:
                # If (log-probs (negative) + target entropy) > 0, increase the
                # variance weight to encourage higher variance, pushing
                # log-probs + target entropy back toward zero.
                alpha_loss = -self.log_entropy_scale_alpha * tf_reduce_mean(
                    tf_add(policy_action_log_prob_densities, self.target_entropy))
            alpha_gradients = tape.gradient(target=alpha_loss, sources=[self.log_entropy_scale_alpha])
            self.entropy_scale_alpha_optimizer.apply_gradients(zip(alpha_gradients, [self.log_entropy_scale_alpha]))

        return tf_reduce_mean(policy_action_log_prob_densities), value_loss
